Chromium Code Reviews

Side by Side Diff: src/mark-compact.cc

Issue 430503007: Rename ASSERT* to DCHECK*. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: REBASE and fixes (created 6 years, 4 months ago)
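Context for the rename: ASSERT and variants such as ASSERT_EQ were V8's debug-only checks, and this patch mechanically renames them to the Chromium-style DCHECK spelling; CHECK, which also fires in release builds, is left alone (note the unchanged CHECK in VisitPointers below). As a rough illustrative sketch of the debug-only behaviour (not V8's actual macro definition; the DEBUG gate and the macro body here are assumptions for the example):

    #include <cstdio>
    #include <cstdlib>

    // Hypothetical debug-only check macro, gated on a DEBUG build flag.
    // V8's real definition is more elaborate, but the idea is the same.
    #ifdef DEBUG
    #define DCHECK(condition)                                             \
      do {                                                                \
        if (!(condition)) {                                               \
          std::fprintf(stderr, "Debug check failed: %s\n", #condition);   \
          std::abort();                                                   \
        }                                                                 \
      } while (false)
    #else
    #define DCHECK(condition) ((void)0)  // compiles to nothing in release builds
    #endif

    int main() {
      int pending = 0;
      DCHECK(pending == 0);  // evaluated only when DEBUG is defined
      return pending;
    }

The pattern applied throughout this file is therefore ASSERT(x) -> DCHECK(x) and ASSERT_EQ(a, b) -> DCHECK_EQ(a, b), with CHECK calls untouched.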
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/v8.h" 5 #include "src/v8.h"
6 6
7 #include "src/base/atomicops.h" 7 #include "src/base/atomicops.h"
8 #include "src/code-stubs.h" 8 #include "src/code-stubs.h"
9 #include "src/compilation-cache.h" 9 #include "src/compilation-cache.h"
10 #include "src/cpu-profiler.h" 10 #include "src/cpu-profiler.h"
(...skipping 50 matching lines...)
61 void VisitPointers(Object** start, Object** end) { 61 void VisitPointers(Object** start, Object** end) {
62 for (Object** current = start; current < end; current++) { 62 for (Object** current = start; current < end; current++) {
63 if ((*current)->IsHeapObject()) { 63 if ((*current)->IsHeapObject()) {
64 HeapObject* object = HeapObject::cast(*current); 64 HeapObject* object = HeapObject::cast(*current);
65 CHECK(heap_->mark_compact_collector()->IsMarked(object)); 65 CHECK(heap_->mark_compact_collector()->IsMarked(object));
66 } 66 }
67 } 67 }
68 } 68 }
69 69
70 void VisitEmbeddedPointer(RelocInfo* rinfo) { 70 void VisitEmbeddedPointer(RelocInfo* rinfo) {
71 ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); 71 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
72 if (!rinfo->host()->IsWeakObject(rinfo->target_object())) { 72 if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
73 Object* p = rinfo->target_object(); 73 Object* p = rinfo->target_object();
74 VisitPointer(&p); 74 VisitPointer(&p);
75 } 75 }
76 } 76 }
77 77
78 void VisitCell(RelocInfo* rinfo) { 78 void VisitCell(RelocInfo* rinfo) {
79 Code* code = rinfo->host(); 79 Code* code = rinfo->host();
80 ASSERT(rinfo->rmode() == RelocInfo::CELL); 80 DCHECK(rinfo->rmode() == RelocInfo::CELL);
81 if (!code->IsWeakObject(rinfo->target_cell())) { 81 if (!code->IsWeakObject(rinfo->target_cell())) {
82 ObjectVisitor::VisitCell(rinfo); 82 ObjectVisitor::VisitCell(rinfo);
83 } 83 }
84 } 84 }
85 85
86 private: 86 private:
87 Heap* heap_; 87 Heap* heap_;
88 }; 88 };
89 89
90 90
(...skipping 259 matching lines...)
350 PrintF("[%s]: %d pages, %d (%.1f%%) free\n", 350 PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
351 AllocationSpaceName(space->identity()), 351 AllocationSpaceName(space->identity()),
352 number_of_pages, 352 number_of_pages,
353 static_cast<int>(free), 353 static_cast<int>(free),
354 static_cast<double>(free) * 100 / reserved); 354 static_cast<double>(free) * 100 / reserved);
355 } 355 }
356 356
357 357
358 bool MarkCompactCollector::StartCompaction(CompactionMode mode) { 358 bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
359 if (!compacting_) { 359 if (!compacting_) {
360 ASSERT(evacuation_candidates_.length() == 0); 360 DCHECK(evacuation_candidates_.length() == 0);
361 361
362 #ifdef ENABLE_GDB_JIT_INTERFACE 362 #ifdef ENABLE_GDB_JIT_INTERFACE
363 // If GDBJIT interface is active disable compaction. 363 // If GDBJIT interface is active disable compaction.
364 if (FLAG_gdbjit) return false; 364 if (FLAG_gdbjit) return false;
365 #endif 365 #endif
366 366
367 CollectEvacuationCandidates(heap()->old_pointer_space()); 367 CollectEvacuationCandidates(heap()->old_pointer_space());
368 CollectEvacuationCandidates(heap()->old_data_space()); 368 CollectEvacuationCandidates(heap()->old_data_space());
369 369
370 if (FLAG_compact_code_space && 370 if (FLAG_compact_code_space &&
(...skipping 17 matching lines...)
388 compacting_ = evacuation_candidates_.length() > 0; 388 compacting_ = evacuation_candidates_.length() > 0;
389 } 389 }
390 390
391 return compacting_; 391 return compacting_;
392 } 392 }
393 393
394 394
395 void MarkCompactCollector::CollectGarbage() { 395 void MarkCompactCollector::CollectGarbage() {
396 // Make sure that Prepare() has been called. The individual steps below will 396 // Make sure that Prepare() has been called. The individual steps below will
397 // update the state as they proceed. 397 // update the state as they proceed.
398 ASSERT(state_ == PREPARE_GC); 398 DCHECK(state_ == PREPARE_GC);
399 399
400 MarkLiveObjects(); 400 MarkLiveObjects();
401 ASSERT(heap_->incremental_marking()->IsStopped()); 401 DCHECK(heap_->incremental_marking()->IsStopped());
402 402
403 if (FLAG_collect_maps) ClearNonLiveReferences(); 403 if (FLAG_collect_maps) ClearNonLiveReferences();
404 404
405 ClearWeakCollections(); 405 ClearWeakCollections();
406 406
407 #ifdef VERIFY_HEAP 407 #ifdef VERIFY_HEAP
408 if (FLAG_verify_heap) { 408 if (FLAG_verify_heap) {
409 VerifyMarking(heap_); 409 VerifyMarking(heap_);
410 } 410 }
411 #endif 411 #endif
(...skipping 13 matching lines...)
425 if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) { 425 if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
426 VerifyOmittedMapChecks(); 426 VerifyOmittedMapChecks();
427 } 427 }
428 #endif 428 #endif
429 429
430 Finish(); 430 Finish();
431 431
432 if (marking_parity_ == EVEN_MARKING_PARITY) { 432 if (marking_parity_ == EVEN_MARKING_PARITY) {
433 marking_parity_ = ODD_MARKING_PARITY; 433 marking_parity_ = ODD_MARKING_PARITY;
434 } else { 434 } else {
435 ASSERT(marking_parity_ == ODD_MARKING_PARITY); 435 DCHECK(marking_parity_ == ODD_MARKING_PARITY);
436 marking_parity_ = EVEN_MARKING_PARITY; 436 marking_parity_ = EVEN_MARKING_PARITY;
437 } 437 }
438 } 438 }
439 439
440 440
441 #ifdef VERIFY_HEAP 441 #ifdef VERIFY_HEAP
442 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { 442 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
443 PageIterator it(space); 443 PageIterator it(space);
444 444
445 while (it.has_next()) { 445 while (it.has_next()) {
(...skipping 111 matching lines...)
557 } 557 }
558 558
559 Heap* heap_; 559 Heap* heap_;
560 PagedSpace* space_; 560 PagedSpace* space_;
561 561
562 DISALLOW_COPY_AND_ASSIGN(SweeperTask); 562 DISALLOW_COPY_AND_ASSIGN(SweeperTask);
563 }; 563 };
564 564
565 565
566 void MarkCompactCollector::StartSweeperThreads() { 566 void MarkCompactCollector::StartSweeperThreads() {
567 ASSERT(free_list_old_pointer_space_.get()->IsEmpty()); 567 DCHECK(free_list_old_pointer_space_.get()->IsEmpty());
568 ASSERT(free_list_old_data_space_.get()->IsEmpty()); 568 DCHECK(free_list_old_data_space_.get()->IsEmpty());
569 sweeping_in_progress_ = true; 569 sweeping_in_progress_ = true;
570 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { 570 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
571 isolate()->sweeper_threads()[i]->StartSweeping(); 571 isolate()->sweeper_threads()[i]->StartSweeping();
572 } 572 }
573 if (FLAG_job_based_sweeping) { 573 if (FLAG_job_based_sweeping) {
574 V8::GetCurrentPlatform()->CallOnBackgroundThread( 574 V8::GetCurrentPlatform()->CallOnBackgroundThread(
575 new SweeperTask(heap(), heap()->old_data_space()), 575 new SweeperTask(heap(), heap()->old_data_space()),
576 v8::Platform::kShortRunningTask); 576 v8::Platform::kShortRunningTask);
577 V8::GetCurrentPlatform()->CallOnBackgroundThread( 577 V8::GetCurrentPlatform()->CallOnBackgroundThread(
578 new SweeperTask(heap(), heap()->old_pointer_space()), 578 new SweeperTask(heap(), heap()->old_pointer_space()),
579 v8::Platform::kShortRunningTask); 579 v8::Platform::kShortRunningTask);
580 } 580 }
581 } 581 }
582 582
583 583
584 void MarkCompactCollector::EnsureSweepingCompleted() { 584 void MarkCompactCollector::EnsureSweepingCompleted() {
585 ASSERT(sweeping_in_progress_ == true); 585 DCHECK(sweeping_in_progress_ == true);
586 586
587 // If sweeping is not completed, we try to complete it here. If we do not 587 // If sweeping is not completed, we try to complete it here. If we do not
588 // have sweeper threads we have to complete since we do not have a good 588 // have sweeper threads we have to complete since we do not have a good
589 // indicator for a swept space in that case. 589 // indicator for a swept space in that case.
590 if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) { 590 if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) {
591 SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0); 591 SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
592 SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0); 592 SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
593 } 593 }
594 594
595 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { 595 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
(...skipping 57 matching lines...)
653 } 653 }
654 654
655 655
656 bool MarkCompactCollector::AreSweeperThreadsActivated() { 656 bool MarkCompactCollector::AreSweeperThreadsActivated() {
657 return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping; 657 return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
658 } 658 }
659 659
660 660
661 void Marking::TransferMark(Address old_start, Address new_start) { 661 void Marking::TransferMark(Address old_start, Address new_start) {
662 // This is only used when resizing an object. 662 // This is only used when resizing an object.
663 ASSERT(MemoryChunk::FromAddress(old_start) == 663 DCHECK(MemoryChunk::FromAddress(old_start) ==
664 MemoryChunk::FromAddress(new_start)); 664 MemoryChunk::FromAddress(new_start));
665 665
666 if (!heap_->incremental_marking()->IsMarking()) return; 666 if (!heap_->incremental_marking()->IsMarking()) return;
667 667
668 // If the mark doesn't move, we don't check the color of the object. 668 // If the mark doesn't move, we don't check the color of the object.
669 // It doesn't matter whether the object is black, since it hasn't changed 669 // It doesn't matter whether the object is black, since it hasn't changed
670 // size, so the adjustment to the live data count will be zero anyway. 670 // size, so the adjustment to the live data count will be zero anyway.
671 if (old_start == new_start) return; 671 if (old_start == new_start) return;
672 672
673 MarkBit new_mark_bit = MarkBitFrom(new_start); 673 MarkBit new_mark_bit = MarkBitFrom(new_start);
674 MarkBit old_mark_bit = MarkBitFrom(old_start); 674 MarkBit old_mark_bit = MarkBitFrom(old_start);
675 675
676 #ifdef DEBUG 676 #ifdef DEBUG
677 ObjectColor old_color = Color(old_mark_bit); 677 ObjectColor old_color = Color(old_mark_bit);
678 #endif 678 #endif
679 679
680 if (Marking::IsBlack(old_mark_bit)) { 680 if (Marking::IsBlack(old_mark_bit)) {
681 old_mark_bit.Clear(); 681 old_mark_bit.Clear();
682 ASSERT(IsWhite(old_mark_bit)); 682 DCHECK(IsWhite(old_mark_bit));
683 Marking::MarkBlack(new_mark_bit); 683 Marking::MarkBlack(new_mark_bit);
684 return; 684 return;
685 } else if (Marking::IsGrey(old_mark_bit)) { 685 } else if (Marking::IsGrey(old_mark_bit)) {
686 old_mark_bit.Clear(); 686 old_mark_bit.Clear();
687 old_mark_bit.Next().Clear(); 687 old_mark_bit.Next().Clear();
688 ASSERT(IsWhite(old_mark_bit)); 688 DCHECK(IsWhite(old_mark_bit));
689 heap_->incremental_marking()->WhiteToGreyAndPush( 689 heap_->incremental_marking()->WhiteToGreyAndPush(
690 HeapObject::FromAddress(new_start), new_mark_bit); 690 HeapObject::FromAddress(new_start), new_mark_bit);
691 heap_->incremental_marking()->RestartIfNotMarking(); 691 heap_->incremental_marking()->RestartIfNotMarking();
692 } 692 }
693 693
694 #ifdef DEBUG 694 #ifdef DEBUG
695 ObjectColor new_color = Color(new_mark_bit); 695 ObjectColor new_color = Color(new_mark_bit);
696 ASSERT(new_color == old_color); 696 DCHECK(new_color == old_color);
697 #endif 697 #endif
698 } 698 }
699 699
700 700
701 const char* AllocationSpaceName(AllocationSpace space) { 701 const char* AllocationSpaceName(AllocationSpace space) {
702 switch (space) { 702 switch (space) {
703 case NEW_SPACE: return "NEW_SPACE"; 703 case NEW_SPACE: return "NEW_SPACE";
704 case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE"; 704 case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
705 case OLD_DATA_SPACE: return "OLD_DATA_SPACE"; 705 case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
706 case CODE_SPACE: return "CODE_SPACE"; 706 case CODE_SPACE: return "CODE_SPACE";
(...skipping 64 matching lines...)
771 return 1; 771 return 1;
772 } 772 }
773 773
774 if (ratio <= ratio_threshold) return 0; // Not fragmented. 774 if (ratio <= ratio_threshold) return 0; // Not fragmented.
775 775
776 return static_cast<int>(ratio - ratio_threshold); 776 return static_cast<int>(ratio - ratio_threshold);
777 } 777 }
778 778
779 779
780 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) { 780 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
781 ASSERT(space->identity() == OLD_POINTER_SPACE || 781 DCHECK(space->identity() == OLD_POINTER_SPACE ||
782 space->identity() == OLD_DATA_SPACE || 782 space->identity() == OLD_DATA_SPACE ||
783 space->identity() == CODE_SPACE); 783 space->identity() == CODE_SPACE);
784 784
785 static const int kMaxMaxEvacuationCandidates = 1000; 785 static const int kMaxMaxEvacuationCandidates = 1000;
786 int number_of_pages = space->CountTotalPages(); 786 int number_of_pages = space->CountTotalPages();
787 int max_evacuation_candidates = 787 int max_evacuation_candidates =
788 static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1); 788 static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
789 789
790 if (FLAG_stress_compaction || FLAG_always_compact) { 790 if (FLAG_stress_compaction || FLAG_always_compact) {
791 max_evacuation_candidates = kMaxMaxEvacuationCandidates; 791 max_evacuation_candidates = kMaxMaxEvacuationCandidates;
(...skipping 146 matching lines...)
938 for (int i = 0; i < npages; i++) { 938 for (int i = 0; i < npages; i++) {
939 Page* p = evacuation_candidates_[i]; 939 Page* p = evacuation_candidates_[i];
940 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); 940 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
941 p->ClearEvacuationCandidate(); 941 p->ClearEvacuationCandidate();
942 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); 942 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
943 } 943 }
944 compacting_ = false; 944 compacting_ = false;
945 evacuation_candidates_.Rewind(0); 945 evacuation_candidates_.Rewind(0);
946 invalidated_code_.Rewind(0); 946 invalidated_code_.Rewind(0);
947 } 947 }
948 ASSERT_EQ(0, evacuation_candidates_.length()); 948 DCHECK_EQ(0, evacuation_candidates_.length());
949 } 949 }
950 950
951 951
952 void MarkCompactCollector::Prepare() { 952 void MarkCompactCollector::Prepare() {
953 was_marked_incrementally_ = heap()->incremental_marking()->IsMarking(); 953 was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
954 954
955 #ifdef DEBUG 955 #ifdef DEBUG
956 ASSERT(state_ == IDLE); 956 DCHECK(state_ == IDLE);
957 state_ = PREPARE_GC; 957 state_ = PREPARE_GC;
958 #endif 958 #endif
959 959
960 ASSERT(!FLAG_never_compact || !FLAG_always_compact); 960 DCHECK(!FLAG_never_compact || !FLAG_always_compact);
961 961
962 if (sweeping_in_progress()) { 962 if (sweeping_in_progress()) {
963 // Instead of waiting we could also abort the sweeper threads here. 963 // Instead of waiting we could also abort the sweeper threads here.
964 EnsureSweepingCompleted(); 964 EnsureSweepingCompleted();
965 } 965 }
966 966
967 // Clear marking bits if incremental marking is aborted. 967 // Clear marking bits if incremental marking is aborted.
968 if (was_marked_incrementally_ && abort_incremental_marking_) { 968 if (was_marked_incrementally_ && abort_incremental_marking_) {
969 heap()->incremental_marking()->Abort(); 969 heap()->incremental_marking()->Abort();
970 ClearMarkbits(); 970 ClearMarkbits();
(...skipping 17 matching lines...)
988 #ifdef VERIFY_HEAP 988 #ifdef VERIFY_HEAP
989 if (!was_marked_incrementally_ && FLAG_verify_heap) { 989 if (!was_marked_incrementally_ && FLAG_verify_heap) {
990 VerifyMarkbitsAreClean(); 990 VerifyMarkbitsAreClean();
991 } 991 }
992 #endif 992 #endif
993 } 993 }
994 994
995 995
996 void MarkCompactCollector::Finish() { 996 void MarkCompactCollector::Finish() {
997 #ifdef DEBUG 997 #ifdef DEBUG
998 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); 998 DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
999 state_ = IDLE; 999 state_ = IDLE;
1000 #endif 1000 #endif
1001 // The stub cache is not traversed during GC; clear the cache to 1001 // The stub cache is not traversed during GC; clear the cache to
1002 // force lazy re-initialization of it. This must be done after the 1002 // force lazy re-initialization of it. This must be done after the
1003 // GC, because it relies on the new address of certain old space 1003 // GC, because it relies on the new address of certain old space
1004 // objects (empty string, illegal builtin). 1004 // objects (empty string, illegal builtin).
1005 isolate()->stub_cache()->Clear(); 1005 isolate()->stub_cache()->Clear();
1006 1006
1007 if (have_code_to_deoptimize_) { 1007 if (have_code_to_deoptimize_) {
1008 // Some code objects were marked for deoptimization during the GC. 1008 // Some code objects were marked for deoptimization during the GC.
(...skipping 130 matching lines...)
1139 Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset)); 1139 Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
1140 if (!Marking::MarkBitFrom(code).Get()) continue; 1140 if (!Marking::MarkBitFrom(code).Get()) continue;
1141 1141
1142 // Move every slot in the entry. 1142 // Move every slot in the entry.
1143 for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) { 1143 for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
1144 int dst_index = new_length++; 1144 int dst_index = new_length++;
1145 Object** slot = code_map->RawFieldOfElementAt(dst_index); 1145 Object** slot = code_map->RawFieldOfElementAt(dst_index);
1146 Object* object = code_map->get(i + j); 1146 Object* object = code_map->get(i + j);
1147 code_map->set(dst_index, object); 1147 code_map->set(dst_index, object);
1148 if (j == SharedFunctionInfo::kOsrAstIdOffset) { 1148 if (j == SharedFunctionInfo::kOsrAstIdOffset) {
1149 ASSERT(object->IsSmi()); 1149 DCHECK(object->IsSmi());
1150 } else { 1150 } else {
1151 ASSERT(Marking::IsBlack( 1151 DCHECK(Marking::IsBlack(
1152 Marking::MarkBitFrom(HeapObject::cast(*slot)))); 1152 Marking::MarkBitFrom(HeapObject::cast(*slot))));
1153 isolate_->heap()->mark_compact_collector()-> 1153 isolate_->heap()->mark_compact_collector()->
1154 RecordSlot(slot, slot, *slot); 1154 RecordSlot(slot, slot, *slot);
1155 } 1155 }
1156 } 1156 }
1157 } 1157 }
1158 1158
1159 // Trim the optimized code map if entries have been removed. 1159 // Trim the optimized code map if entries have been removed.
1160 if (new_length < old_length) { 1160 if (new_length < old_length) {
1161 holder->TrimOptimizedCodeMap(old_length - new_length); 1161 holder->TrimOptimizedCodeMap(old_length - new_length);
(...skipping 33 matching lines...)
1195 break; 1195 break;
1196 } 1196 }
1197 1197
1198 candidate = next_candidate; 1198 candidate = next_candidate;
1199 } 1199 }
1200 } 1200 }
1201 } 1201 }
1202 1202
1203 1203
1204 void CodeFlusher::EvictCandidate(JSFunction* function) { 1204 void CodeFlusher::EvictCandidate(JSFunction* function) {
1205 ASSERT(!function->next_function_link()->IsUndefined()); 1205 DCHECK(!function->next_function_link()->IsUndefined());
1206 Object* undefined = isolate_->heap()->undefined_value(); 1206 Object* undefined = isolate_->heap()->undefined_value();
1207 1207
1208 // Make sure previous flushing decisions are revisited. 1208 // Make sure previous flushing decisions are revisited.
1209 isolate_->heap()->incremental_marking()->RecordWrites(function); 1209 isolate_->heap()->incremental_marking()->RecordWrites(function);
1210 isolate_->heap()->incremental_marking()->RecordWrites(function->shared()); 1210 isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
1211 1211
1212 if (FLAG_trace_code_flushing) { 1212 if (FLAG_trace_code_flushing) {
1213 PrintF("[code-flushing abandons closure: "); 1213 PrintF("[code-flushing abandons closure: ");
1214 function->shared()->ShortPrint(); 1214 function->shared()->ShortPrint();
1215 PrintF("]\n"); 1215 PrintF("]\n");
(...skipping 16 matching lines...)
1232 break; 1232 break;
1233 } 1233 }
1234 1234
1235 candidate = next_candidate; 1235 candidate = next_candidate;
1236 } 1236 }
1237 } 1237 }
1238 } 1238 }
1239 1239
1240 1240
1241 void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) { 1241 void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
1242 ASSERT(!FixedArray::cast(code_map_holder->optimized_code_map())-> 1242 DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())->
1243 get(SharedFunctionInfo::kNextMapIndex)->IsUndefined()); 1243 get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
1244 1244
1245 // Make sure previous flushing decisions are revisited. 1245 // Make sure previous flushing decisions are revisited.
1246 isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder); 1246 isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
1247 1247
1248 if (FLAG_trace_code_flushing) { 1248 if (FLAG_trace_code_flushing) {
1249 PrintF("[code-flushing abandons code-map: "); 1249 PrintF("[code-flushing abandons code-map: ");
1250 code_map_holder->ShortPrint(); 1250 code_map_holder->ShortPrint();
1251 PrintF("]\n"); 1251 PrintF("]\n");
1252 } 1252 }
(...skipping 22 matching lines...)
1275 1275
1276 1276
1277 void CodeFlusher::EvictJSFunctionCandidates() { 1277 void CodeFlusher::EvictJSFunctionCandidates() {
1278 JSFunction* candidate = jsfunction_candidates_head_; 1278 JSFunction* candidate = jsfunction_candidates_head_;
1279 JSFunction* next_candidate; 1279 JSFunction* next_candidate;
1280 while (candidate != NULL) { 1280 while (candidate != NULL) {
1281 next_candidate = GetNextCandidate(candidate); 1281 next_candidate = GetNextCandidate(candidate);
1282 EvictCandidate(candidate); 1282 EvictCandidate(candidate);
1283 candidate = next_candidate; 1283 candidate = next_candidate;
1284 } 1284 }
1285 ASSERT(jsfunction_candidates_head_ == NULL); 1285 DCHECK(jsfunction_candidates_head_ == NULL);
1286 } 1286 }
1287 1287
1288 1288
1289 void CodeFlusher::EvictSharedFunctionInfoCandidates() { 1289 void CodeFlusher::EvictSharedFunctionInfoCandidates() {
1290 SharedFunctionInfo* candidate = shared_function_info_candidates_head_; 1290 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1291 SharedFunctionInfo* next_candidate; 1291 SharedFunctionInfo* next_candidate;
1292 while (candidate != NULL) { 1292 while (candidate != NULL) {
1293 next_candidate = GetNextCandidate(candidate); 1293 next_candidate = GetNextCandidate(candidate);
1294 EvictCandidate(candidate); 1294 EvictCandidate(candidate);
1295 candidate = next_candidate; 1295 candidate = next_candidate;
1296 } 1296 }
1297 ASSERT(shared_function_info_candidates_head_ == NULL); 1297 DCHECK(shared_function_info_candidates_head_ == NULL);
1298 } 1298 }
1299 1299
1300 1300
1301 void CodeFlusher::EvictOptimizedCodeMaps() { 1301 void CodeFlusher::EvictOptimizedCodeMaps() {
1302 SharedFunctionInfo* holder = optimized_code_map_holder_head_; 1302 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1303 SharedFunctionInfo* next_holder; 1303 SharedFunctionInfo* next_holder;
1304 while (holder != NULL) { 1304 while (holder != NULL) {
1305 next_holder = GetNextCodeMap(holder); 1305 next_holder = GetNextCodeMap(holder);
1306 EvictOptimizedCodeMap(holder); 1306 EvictOptimizedCodeMap(holder);
1307 holder = next_holder; 1307 holder = next_holder;
1308 } 1308 }
1309 ASSERT(optimized_code_map_holder_head_ == NULL); 1309 DCHECK(optimized_code_map_holder_head_ == NULL);
1310 } 1310 }
1311 1311
1312 1312
1313 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) { 1313 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1314 Heap* heap = isolate_->heap(); 1314 Heap* heap = isolate_->heap();
1315 1315
1316 JSFunction** slot = &jsfunction_candidates_head_; 1316 JSFunction** slot = &jsfunction_candidates_head_;
1317 JSFunction* candidate = jsfunction_candidates_head_; 1317 JSFunction* candidate = jsfunction_candidates_head_;
1318 while (candidate != NULL) { 1318 while (candidate != NULL) {
1319 if (heap->InFromSpace(candidate)) { 1319 if (heap->InFromSpace(candidate)) {
(...skipping 111 matching lines...)
1431 collector->RecordSlot(anchor_slot, p, object); 1431 collector->RecordSlot(anchor_slot, p, object);
1432 MarkBit mark = Marking::MarkBitFrom(object); 1432 MarkBit mark = Marking::MarkBitFrom(object);
1433 collector->MarkObject(object, mark); 1433 collector->MarkObject(object, mark);
1434 } 1434 }
1435 1435
1436 1436
1437 // Visit an unmarked object. 1437 // Visit an unmarked object.
1438 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector, 1438 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1439 HeapObject* obj)) { 1439 HeapObject* obj)) {
1440 #ifdef DEBUG 1440 #ifdef DEBUG
1441 ASSERT(collector->heap()->Contains(obj)); 1441 DCHECK(collector->heap()->Contains(obj));
1442 ASSERT(!collector->heap()->mark_compact_collector()->IsMarked(obj)); 1442 DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1443 #endif 1443 #endif
1444 Map* map = obj->map(); 1444 Map* map = obj->map();
1445 Heap* heap = obj->GetHeap(); 1445 Heap* heap = obj->GetHeap();
1446 MarkBit mark = Marking::MarkBitFrom(obj); 1446 MarkBit mark = Marking::MarkBitFrom(obj);
1447 heap->mark_compact_collector()->SetMark(obj, mark); 1447 heap->mark_compact_collector()->SetMark(obj, mark);
1448 // Mark the map pointer and the body. 1448 // Mark the map pointer and the body.
1449 MarkBit map_mark = Marking::MarkBitFrom(map); 1449 MarkBit map_mark = Marking::MarkBitFrom(map);
1450 heap->mark_compact_collector()->MarkObject(map, map_mark); 1450 heap->mark_compact_collector()->MarkObject(map, map_mark);
1451 IterateBody(map, obj); 1451 IterateBody(map, obj);
1452 } 1452 }
(...skipping 145 matching lines...)
1598 } 1598 }
1599 1599
1600 1600
1601 template<> 1601 template<>
1602 class MarkCompactMarkingVisitor::ObjectStatsTracker< 1602 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1603 MarkCompactMarkingVisitor::kVisitMap> { 1603 MarkCompactMarkingVisitor::kVisitMap> {
1604 public: 1604 public:
1605 static inline void Visit(Map* map, HeapObject* obj) { 1605 static inline void Visit(Map* map, HeapObject* obj) {
1606 Heap* heap = map->GetHeap(); 1606 Heap* heap = map->GetHeap();
1607 Map* map_obj = Map::cast(obj); 1607 Map* map_obj = Map::cast(obj);
1608 ASSERT(map->instance_type() == MAP_TYPE); 1608 DCHECK(map->instance_type() == MAP_TYPE);
1609 DescriptorArray* array = map_obj->instance_descriptors(); 1609 DescriptorArray* array = map_obj->instance_descriptors();
1610 if (map_obj->owns_descriptors() && 1610 if (map_obj->owns_descriptors() &&
1611 array != heap->empty_descriptor_array()) { 1611 array != heap->empty_descriptor_array()) {
1612 int fixed_array_size = array->Size(); 1612 int fixed_array_size = array->Size();
1613 heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE, 1613 heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
1614 fixed_array_size); 1614 fixed_array_size);
1615 } 1615 }
1616 if (map_obj->HasTransitionArray()) { 1616 if (map_obj->HasTransitionArray()) {
1617 int fixed_array_size = map_obj->transitions()->Size(); 1617 int fixed_array_size = map_obj->transitions()->Size();
1618 heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE, 1618 heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
(...skipping 14 matching lines...)
1633 }; 1633 };
1634 1634
1635 1635
1636 template<> 1636 template<>
1637 class MarkCompactMarkingVisitor::ObjectStatsTracker< 1637 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1638 MarkCompactMarkingVisitor::kVisitCode> { 1638 MarkCompactMarkingVisitor::kVisitCode> {
1639 public: 1639 public:
1640 static inline void Visit(Map* map, HeapObject* obj) { 1640 static inline void Visit(Map* map, HeapObject* obj) {
1641 Heap* heap = map->GetHeap(); 1641 Heap* heap = map->GetHeap();
1642 int object_size = obj->Size(); 1642 int object_size = obj->Size();
1643 ASSERT(map->instance_type() == CODE_TYPE); 1643 DCHECK(map->instance_type() == CODE_TYPE);
1644 Code* code_obj = Code::cast(obj); 1644 Code* code_obj = Code::cast(obj);
1645 heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(), 1645 heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
1646 object_size); 1646 object_size);
1647 ObjectStatsVisitBase(kVisitCode, map, obj); 1647 ObjectStatsVisitBase(kVisitCode, map, obj);
1648 } 1648 }
1649 }; 1649 };
1650 1650
1651 1651
1652 template<> 1652 template<>
1653 class MarkCompactMarkingVisitor::ObjectStatsTracker< 1653 class MarkCompactMarkingVisitor::ObjectStatsTracker<
(...skipping 117 matching lines...)
1771 // If code flushing is disabled, there is no need to prepare for it. 1771 // If code flushing is disabled, there is no need to prepare for it.
1772 if (!is_code_flushing_enabled()) return; 1772 if (!is_code_flushing_enabled()) return;
1773 1773
1774 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray 1774 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1775 // relies on it being marked before any other descriptor array. 1775 // relies on it being marked before any other descriptor array.
1776 HeapObject* descriptor_array = heap()->empty_descriptor_array(); 1776 HeapObject* descriptor_array = heap()->empty_descriptor_array();
1777 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array); 1777 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1778 MarkObject(descriptor_array, descriptor_array_mark); 1778 MarkObject(descriptor_array, descriptor_array_mark);
1779 1779
1780 // Make sure we are not referencing the code from the stack. 1780 // Make sure we are not referencing the code from the stack.
1781 ASSERT(this == heap()->mark_compact_collector()); 1781 DCHECK(this == heap()->mark_compact_collector());
1782 PrepareThreadForCodeFlushing(heap()->isolate(), 1782 PrepareThreadForCodeFlushing(heap()->isolate(),
1783 heap()->isolate()->thread_local_top()); 1783 heap()->isolate()->thread_local_top());
1784 1784
1785 // Iterate the archived stacks in all threads to check if 1785 // Iterate the archived stacks in all threads to check if
1786 // the code is referenced. 1786 // the code is referenced.
1787 CodeMarkingVisitor code_marking_visitor(this); 1787 CodeMarkingVisitor code_marking_visitor(this);
1788 heap()->isolate()->thread_manager()->IterateArchivedThreads( 1788 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1789 &code_marking_visitor); 1789 &code_marking_visitor);
1790 1790
1791 SharedFunctionInfoMarkingVisitor visitor(this); 1791 SharedFunctionInfoMarkingVisitor visitor(this);
(...skipping 56 matching lines...)
1848 explicit StringTableCleaner(Heap* heap) 1848 explicit StringTableCleaner(Heap* heap)
1849 : heap_(heap), pointers_removed_(0) { } 1849 : heap_(heap), pointers_removed_(0) { }
1850 1850
1851 virtual void VisitPointers(Object** start, Object** end) { 1851 virtual void VisitPointers(Object** start, Object** end) {
1852 // Visit all HeapObject pointers in [start, end). 1852 // Visit all HeapObject pointers in [start, end).
1853 for (Object** p = start; p < end; p++) { 1853 for (Object** p = start; p < end; p++) {
1854 Object* o = *p; 1854 Object* o = *p;
1855 if (o->IsHeapObject() && 1855 if (o->IsHeapObject() &&
1856 !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) { 1856 !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1857 if (finalize_external_strings) { 1857 if (finalize_external_strings) {
1858 ASSERT(o->IsExternalString()); 1858 DCHECK(o->IsExternalString());
1859 heap_->FinalizeExternalString(String::cast(*p)); 1859 heap_->FinalizeExternalString(String::cast(*p));
1860 } else { 1860 } else {
1861 pointers_removed_++; 1861 pointers_removed_++;
1862 } 1862 }
1863 // Set the entry to the_hole_value (as deleted). 1863 // Set the entry to the_hole_value (as deleted).
1864 *p = heap_->the_hole_value(); 1864 *p = heap_->the_hole_value();
1865 } 1865 }
1866 } 1866 }
1867 } 1867 }
1868 1868
1869 int PointersRemoved() { 1869 int PointersRemoved() {
1870 ASSERT(!finalize_external_strings); 1870 DCHECK(!finalize_external_strings);
1871 return pointers_removed_; 1871 return pointers_removed_;
1872 } 1872 }
1873 1873
1874 private: 1874 private:
1875 Heap* heap_; 1875 Heap* heap_;
1876 int pointers_removed_; 1876 int pointers_removed_;
1877 }; 1877 };
1878 1878
1879 1879
1880 typedef StringTableCleaner<false> InternalizedStringTableCleaner; 1880 typedef StringTableCleaner<false> InternalizedStringTableCleaner;
(...skipping 24 matching lines...)
1905 1905
1906 // Fill the marking stack with overflowed objects returned by the given 1906 // Fill the marking stack with overflowed objects returned by the given
1907 // iterator. Stop when the marking stack is filled or the end of the space 1907 // iterator. Stop when the marking stack is filled or the end of the space
1908 // is reached, whichever comes first. 1908 // is reached, whichever comes first.
1909 template<class T> 1909 template<class T>
1910 static void DiscoverGreyObjectsWithIterator(Heap* heap, 1910 static void DiscoverGreyObjectsWithIterator(Heap* heap,
1911 MarkingDeque* marking_deque, 1911 MarkingDeque* marking_deque,
1912 T* it) { 1912 T* it) {
1913 // The caller should ensure that the marking stack is initially not full, 1913 // The caller should ensure that the marking stack is initially not full,
1914 // so that we don't waste effort pointlessly scanning for objects. 1914 // so that we don't waste effort pointlessly scanning for objects.
1915 ASSERT(!marking_deque->IsFull()); 1915 DCHECK(!marking_deque->IsFull());
1916 1916
1917 Map* filler_map = heap->one_pointer_filler_map(); 1917 Map* filler_map = heap->one_pointer_filler_map();
1918 for (HeapObject* object = it->Next(); 1918 for (HeapObject* object = it->Next();
1919 object != NULL; 1919 object != NULL;
1920 object = it->Next()) { 1920 object = it->Next()) {
1921 MarkBit markbit = Marking::MarkBitFrom(object); 1921 MarkBit markbit = Marking::MarkBitFrom(object);
1922 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) { 1922 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1923 Marking::GreyToBlack(markbit); 1923 Marking::GreyToBlack(markbit);
1924 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size()); 1924 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1925 marking_deque->PushBlack(object); 1925 marking_deque->PushBlack(object);
1926 if (marking_deque->IsFull()) return; 1926 if (marking_deque->IsFull()) return;
1927 } 1927 }
1928 } 1928 }
1929 } 1929 }
1930 1930
1931 1931
1932 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts); 1932 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1933 1933
1934 1934
1935 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, 1935 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
1936 MemoryChunk* p) { 1936 MemoryChunk* p) {
1937 ASSERT(!marking_deque->IsFull()); 1937 DCHECK(!marking_deque->IsFull());
1938 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); 1938 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1939 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); 1939 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
1940 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); 1940 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
1941 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); 1941 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1942 1942
1943 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { 1943 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1944 Address cell_base = it.CurrentCellBase(); 1944 Address cell_base = it.CurrentCellBase();
1945 MarkBit::CellType* cell = it.CurrentCell(); 1945 MarkBit::CellType* cell = it.CurrentCell();
1946 1946
1947 const MarkBit::CellType current_cell = *cell; 1947 const MarkBit::CellType current_cell = *cell;
1948 if (current_cell == 0) continue; 1948 if (current_cell == 0) continue;
1949 1949
1950 MarkBit::CellType grey_objects; 1950 MarkBit::CellType grey_objects;
1951 if (it.HasNext()) { 1951 if (it.HasNext()) {
1952 const MarkBit::CellType next_cell = *(cell+1); 1952 const MarkBit::CellType next_cell = *(cell+1);
1953 grey_objects = current_cell & 1953 grey_objects = current_cell &
1954 ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1))); 1954 ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
1955 } else { 1955 } else {
1956 grey_objects = current_cell & (current_cell >> 1); 1956 grey_objects = current_cell & (current_cell >> 1);
1957 } 1957 }
1958 1958
1959 int offset = 0; 1959 int offset = 0;
1960 while (grey_objects != 0) { 1960 while (grey_objects != 0) {
1961 int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects); 1961 int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
1962 grey_objects >>= trailing_zeros; 1962 grey_objects >>= trailing_zeros;
1963 offset += trailing_zeros; 1963 offset += trailing_zeros;
1964 MarkBit markbit(cell, 1 << offset, false); 1964 MarkBit markbit(cell, 1 << offset, false);
1965 ASSERT(Marking::IsGrey(markbit)); 1965 DCHECK(Marking::IsGrey(markbit));
1966 Marking::GreyToBlack(markbit); 1966 Marking::GreyToBlack(markbit);
1967 Address addr = cell_base + offset * kPointerSize; 1967 Address addr = cell_base + offset * kPointerSize;
1968 HeapObject* object = HeapObject::FromAddress(addr); 1968 HeapObject* object = HeapObject::FromAddress(addr);
1969 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size()); 1969 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1970 marking_deque->PushBlack(object); 1970 marking_deque->PushBlack(object);
1971 if (marking_deque->IsFull()) return; 1971 if (marking_deque->IsFull()) return;
1972 offset += 2; 1972 offset += 2;
1973 grey_objects >>= 2; 1973 grey_objects >>= 2;
1974 } 1974 }
1975 1975
1976 grey_objects >>= (Bitmap::kBitsPerCell - 1); 1976 grey_objects >>= (Bitmap::kBitsPerCell - 1);
1977 } 1977 }
1978 } 1978 }
1979 1979
1980 1980
1981 int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage( 1981 int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
1982 NewSpace* new_space, 1982 NewSpace* new_space,
1983 NewSpacePage* p) { 1983 NewSpacePage* p) {
1984 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); 1984 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1985 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); 1985 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
1986 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); 1986 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
1987 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); 1987 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1988 1988
1989 MarkBit::CellType* cells = p->markbits()->cells(); 1989 MarkBit::CellType* cells = p->markbits()->cells();
1990 int survivors_size = 0; 1990 int survivors_size = 0;
1991 1991
1992 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { 1992 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1993 Address cell_base = it.CurrentCellBase(); 1993 Address cell_base = it.CurrentCellBase();
1994 MarkBit::CellType* cell = it.CurrentCell(); 1994 MarkBit::CellType* cell = it.CurrentCell();
1995 1995
1996 MarkBit::CellType current_cell = *cell; 1996 MarkBit::CellType current_cell = *cell;
1997 if (current_cell == 0) continue; 1997 if (current_cell == 0) continue;
(...skipping 22 matching lines...)
2020 2020
2021 AllocationResult allocation = new_space->AllocateRaw(size); 2021 AllocationResult allocation = new_space->AllocateRaw(size);
2022 if (allocation.IsRetry()) { 2022 if (allocation.IsRetry()) {
2023 if (!new_space->AddFreshPage()) { 2023 if (!new_space->AddFreshPage()) {
2024 // Shouldn't happen. We are sweeping linearly, and to-space 2024 // Shouldn't happen. We are sweeping linearly, and to-space
2025 // has the same number of pages as from-space, so there is 2025 // has the same number of pages as from-space, so there is
2026 // always room. 2026 // always room.
2027 UNREACHABLE(); 2027 UNREACHABLE();
2028 } 2028 }
2029 allocation = new_space->AllocateRaw(size); 2029 allocation = new_space->AllocateRaw(size);
2030 ASSERT(!allocation.IsRetry()); 2030 DCHECK(!allocation.IsRetry());
2031 } 2031 }
2032 Object* target = allocation.ToObjectChecked(); 2032 Object* target = allocation.ToObjectChecked();
2033 2033
2034 MigrateObject(HeapObject::cast(target), 2034 MigrateObject(HeapObject::cast(target),
2035 object, 2035 object,
2036 size, 2036 size,
2037 NEW_SPACE); 2037 NEW_SPACE);
2038 heap()->IncrementSemiSpaceCopiedObjectSize(size); 2038 heap()->IncrementSemiSpaceCopiedObjectSize(size);
2039 } 2039 }
2040 *cells = 0; 2040 *cells = 0;
(...skipping 36 matching lines...)
2077 if (!o->IsHeapObject()) return false; 2077 if (!o->IsHeapObject()) return false;
2078 HeapObject* heap_object = HeapObject::cast(o); 2078 HeapObject* heap_object = HeapObject::cast(o);
2079 MarkBit mark = Marking::MarkBitFrom(heap_object); 2079 MarkBit mark = Marking::MarkBitFrom(heap_object);
2080 return !mark.Get(); 2080 return !mark.Get();
2081 } 2081 }
2082 2082
2083 2083
2084 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap, 2084 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
2085 Object** p) { 2085 Object** p) {
2086 Object* o = *p; 2086 Object* o = *p;
2087 ASSERT(o->IsHeapObject()); 2087 DCHECK(o->IsHeapObject());
2088 HeapObject* heap_object = HeapObject::cast(o); 2088 HeapObject* heap_object = HeapObject::cast(o);
2089 MarkBit mark = Marking::MarkBitFrom(heap_object); 2089 MarkBit mark = Marking::MarkBitFrom(heap_object);
2090 return !mark.Get(); 2090 return !mark.Get();
2091 } 2091 }
2092 2092
2093 2093
2094 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) { 2094 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
2095 StringTable* string_table = heap()->string_table(); 2095 StringTable* string_table = heap()->string_table();
2096 // Mark the string table itself. 2096 // Mark the string table itself.
2097 MarkBit string_table_mark = Marking::MarkBitFrom(string_table); 2097 MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
(...skipping 31 matching lines...)
2129 } 2129 }
2130 2130
2131 2131
2132 void MarkCompactCollector::MarkImplicitRefGroups() { 2132 void MarkCompactCollector::MarkImplicitRefGroups() {
2133 List<ImplicitRefGroup*>* ref_groups = 2133 List<ImplicitRefGroup*>* ref_groups =
2134 isolate()->global_handles()->implicit_ref_groups(); 2134 isolate()->global_handles()->implicit_ref_groups();
2135 2135
2136 int last = 0; 2136 int last = 0;
2137 for (int i = 0; i < ref_groups->length(); i++) { 2137 for (int i = 0; i < ref_groups->length(); i++) {
2138 ImplicitRefGroup* entry = ref_groups->at(i); 2138 ImplicitRefGroup* entry = ref_groups->at(i);
2139 ASSERT(entry != NULL); 2139 DCHECK(entry != NULL);
2140 2140
2141 if (!IsMarked(*entry->parent)) { 2141 if (!IsMarked(*entry->parent)) {
2142 (*ref_groups)[last++] = entry; 2142 (*ref_groups)[last++] = entry;
2143 continue; 2143 continue;
2144 } 2144 }
2145 2145
2146 Object*** children = entry->children; 2146 Object*** children = entry->children;
2147 // A parent object is marked, so mark all child heap objects. 2147 // A parent object is marked, so mark all child heap objects.
2148 for (size_t j = 0; j < entry->length; ++j) { 2148 for (size_t j = 0; j < entry->length; ++j) {
2149 if ((*children[j])->IsHeapObject()) { 2149 if ((*children[j])->IsHeapObject()) {
(...skipping 21 matching lines...)
2171 } 2171 }
2172 2172
2173 2173
2174 // Mark all objects reachable from the objects on the marking stack. 2174 // Mark all objects reachable from the objects on the marking stack.
2175 // Before: the marking stack contains zero or more heap object pointers. 2175 // Before: the marking stack contains zero or more heap object pointers.
2176 // After: the marking stack is empty, and all objects reachable from the 2176 // After: the marking stack is empty, and all objects reachable from the
2177 // marking stack have been marked, or are overflowed in the heap. 2177 // marking stack have been marked, or are overflowed in the heap.
2178 void MarkCompactCollector::EmptyMarkingDeque() { 2178 void MarkCompactCollector::EmptyMarkingDeque() {
2179 while (!marking_deque_.IsEmpty()) { 2179 while (!marking_deque_.IsEmpty()) {
2180 HeapObject* object = marking_deque_.Pop(); 2180 HeapObject* object = marking_deque_.Pop();
2181 ASSERT(object->IsHeapObject()); 2181 DCHECK(object->IsHeapObject());
2182 ASSERT(heap()->Contains(object)); 2182 DCHECK(heap()->Contains(object));
2183 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); 2183 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
2184 2184
2185 Map* map = object->map(); 2185 Map* map = object->map();
2186 MarkBit map_mark = Marking::MarkBitFrom(map); 2186 MarkBit map_mark = Marking::MarkBitFrom(map);
2187 MarkObject(map, map_mark); 2187 MarkObject(map, map_mark);
2188 2188
2189 MarkCompactMarkingVisitor::IterateBody(map, object); 2189 MarkCompactMarkingVisitor::IterateBody(map, object);
2190 } 2190 }
2191 } 2191 }
2192 2192
2193 2193
2194 // Sweep the heap for overflowed objects, clear their overflow bits, and 2194 // Sweep the heap for overflowed objects, clear their overflow bits, and
2195 // push them on the marking stack. Stop early if the marking stack fills 2195 // push them on the marking stack. Stop early if the marking stack fills
2196 // before sweeping completes. If sweeping completes, there are no remaining 2196 // before sweeping completes. If sweeping completes, there are no remaining
2197 // overflowed objects in the heap so the overflow flag on the marking stack 2197 // overflowed objects in the heap so the overflow flag on the marking stack
2198 // is cleared. 2198 // is cleared.
2199 void MarkCompactCollector::RefillMarkingDeque() { 2199 void MarkCompactCollector::RefillMarkingDeque() {
2200 ASSERT(marking_deque_.overflowed()); 2200 DCHECK(marking_deque_.overflowed());
2201 2201
2202 DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_); 2202 DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
2203 if (marking_deque_.IsFull()) return; 2203 if (marking_deque_.IsFull()) return;
2204 2204
2205 DiscoverGreyObjectsInSpace(heap(), 2205 DiscoverGreyObjectsInSpace(heap(),
2206 &marking_deque_, 2206 &marking_deque_,
2207 heap()->old_pointer_space()); 2207 heap()->old_pointer_space());
2208 if (marking_deque_.IsFull()) return; 2208 if (marking_deque_.IsFull()) return;
2209 2209
2210 DiscoverGreyObjectsInSpace(heap(), 2210 DiscoverGreyObjectsInSpace(heap(),
(...skipping 41 matching lines...)
2252 RefillMarkingDeque(); 2252 RefillMarkingDeque();
2253 EmptyMarkingDeque(); 2253 EmptyMarkingDeque();
2254 } 2254 }
2255 } 2255 }
2256 2256
2257 2257
2258 // Mark all objects reachable (transitively) from objects on the marking 2258 // Mark all objects reachable (transitively) from objects on the marking
2259 // stack including references only considered in the atomic marking pause. 2259 // stack including references only considered in the atomic marking pause.
2260 void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) { 2260 void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
2261 bool work_to_do = true; 2261 bool work_to_do = true;
2262 ASSERT(marking_deque_.IsEmpty()); 2262 DCHECK(marking_deque_.IsEmpty());
2263 while (work_to_do) { 2263 while (work_to_do) {
2264 isolate()->global_handles()->IterateObjectGroups( 2264 isolate()->global_handles()->IterateObjectGroups(
2265 visitor, &IsUnmarkedHeapObjectWithHeap); 2265 visitor, &IsUnmarkedHeapObjectWithHeap);
2266 MarkImplicitRefGroups(); 2266 MarkImplicitRefGroups();
2267 ProcessWeakCollections(); 2267 ProcessWeakCollections();
2268 work_to_do = !marking_deque_.IsEmpty(); 2268 work_to_do = !marking_deque_.IsEmpty();
2269 ProcessMarkingDeque(); 2269 ProcessMarkingDeque();
2270 } 2270 }
2271 } 2271 }
2272 2272
(...skipping 39 matching lines...)
2312 incremental_marking->Finalize(); 2312 incremental_marking->Finalize();
2313 incremental_marking_overflowed = 2313 incremental_marking_overflowed =
2314 incremental_marking->marking_deque()->overflowed(); 2314 incremental_marking->marking_deque()->overflowed();
2315 incremental_marking->marking_deque()->ClearOverflowed(); 2315 incremental_marking->marking_deque()->ClearOverflowed();
2316 } else { 2316 } else {
2317 // Abort any pending incremental activities e.g. incremental sweeping. 2317 // Abort any pending incremental activities e.g. incremental sweeping.
2318 incremental_marking->Abort(); 2318 incremental_marking->Abort();
2319 } 2319 }
2320 2320
2321 #ifdef DEBUG 2321 #ifdef DEBUG
2322 ASSERT(state_ == PREPARE_GC); 2322 DCHECK(state_ == PREPARE_GC);
2323 state_ = MARK_LIVE_OBJECTS; 2323 state_ = MARK_LIVE_OBJECTS;
2324 #endif 2324 #endif
2325 // The to space contains live objects, a page in from space is used as a 2325 // The to space contains live objects, a page in from space is used as a
2326 // marking stack. 2326 // marking stack.
2327 Address marking_deque_start = heap()->new_space()->FromSpacePageLow(); 2327 Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2328 Address marking_deque_end = heap()->new_space()->FromSpacePageHigh(); 2328 Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2329 if (FLAG_force_marking_deque_overflows) { 2329 if (FLAG_force_marking_deque_overflows) {
2330 marking_deque_end = marking_deque_start + 64 * kPointerSize; 2330 marking_deque_end = marking_deque_start + 64 * kPointerSize;
2331 } 2331 }
2332 marking_deque_.Initialize(marking_deque_start, 2332 marking_deque_.Initialize(marking_deque_start,
2333 marking_deque_end); 2333 marking_deque_end);
2334 ASSERT(!marking_deque_.overflowed()); 2334 DCHECK(!marking_deque_.overflowed());
2335 2335
2336 if (incremental_marking_overflowed) { 2336 if (incremental_marking_overflowed) {
2337 // There are overflowed objects left in the heap after incremental marking. 2337 // There are overflowed objects left in the heap after incremental marking.
2338 marking_deque_.SetOverflowed(); 2338 marking_deque_.SetOverflowed();
2339 } 2339 }
2340 2340
2341 PrepareForCodeFlushing(); 2341 PrepareForCodeFlushing();
2342 2342
2343 if (was_marked_incrementally_) { 2343 if (was_marked_incrementally_) {
2344 // There is no write barrier on cells so we have to scan them now at the end 2344 // There is no write barrier on cells so we have to scan them now at the end
2345 // of the incremental marking. 2345 // of the incremental marking.
2346 { 2346 {
2347 HeapObjectIterator cell_iterator(heap()->cell_space()); 2347 HeapObjectIterator cell_iterator(heap()->cell_space());
2348 HeapObject* cell; 2348 HeapObject* cell;
2349 while ((cell = cell_iterator.Next()) != NULL) { 2349 while ((cell = cell_iterator.Next()) != NULL) {
2350 ASSERT(cell->IsCell()); 2350 DCHECK(cell->IsCell());
2351 if (IsMarked(cell)) { 2351 if (IsMarked(cell)) {
2352 int offset = Cell::kValueOffset; 2352 int offset = Cell::kValueOffset;
2353 MarkCompactMarkingVisitor::VisitPointer( 2353 MarkCompactMarkingVisitor::VisitPointer(
2354 heap(), 2354 heap(),
2355 reinterpret_cast<Object**>(cell->address() + offset)); 2355 reinterpret_cast<Object**>(cell->address() + offset));
2356 } 2356 }
2357 } 2357 }
2358 } 2358 }
2359 { 2359 {
2360 HeapObjectIterator js_global_property_cell_iterator( 2360 HeapObjectIterator js_global_property_cell_iterator(
2361 heap()->property_cell_space()); 2361 heap()->property_cell_space());
2362 HeapObject* cell; 2362 HeapObject* cell;
2363 while ((cell = js_global_property_cell_iterator.Next()) != NULL) { 2363 while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
2364 ASSERT(cell->IsPropertyCell()); 2364 DCHECK(cell->IsPropertyCell());
2365 if (IsMarked(cell)) { 2365 if (IsMarked(cell)) {
2366 MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell); 2366 MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
2367 } 2367 }
2368 } 2368 }
2369 } 2369 }
2370 } 2370 }
2371 2371
2372 RootMarkingVisitor root_visitor(heap()); 2372 RootMarkingVisitor root_visitor(heap());
2373 MarkRoots(&root_visitor); 2373 MarkRoots(&root_visitor);
2374 2374
(...skipping 94 matching lines...)
2469 i += MapCache::kEntrySize) { 2469 i += MapCache::kEntrySize) {
2470 Object* raw_key = map_cache->get(i); 2470 Object* raw_key = map_cache->get(i);
2471 if (raw_key == heap()->undefined_value() || 2471 if (raw_key == heap()->undefined_value() ||
2472 raw_key == heap()->the_hole_value()) continue; 2472 raw_key == heap()->the_hole_value()) continue;
2473 STATIC_ASSERT(MapCache::kEntrySize == 2); 2473 STATIC_ASSERT(MapCache::kEntrySize == 2);
2474 Object* raw_map = map_cache->get(i + 1); 2474 Object* raw_map = map_cache->get(i + 1);
2475 if (raw_map->IsHeapObject() && IsMarked(raw_map)) { 2475 if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2476 ++used_elements; 2476 ++used_elements;
2477 } else { 2477 } else {
2478 // Delete useless entries with unmarked maps. 2478 // Delete useless entries with unmarked maps.
2479 ASSERT(raw_map->IsMap()); 2479 DCHECK(raw_map->IsMap());
2480 map_cache->set_the_hole(i); 2480 map_cache->set_the_hole(i);
2481 map_cache->set_the_hole(i + 1); 2481 map_cache->set_the_hole(i + 1);
2482 } 2482 }
2483 } 2483 }
2484 if (used_elements == 0) { 2484 if (used_elements == 0) {
2485 context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value()); 2485 context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2486 } else { 2486 } else {
2487 // Note: we don't actually shrink the cache here to avoid 2487 // Note: we don't actually shrink the cache here to avoid
2488 // extra complexity during GC. We rely on subsequent cache 2488 // extra complexity during GC. We rely on subsequent cache
2489 // usages (EnsureCapacity) to do this. 2489 // usages (EnsureCapacity) to do this.
(...skipping 100 matching lines...)
2590 2590
2591 int new_number_of_transitions = 0; 2591 int new_number_of_transitions = 0;
2592 const int header = Map::kProtoTransitionHeaderSize; 2592 const int header = Map::kProtoTransitionHeaderSize;
2593 const int proto_offset = header + Map::kProtoTransitionPrototypeOffset; 2593 const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2594 const int map_offset = header + Map::kProtoTransitionMapOffset; 2594 const int map_offset = header + Map::kProtoTransitionMapOffset;
2595 const int step = Map::kProtoTransitionElementsPerEntry; 2595 const int step = Map::kProtoTransitionElementsPerEntry;
2596 for (int i = 0; i < number_of_transitions; i++) { 2596 for (int i = 0; i < number_of_transitions; i++) {
2597 Object* prototype = prototype_transitions->get(proto_offset + i * step); 2597 Object* prototype = prototype_transitions->get(proto_offset + i * step);
2598 Object* cached_map = prototype_transitions->get(map_offset + i * step); 2598 Object* cached_map = prototype_transitions->get(map_offset + i * step);
2599 if (IsMarked(prototype) && IsMarked(cached_map)) { 2599 if (IsMarked(prototype) && IsMarked(cached_map)) {
2600 ASSERT(!prototype->IsUndefined()); 2600 DCHECK(!prototype->IsUndefined());
2601 int proto_index = proto_offset + new_number_of_transitions * step; 2601 int proto_index = proto_offset + new_number_of_transitions * step;
2602 int map_index = map_offset + new_number_of_transitions * step; 2602 int map_index = map_offset + new_number_of_transitions * step;
2603 if (new_number_of_transitions != i) { 2603 if (new_number_of_transitions != i) {
2604 prototype_transitions->set( 2604 prototype_transitions->set(
2605 proto_index, 2605 proto_index,
2606 prototype, 2606 prototype,
2607 UPDATE_WRITE_BARRIER); 2607 UPDATE_WRITE_BARRIER);
2608 prototype_transitions->set( 2608 prototype_transitions->set(
2609 map_index, 2609 map_index,
2610 cached_map, 2610 cached_map,
(...skipping 33 matching lines...)
2644 } 2644 }
2645 } 2645 }
2646 2646
2647 2647
2648 void MarkCompactCollector::ClearDependentICList(Object* head) { 2648 void MarkCompactCollector::ClearDependentICList(Object* head) {
2649 Object* current = head; 2649 Object* current = head;
2650 Object* undefined = heap()->undefined_value(); 2650 Object* undefined = heap()->undefined_value();
2651 while (current != undefined) { 2651 while (current != undefined) {
2652 Code* code = Code::cast(current); 2652 Code* code = Code::cast(current);
2653 if (IsMarked(code)) { 2653 if (IsMarked(code)) {
2654 ASSERT(code->is_weak_stub()); 2654 DCHECK(code->is_weak_stub());
2655 IC::InvalidateMaps(code); 2655 IC::InvalidateMaps(code);
2656 } 2656 }
2657 current = code->next_code_link(); 2657 current = code->next_code_link();
2658 code->set_next_code_link(undefined); 2658 code->set_next_code_link(undefined);
2659 } 2659 }
2660 } 2660 }
2661 2661
2662 2662
2663 void MarkCompactCollector::ClearDependentCode( 2663 void MarkCompactCollector::ClearDependentCode(
2664 DependentCode* entries) { 2664 DependentCode* entries) {
2665 DisallowHeapAllocation no_allocation; 2665 DisallowHeapAllocation no_allocation;
2666 DependentCode::GroupStartIndexes starts(entries); 2666 DependentCode::GroupStartIndexes starts(entries);
2667 int number_of_entries = starts.number_of_entries(); 2667 int number_of_entries = starts.number_of_entries();
2668 if (number_of_entries == 0) return; 2668 if (number_of_entries == 0) return;
2669 int g = DependentCode::kWeakICGroup; 2669 int g = DependentCode::kWeakICGroup;
2670 if (starts.at(g) != starts.at(g + 1)) { 2670 if (starts.at(g) != starts.at(g + 1)) {
2671 int i = starts.at(g); 2671 int i = starts.at(g);
2672 ASSERT(i + 1 == starts.at(g + 1)); 2672 DCHECK(i + 1 == starts.at(g + 1));
2673 Object* head = entries->object_at(i); 2673 Object* head = entries->object_at(i);
2674 ClearDependentICList(head); 2674 ClearDependentICList(head);
2675 } 2675 }
2676 g = DependentCode::kWeakCodeGroup; 2676 g = DependentCode::kWeakCodeGroup;
2677 for (int i = starts.at(g); i < starts.at(g + 1); i++) { 2677 for (int i = starts.at(g); i < starts.at(g + 1); i++) {
2678 // If the entry is compilation info then the map must be alive, 2678 // If the entry is compilation info then the map must be alive,
2679 // and ClearDependentCode shouldn't be called. 2679 // and ClearDependentCode shouldn't be called.
2680 ASSERT(entries->is_code_at(i)); 2680 DCHECK(entries->is_code_at(i));
2681 Code* code = entries->code_at(i); 2681 Code* code = entries->code_at(i);
2682 if (IsMarked(code) && !code->marked_for_deoptimization()) { 2682 if (IsMarked(code) && !code->marked_for_deoptimization()) {
2683 code->set_marked_for_deoptimization(true); 2683 code->set_marked_for_deoptimization(true);
2684 code->InvalidateEmbeddedObjects(); 2684 code->InvalidateEmbeddedObjects();
2685 have_code_to_deoptimize_ = true; 2685 have_code_to_deoptimize_ = true;
2686 } 2686 }
2687 } 2687 }
2688 for (int i = 0; i < number_of_entries; i++) { 2688 for (int i = 0; i < number_of_entries; i++) {
2689 entries->clear_at(i); 2689 entries->clear_at(i);
2690 } 2690 }
2691 } 2691 }
2692 2692
2693 2693
2694 int MarkCompactCollector::ClearNonLiveDependentCodeInGroup( 2694 int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
2695 DependentCode* entries, int group, int start, int end, int new_start) { 2695 DependentCode* entries, int group, int start, int end, int new_start) {
2696 int survived = 0; 2696 int survived = 0;
2697 if (group == DependentCode::kWeakICGroup) { 2697 if (group == DependentCode::kWeakICGroup) {
2698 // Dependent weak IC stubs form a linked list and only the head is stored 2698 // Dependent weak IC stubs form a linked list and only the head is stored
2699 // in the dependent code array. 2699 // in the dependent code array.
2700 if (start != end) { 2700 if (start != end) {
2701 ASSERT(start + 1 == end); 2701 DCHECK(start + 1 == end);
2702 Object* old_head = entries->object_at(start); 2702 Object* old_head = entries->object_at(start);
2703 MarkCompactWeakObjectRetainer retainer; 2703 MarkCompactWeakObjectRetainer retainer;
2704 Object* head = VisitWeakList<Code>(heap(), old_head, &retainer); 2704 Object* head = VisitWeakList<Code>(heap(), old_head, &retainer);
2705 entries->set_object_at(new_start, head); 2705 entries->set_object_at(new_start, head);
2706 Object** slot = entries->slot_at(new_start); 2706 Object** slot = entries->slot_at(new_start);
2707 RecordSlot(slot, slot, head); 2707 RecordSlot(slot, slot, head);
2708 // We do not compact this group even if the head is undefined, as 2708 // We do not compact this group even if the head is undefined, as
2709 // more dependent ICs are likely to be added later. 2709 // more dependent ICs are likely to be added later.
2710 survived = 1; 2710 survived = 1;
2711 } 2711 }
2712 } else { 2712 } else {
2713 for (int i = start; i < end; i++) { 2713 for (int i = start; i < end; i++) {
2714 Object* obj = entries->object_at(i); 2714 Object* obj = entries->object_at(i);
2715 ASSERT(obj->IsCode() || IsMarked(obj)); 2715 DCHECK(obj->IsCode() || IsMarked(obj));
2716 if (IsMarked(obj) && 2716 if (IsMarked(obj) &&
2717 (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) { 2717 (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
2718 if (new_start + survived != i) { 2718 if (new_start + survived != i) {
2719 entries->set_object_at(new_start + survived, obj); 2719 entries->set_object_at(new_start + survived, obj);
2720 } 2720 }
2721 Object** slot = entries->slot_at(new_start + survived); 2721 Object** slot = entries->slot_at(new_start + survived);
2722 RecordSlot(slot, slot, obj); 2722 RecordSlot(slot, slot, obj);
2723 survived++; 2723 survived++;
2724 } 2724 }
2725 } 2725 }
(...skipping 22 matching lines...)
2748 } 2748 }
2749 2749
2750 2750
2751 void MarkCompactCollector::ProcessWeakCollections() { 2751 void MarkCompactCollector::ProcessWeakCollections() {
2752 GCTracer::Scope gc_scope(heap()->tracer(), 2752 GCTracer::Scope gc_scope(heap()->tracer(),
2753 GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS); 2753 GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
2754 Object* weak_collection_obj = heap()->encountered_weak_collections(); 2754 Object* weak_collection_obj = heap()->encountered_weak_collections();
2755 while (weak_collection_obj != Smi::FromInt(0)) { 2755 while (weak_collection_obj != Smi::FromInt(0)) {
2756 JSWeakCollection* weak_collection = 2756 JSWeakCollection* weak_collection =
2757 reinterpret_cast<JSWeakCollection*>(weak_collection_obj); 2757 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2758 ASSERT(MarkCompactCollector::IsMarked(weak_collection)); 2758 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2759 if (weak_collection->table()->IsHashTable()) { 2759 if (weak_collection->table()->IsHashTable()) {
2760 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table()); 2760 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2761 Object** anchor = reinterpret_cast<Object**>(table->address()); 2761 Object** anchor = reinterpret_cast<Object**>(table->address());
2762 for (int i = 0; i < table->Capacity(); i++) { 2762 for (int i = 0; i < table->Capacity(); i++) {
2763 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) { 2763 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2764 Object** key_slot = 2764 Object** key_slot =
2765 table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i)); 2765 table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
2766 RecordSlot(anchor, key_slot, *key_slot); 2766 RecordSlot(anchor, key_slot, *key_slot);
2767 Object** value_slot = 2767 Object** value_slot =
2768 table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i)); 2768 table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
2769 MarkCompactMarkingVisitor::MarkObjectByPointer( 2769 MarkCompactMarkingVisitor::MarkObjectByPointer(
2770 this, anchor, value_slot); 2770 this, anchor, value_slot);
2771 } 2771 }
2772 } 2772 }
2773 } 2773 }
2774 weak_collection_obj = weak_collection->next(); 2774 weak_collection_obj = weak_collection->next();
2775 } 2775 }
2776 } 2776 }
2777 2777
2778 2778
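For context, ProcessWeakCollections above applies the usual ephemeron rule: an entry's value is kept alive only while its key is alive, and the key/value slots are recorded so they can be updated if the table is later moved. Below is a minimal sketch of that rule, assuming plain STL containers instead of V8's ObjectHashTable; all names in it are hypothetical, and the real collector typically re-runs this step until no new objects become reachable.

    // Sketch only: mark the value of every entry whose key is already marked.
    #include <unordered_map>
    #include <unordered_set>

    struct Object {};

    void ProcessEphemerons(const std::unordered_map<Object*, Object*>& table,
                           std::unordered_set<Object*>* marked) {
      for (const auto& entry : table) {
        if (marked->count(entry.first) != 0) {
          marked->insert(entry.second);  // key is live, so keep the value live
        }
      }
    }
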
2779 void MarkCompactCollector::ClearWeakCollections() { 2779 void MarkCompactCollector::ClearWeakCollections() {
2780 GCTracer::Scope gc_scope(heap()->tracer(), 2780 GCTracer::Scope gc_scope(heap()->tracer(),
2781 GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR); 2781 GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
2782 Object* weak_collection_obj = heap()->encountered_weak_collections(); 2782 Object* weak_collection_obj = heap()->encountered_weak_collections();
2783 while (weak_collection_obj != Smi::FromInt(0)) { 2783 while (weak_collection_obj != Smi::FromInt(0)) {
2784 JSWeakCollection* weak_collection = 2784 JSWeakCollection* weak_collection =
2785 reinterpret_cast<JSWeakCollection*>(weak_collection_obj); 2785 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2786 ASSERT(MarkCompactCollector::IsMarked(weak_collection)); 2786 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2787 if (weak_collection->table()->IsHashTable()) { 2787 if (weak_collection->table()->IsHashTable()) {
2788 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table()); 2788 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2789 for (int i = 0; i < table->Capacity(); i++) { 2789 for (int i = 0; i < table->Capacity(); i++) {
2790 HeapObject* key = HeapObject::cast(table->KeyAt(i)); 2790 HeapObject* key = HeapObject::cast(table->KeyAt(i));
2791 if (!MarkCompactCollector::IsMarked(key)) { 2791 if (!MarkCompactCollector::IsMarked(key)) {
2792 table->RemoveEntry(i); 2792 table->RemoveEntry(i);
2793 } 2793 }
2794 } 2794 }
2795 } 2795 }
2796 weak_collection_obj = weak_collection->next(); 2796 weak_collection_obj = weak_collection->next();
(...skipping 29 matching lines...)
2826 // to new space. We should clear them to avoid encountering them during the next 2826 // to new space. We should clear them to avoid encountering them during the next
2827 // pointer iteration. This is an issue if the store buffer overflows and we 2827 // pointer iteration. This is an issue if the store buffer overflows and we
2828 // have to scan the entire old space, including dead objects, looking for 2828 // have to scan the entire old space, including dead objects, looking for
2829 // pointers to new space. 2829 // pointers to new space.
2830 void MarkCompactCollector::MigrateObject(HeapObject* dst, 2830 void MarkCompactCollector::MigrateObject(HeapObject* dst,
2831 HeapObject* src, 2831 HeapObject* src,
2832 int size, 2832 int size,
2833 AllocationSpace dest) { 2833 AllocationSpace dest) {
2834 Address dst_addr = dst->address(); 2834 Address dst_addr = dst->address();
2835 Address src_addr = src->address(); 2835 Address src_addr = src->address();
2836 ASSERT(heap()->AllowedToBeMigrated(src, dest)); 2836 DCHECK(heap()->AllowedToBeMigrated(src, dest));
2837 ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize); 2837 DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
2838 if (dest == OLD_POINTER_SPACE) { 2838 if (dest == OLD_POINTER_SPACE) {
2839 Address src_slot = src_addr; 2839 Address src_slot = src_addr;
2840 Address dst_slot = dst_addr; 2840 Address dst_slot = dst_addr;
2841 ASSERT(IsAligned(size, kPointerSize)); 2841 DCHECK(IsAligned(size, kPointerSize));
2842 2842
2843 for (int remaining = size / kPointerSize; remaining > 0; remaining--) { 2843 for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2844 Object* value = Memory::Object_at(src_slot); 2844 Object* value = Memory::Object_at(src_slot);
2845 2845
2846 Memory::Object_at(dst_slot) = value; 2846 Memory::Object_at(dst_slot) = value;
2847 2847
2848 // We special case ConstantPoolArrays below since they could contain 2848 // We special case ConstantPoolArrays below since they could contain
2849 // integer value entries which look like tagged pointers. 2849 // integer value entries which look like tagged pointers.
2850 // TODO(mstarzinger): restructure this code to avoid this special-casing. 2850 // TODO(mstarzinger): restructure this code to avoid this special-casing.
2851 if (!src->IsConstantPoolArray()) { 2851 if (!src->IsConstantPoolArray()) {
(...skipping 42 matching lines...)
2894 } else if (dest == CODE_SPACE) { 2894 } else if (dest == CODE_SPACE) {
2895 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr)); 2895 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
2896 heap()->MoveBlock(dst_addr, src_addr, size); 2896 heap()->MoveBlock(dst_addr, src_addr, size);
2897 SlotsBuffer::AddTo(&slots_buffer_allocator_, 2897 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2898 &migration_slots_buffer_, 2898 &migration_slots_buffer_,
2899 SlotsBuffer::RELOCATED_CODE_OBJECT, 2899 SlotsBuffer::RELOCATED_CODE_OBJECT,
2900 dst_addr, 2900 dst_addr,
2901 SlotsBuffer::IGNORE_OVERFLOW); 2901 SlotsBuffer::IGNORE_OVERFLOW);
2902 Code::cast(dst)->Relocate(dst_addr - src_addr); 2902 Code::cast(dst)->Relocate(dst_addr - src_addr);
2903 } else { 2903 } else {
2904 ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE); 2904 DCHECK(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2905 heap()->MoveBlock(dst_addr, src_addr, size); 2905 heap()->MoveBlock(dst_addr, src_addr, size);
2906 } 2906 }
2907 heap()->OnMoveEvent(dst, src, size); 2907 heap()->OnMoveEvent(dst, src, size);
2908 Memory::Address_at(src_addr) = dst_addr; 2908 Memory::Address_at(src_addr) = dst_addr;
2909 } 2909 }
2910 2910
2911 2911
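The final store in MigrateObject (Memory::Address_at(src_addr) = dst_addr) is what the later pointer-updating passes rely on: the first word of the now-dead source object holds the destination address, which PointersUpdatingVisitor below reads back as a forwarding address. A minimal sketch of that idea follows, with hypothetical names and without V8's map-word tagging that distinguishes a forwarding word from a real map.

    // Sketch only: copy the payload, then leave a forwarding address in the
    // old location so stale references can be redirected later.
    #include <cstdint>
    #include <cstring>

    using Address = uint8_t*;

    void MigrateSketch(Address dst, Address src, int size) {
      std::memcpy(dst, src, static_cast<size_t>(size));  // move the object body
      *reinterpret_cast<Address*>(src) = dst;             // forwarding word at src
    }

    Address Forwarded(Address old_location) {
      // A later pass follows the forwarding word to find the new copy.
      return *reinterpret_cast<Address*>(old_location);
    }
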
2912 // Visitor for updating pointers from live objects in old spaces to new space. 2912 // Visitor for updating pointers from live objects in old spaces to new space.
2913 // It does not expect to encounter pointers to dead objects. 2913 // It does not expect to encounter pointers to dead objects.
2914 class PointersUpdatingVisitor: public ObjectVisitor { 2914 class PointersUpdatingVisitor: public ObjectVisitor {
2915 public: 2915 public:
2916 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { } 2916 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
2917 2917
2918 void VisitPointer(Object** p) { 2918 void VisitPointer(Object** p) {
2919 UpdatePointer(p); 2919 UpdatePointer(p);
2920 } 2920 }
2921 2921
2922 void VisitPointers(Object** start, Object** end) { 2922 void VisitPointers(Object** start, Object** end) {
2923 for (Object** p = start; p < end; p++) UpdatePointer(p); 2923 for (Object** p = start; p < end; p++) UpdatePointer(p);
2924 } 2924 }
2925 2925
2926 void VisitEmbeddedPointer(RelocInfo* rinfo) { 2926 void VisitEmbeddedPointer(RelocInfo* rinfo) {
2927 ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); 2927 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2928 Object* target = rinfo->target_object(); 2928 Object* target = rinfo->target_object();
2929 Object* old_target = target; 2929 Object* old_target = target;
2930 VisitPointer(&target); 2930 VisitPointer(&target);
2931 // Avoid unnecessary changes that might unnecessarily flush the instruction 2931 // Avoid unnecessary changes that might unnecessarily flush the instruction
2932 // cache. 2932 // cache.
2933 if (target != old_target) { 2933 if (target != old_target) {
2934 rinfo->set_target_object(target); 2934 rinfo->set_target_object(target);
2935 } 2935 }
2936 } 2936 }
2937 2937
2938 void VisitCodeTarget(RelocInfo* rinfo) { 2938 void VisitCodeTarget(RelocInfo* rinfo) {
2939 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); 2939 DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
2940 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); 2940 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2941 Object* old_target = target; 2941 Object* old_target = target;
2942 VisitPointer(&target); 2942 VisitPointer(&target);
2943 if (target != old_target) { 2943 if (target != old_target) {
2944 rinfo->set_target_address(Code::cast(target)->instruction_start()); 2944 rinfo->set_target_address(Code::cast(target)->instruction_start());
2945 } 2945 }
2946 } 2946 }
2947 2947
2948 void VisitCodeAgeSequence(RelocInfo* rinfo) { 2948 void VisitCodeAgeSequence(RelocInfo* rinfo) {
2949 ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); 2949 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2950 Object* stub = rinfo->code_age_stub(); 2950 Object* stub = rinfo->code_age_stub();
2951 ASSERT(stub != NULL); 2951 DCHECK(stub != NULL);
2952 VisitPointer(&stub); 2952 VisitPointer(&stub);
2953 if (stub != rinfo->code_age_stub()) { 2953 if (stub != rinfo->code_age_stub()) {
2954 rinfo->set_code_age_stub(Code::cast(stub)); 2954 rinfo->set_code_age_stub(Code::cast(stub));
2955 } 2955 }
2956 } 2956 }
2957 2957
2958 void VisitDebugTarget(RelocInfo* rinfo) { 2958 void VisitDebugTarget(RelocInfo* rinfo) {
2959 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && 2959 DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2960 rinfo->IsPatchedReturnSequence()) || 2960 rinfo->IsPatchedReturnSequence()) ||
2961 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && 2961 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2962 rinfo->IsPatchedDebugBreakSlotSequence())); 2962 rinfo->IsPatchedDebugBreakSlotSequence()));
2963 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); 2963 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2964 VisitPointer(&target); 2964 VisitPointer(&target);
2965 rinfo->set_call_address(Code::cast(target)->instruction_start()); 2965 rinfo->set_call_address(Code::cast(target)->instruction_start());
2966 } 2966 }
2967 2967
2968 static inline void UpdateSlot(Heap* heap, Object** slot) { 2968 static inline void UpdateSlot(Heap* heap, Object** slot) {
2969 Object* obj = *slot; 2969 Object* obj = *slot;
2970 2970
2971 if (!obj->IsHeapObject()) return; 2971 if (!obj->IsHeapObject()) return;
2972 2972
2973 HeapObject* heap_obj = HeapObject::cast(obj); 2973 HeapObject* heap_obj = HeapObject::cast(obj);
2974 2974
2975 MapWord map_word = heap_obj->map_word(); 2975 MapWord map_word = heap_obj->map_word();
2976 if (map_word.IsForwardingAddress()) { 2976 if (map_word.IsForwardingAddress()) {
2977 ASSERT(heap->InFromSpace(heap_obj) || 2977 DCHECK(heap->InFromSpace(heap_obj) ||
2978 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj)); 2978 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
2979 HeapObject* target = map_word.ToForwardingAddress(); 2979 HeapObject* target = map_word.ToForwardingAddress();
2980 *slot = target; 2980 *slot = target;
2981 ASSERT(!heap->InFromSpace(target) && 2981 DCHECK(!heap->InFromSpace(target) &&
2982 !MarkCompactCollector::IsOnEvacuationCandidate(target)); 2982 !MarkCompactCollector::IsOnEvacuationCandidate(target));
2983 } 2983 }
2984 } 2984 }
2985 2985
2986 private: 2986 private:
2987 inline void UpdatePointer(Object** p) { 2987 inline void UpdatePointer(Object** p) {
2988 UpdateSlot(heap_, p); 2988 UpdateSlot(heap_, p);
2989 } 2989 }
2990 2990
2991 Heap* heap_; 2991 Heap* heap_;
(...skipping 35 matching lines...)
3027 if (map_word.IsForwardingAddress()) { 3027 if (map_word.IsForwardingAddress()) {
3028 return String::cast(map_word.ToForwardingAddress()); 3028 return String::cast(map_word.ToForwardingAddress());
3029 } 3029 }
3030 3030
3031 return String::cast(*p); 3031 return String::cast(*p);
3032 } 3032 }
3033 3033
3034 3034
3035 bool MarkCompactCollector::TryPromoteObject(HeapObject* object, 3035 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
3036 int object_size) { 3036 int object_size) {
3037 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); 3037 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3038 3038
3039 OldSpace* target_space = heap()->TargetSpace(object); 3039 OldSpace* target_space = heap()->TargetSpace(object);
3040 3040
3041 ASSERT(target_space == heap()->old_pointer_space() || 3041 DCHECK(target_space == heap()->old_pointer_space() ||
3042 target_space == heap()->old_data_space()); 3042 target_space == heap()->old_data_space());
3043 HeapObject* target; 3043 HeapObject* target;
3044 AllocationResult allocation = target_space->AllocateRaw(object_size); 3044 AllocationResult allocation = target_space->AllocateRaw(object_size);
3045 if (allocation.To(&target)) { 3045 if (allocation.To(&target)) {
3046 MigrateObject(target, 3046 MigrateObject(target,
3047 object, 3047 object,
3048 object_size, 3048 object_size,
3049 target_space->identity()); 3049 target_space->identity());
3050 heap()->IncrementPromotedObjectsSize(object_size); 3050 heap()->IncrementPromotedObjectsSize(object_size);
3051 return true; 3051 return true;
(...skipping 33 matching lines...)
3085 } 3085 }
3086 3086
3087 heap_->IncrementYoungSurvivorsCounter(survivors_size); 3087 heap_->IncrementYoungSurvivorsCounter(survivors_size);
3088 new_space->set_age_mark(new_space->top()); 3088 new_space->set_age_mark(new_space->top());
3089 } 3089 }
3090 3090
3091 3091
3092 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { 3092 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
3093 AlwaysAllocateScope always_allocate(isolate()); 3093 AlwaysAllocateScope always_allocate(isolate());
3094 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3094 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3095 ASSERT(p->IsEvacuationCandidate() && !p->WasSwept()); 3095 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
3096 p->MarkSweptPrecisely(); 3096 p->MarkSweptPrecisely();
3097 3097
3098 int offsets[16]; 3098 int offsets[16];
3099 3099
3100 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { 3100 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3101 Address cell_base = it.CurrentCellBase(); 3101 Address cell_base = it.CurrentCellBase();
3102 MarkBit::CellType* cell = it.CurrentCell(); 3102 MarkBit::CellType* cell = it.CurrentCell();
3103 3103
3104 if (*cell == 0) continue; 3104 if (*cell == 0) continue;
3105 3105
3106 int live_objects = MarkWordToObjectStarts(*cell, offsets); 3106 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3107 for (int i = 0; i < live_objects; i++) { 3107 for (int i = 0; i < live_objects; i++) {
3108 Address object_addr = cell_base + offsets[i] * kPointerSize; 3108 Address object_addr = cell_base + offsets[i] * kPointerSize;
3109 HeapObject* object = HeapObject::FromAddress(object_addr); 3109 HeapObject* object = HeapObject::FromAddress(object_addr);
3110 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); 3110 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3111 3111
3112 int size = object->Size(); 3112 int size = object->Size();
3113 3113
3114 HeapObject* target_object; 3114 HeapObject* target_object;
3115 AllocationResult allocation = space->AllocateRaw(size); 3115 AllocationResult allocation = space->AllocateRaw(size);
3116 if (!allocation.To(&target_object)) { 3116 if (!allocation.To(&target_object)) {
3117 // If allocation failed, use emergency memory and re-try allocation. 3117 // If allocation failed, use emergency memory and re-try allocation.
3118 CHECK(space->HasEmergencyMemory()); 3118 CHECK(space->HasEmergencyMemory());
3119 space->UseEmergencyMemory(); 3119 space->UseEmergencyMemory();
3120 allocation = space->AllocateRaw(size); 3120 allocation = space->AllocateRaw(size);
3121 } 3121 }
3122 if (!allocation.To(&target_object)) { 3122 if (!allocation.To(&target_object)) {
3123 // OS refused to give us memory. 3123 // OS refused to give us memory.
3124 V8::FatalProcessOutOfMemory("Evacuation"); 3124 V8::FatalProcessOutOfMemory("Evacuation");
3125 return; 3125 return;
3126 } 3126 }
3127 3127
3128 MigrateObject(target_object, object, size, space->identity()); 3128 MigrateObject(target_object, object, size, space->identity());
3129 ASSERT(object->map_word().IsForwardingAddress()); 3129 DCHECK(object->map_word().IsForwardingAddress());
3130 } 3130 }
3131 3131
3132 // Clear marking bits for current cell. 3132 // Clear marking bits for current cell.
3133 *cell = 0; 3133 *cell = 0;
3134 } 3134 }
3135 p->ResetLiveBytes(); 3135 p->ResetLiveBytes();
3136 } 3136 }
3137 3137
3138 3138
3139 void MarkCompactCollector::EvacuatePages() { 3139 void MarkCompactCollector::EvacuatePages() {
3140 int npages = evacuation_candidates_.length(); 3140 int npages = evacuation_candidates_.length();
3141 for (int i = 0; i < npages; i++) { 3141 for (int i = 0; i < npages; i++) {
3142 Page* p = evacuation_candidates_[i]; 3142 Page* p = evacuation_candidates_[i];
3143 ASSERT(p->IsEvacuationCandidate() || 3143 DCHECK(p->IsEvacuationCandidate() ||
3144 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3144 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3145 ASSERT(static_cast<int>(p->parallel_sweeping()) == 3145 DCHECK(static_cast<int>(p->parallel_sweeping()) ==
3146 MemoryChunk::SWEEPING_DONE); 3146 MemoryChunk::SWEEPING_DONE);
3147 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3147 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3148 // Allocate emergency memory for the case when compaction fails due to out 3148 // Allocate emergency memory for the case when compaction fails due to out
3149 // of memory. 3149 // of memory.
3150 if (!space->HasEmergencyMemory()) { 3150 if (!space->HasEmergencyMemory()) {
3151 space->CreateEmergencyMemory(); 3151 space->CreateEmergencyMemory();
3152 } 3152 }
3153 if (p->IsEvacuationCandidate()) { 3153 if (p->IsEvacuationCandidate()) {
3154 // During compaction we might have to request a new page. Check that we 3154 // During compaction we might have to request a new page. Check that we
3155 // have an emergency page and the space still has room for that. 3155 // have an emergency page and the space still has room for that.
(...skipping 98 matching lines...)
3254 ZAP_FREE_SPACE 3254 ZAP_FREE_SPACE
3255 }; 3255 };
3256 3256
3257 3257
3258 template<MarkCompactCollector::SweepingParallelism mode> 3258 template<MarkCompactCollector::SweepingParallelism mode>
3259 static intptr_t Free(PagedSpace* space, 3259 static intptr_t Free(PagedSpace* space,
3260 FreeList* free_list, 3260 FreeList* free_list,
3261 Address start, 3261 Address start,
3262 int size) { 3262 int size) {
3263 if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) { 3263 if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
3264 ASSERT(free_list == NULL); 3264 DCHECK(free_list == NULL);
3265 return space->Free(start, size); 3265 return space->Free(start, size);
3266 } else { 3266 } else {
3267 // TODO(hpayer): account for wasted bytes in concurrent sweeping too. 3267 // TODO(hpayer): account for wasted bytes in concurrent sweeping too.
3268 return size - free_list->Free(start, size); 3268 return size - free_list->Free(start, size);
3269 } 3269 }
3270 } 3270 }
3271 3271
3272 3272
3273 // Sweep a space precisely. After this has been done the space can 3273 // Sweep a space precisely. After this has been done the space can
3274 // be iterated precisely, hitting only the live objects. Code space 3274 // be iterated precisely, hitting only the live objects. Code space
3275 // is always swept precisely because we want to be able to iterate 3275 // is always swept precisely because we want to be able to iterate
3276 // over it. Map space is swept precisely, because it is not compacted. 3276 // over it. Map space is swept precisely, because it is not compacted.
3277 // Slots in live objects pointing into evacuation candidates are updated 3277 // Slots in live objects pointing into evacuation candidates are updated
3278 // if requested. 3278 // if requested.
3279 // Returns the size of the biggest contiguous freed memory chunk in bytes. 3279 // Returns the size of the biggest contiguous freed memory chunk in bytes.
3280 template<SweepingMode sweeping_mode, 3280 template<SweepingMode sweeping_mode,
3281 MarkCompactCollector::SweepingParallelism parallelism, 3281 MarkCompactCollector::SweepingParallelism parallelism,
3282 SkipListRebuildingMode skip_list_mode, 3282 SkipListRebuildingMode skip_list_mode,
3283 FreeSpaceTreatmentMode free_space_mode> 3283 FreeSpaceTreatmentMode free_space_mode>
3284 static int SweepPrecisely(PagedSpace* space, 3284 static int SweepPrecisely(PagedSpace* space,
3285 FreeList* free_list, 3285 FreeList* free_list,
3286 Page* p, 3286 Page* p,
3287 ObjectVisitor* v) { 3287 ObjectVisitor* v) {
3288 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); 3288 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
3289 ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST, 3289 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3290 space->identity() == CODE_SPACE); 3290 space->identity() == CODE_SPACE);
3291 ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); 3291 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3292 ASSERT(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD || 3292 DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
3293 sweeping_mode == SWEEP_ONLY); 3293 sweeping_mode == SWEEP_ONLY);
3294 3294
3295 Address free_start = p->area_start(); 3295 Address free_start = p->area_start();
3296 ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); 3296 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3297 int offsets[16]; 3297 int offsets[16];
3298 3298
3299 SkipList* skip_list = p->skip_list(); 3299 SkipList* skip_list = p->skip_list();
3300 int curr_region = -1; 3300 int curr_region = -1;
3301 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) { 3301 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3302 skip_list->Clear(); 3302 skip_list->Clear();
3303 } 3303 }
3304 3304
3305 intptr_t freed_bytes = 0; 3305 intptr_t freed_bytes = 0;
3306 intptr_t max_freed_bytes = 0; 3306 intptr_t max_freed_bytes = 0;
(...skipping 12 matching lines...)
3319 } 3319 }
3320 freed_bytes = Free<parallelism>(space, free_list, free_start, size); 3320 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3321 max_freed_bytes = Max(freed_bytes, max_freed_bytes); 3321 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3322 #ifdef ENABLE_GDB_JIT_INTERFACE 3322 #ifdef ENABLE_GDB_JIT_INTERFACE
3323 if (FLAG_gdbjit && space->identity() == CODE_SPACE) { 3323 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3324 GDBJITInterface::RemoveCodeRange(free_start, free_end); 3324 GDBJITInterface::RemoveCodeRange(free_start, free_end);
3325 } 3325 }
3326 #endif 3326 #endif
3327 } 3327 }
3328 HeapObject* live_object = HeapObject::FromAddress(free_end); 3328 HeapObject* live_object = HeapObject::FromAddress(free_end);
3329 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object))); 3329 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3330 Map* map = live_object->map(); 3330 Map* map = live_object->map();
3331 int size = live_object->SizeFromMap(map); 3331 int size = live_object->SizeFromMap(map);
3332 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { 3332 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3333 live_object->IterateBody(map->instance_type(), size, v); 3333 live_object->IterateBody(map->instance_type(), size, v);
3334 } 3334 }
3335 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { 3335 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3336 int new_region_start = 3336 int new_region_start =
3337 SkipList::RegionNumber(free_end); 3337 SkipList::RegionNumber(free_end);
3338 int new_region_end = 3338 int new_region_end =
3339 SkipList::RegionNumber(free_end + size - kPointerSize); 3339 SkipList::RegionNumber(free_end + size - kPointerSize);
(...skipping 95 matching lines...)
3435 MarkBit mark_bit = 3435 MarkBit mark_bit =
3436 p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr)); 3436 p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
3437 3437
3438 return mark_bit.Get(); 3438 return mark_bit.Get();
3439 } 3439 }
3440 3440
3441 3441
3442 void MarkCompactCollector::InvalidateCode(Code* code) { 3442 void MarkCompactCollector::InvalidateCode(Code* code) {
3443 if (heap_->incremental_marking()->IsCompacting() && 3443 if (heap_->incremental_marking()->IsCompacting() &&
3444 !ShouldSkipEvacuationSlotRecording(code)) { 3444 !ShouldSkipEvacuationSlotRecording(code)) {
3445 ASSERT(compacting_); 3445 DCHECK(compacting_);
3446 3446
3447 // If the object is white then no slots were recorded on it yet. 3447 // If the object is white then no slots were recorded on it yet.
3448 MarkBit mark_bit = Marking::MarkBitFrom(code); 3448 MarkBit mark_bit = Marking::MarkBitFrom(code);
3449 if (Marking::IsWhite(mark_bit)) return; 3449 if (Marking::IsWhite(mark_bit)) return;
3450 3450
3451 invalidated_code_.Add(code); 3451 invalidated_code_.Add(code);
3452 } 3452 }
3453 } 3453 }
3454 3454
3455 3455
(...skipping 113 matching lines...)
3569 } 3569 }
3570 } 3570 }
3571 } 3571 }
3572 } 3572 }
3573 3573
3574 int npages = evacuation_candidates_.length(); 3574 int npages = evacuation_candidates_.length();
3575 { GCTracer::Scope gc_scope( 3575 { GCTracer::Scope gc_scope(
3576 heap()->tracer(), GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED); 3576 heap()->tracer(), GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3577 for (int i = 0; i < npages; i++) { 3577 for (int i = 0; i < npages; i++) {
3578 Page* p = evacuation_candidates_[i]; 3578 Page* p = evacuation_candidates_[i];
3579 ASSERT(p->IsEvacuationCandidate() || 3579 DCHECK(p->IsEvacuationCandidate() ||
3580 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3580 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3581 3581
3582 if (p->IsEvacuationCandidate()) { 3582 if (p->IsEvacuationCandidate()) {
3583 SlotsBuffer::UpdateSlotsRecordedIn(heap_, 3583 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3584 p->slots_buffer(), 3584 p->slots_buffer(),
3585 code_slots_filtering_required); 3585 code_slots_filtering_required);
3586 if (FLAG_trace_fragmentation) { 3586 if (FLAG_trace_fragmentation) {
3587 PrintF(" page %p slots buffer: %d\n", 3587 PrintF(" page %p slots buffer: %d\n",
3588 reinterpret_cast<void*>(p), 3588 reinterpret_cast<void*>(p),
3589 SlotsBuffer::SizeOfChain(p->slots_buffer())); 3589 SlotsBuffer::SizeOfChain(p->slots_buffer()));
(...skipping 85 matching lines...)
3675 EvacuationWeakObjectRetainer evacuation_object_retainer; 3675 EvacuationWeakObjectRetainer evacuation_object_retainer;
3676 heap()->ProcessWeakReferences(&evacuation_object_retainer); 3676 heap()->ProcessWeakReferences(&evacuation_object_retainer);
3677 3677
3678 // Visit invalidated code (we ignored all slots on it) and clear mark-bits 3678 // Visit invalidated code (we ignored all slots on it) and clear mark-bits
3679 // under it. 3679 // under it.
3680 ProcessInvalidatedCode(&updating_visitor); 3680 ProcessInvalidatedCode(&updating_visitor);
3681 3681
3682 heap_->isolate()->inner_pointer_to_code_cache()->Flush(); 3682 heap_->isolate()->inner_pointer_to_code_cache()->Flush();
3683 3683
3684 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); 3684 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
3685 ASSERT(migration_slots_buffer_ == NULL); 3685 DCHECK(migration_slots_buffer_ == NULL);
3686 } 3686 }
3687 3687
3688 3688
3689 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() { 3689 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
3690 int npages = evacuation_candidates_.length(); 3690 int npages = evacuation_candidates_.length();
3691 for (int i = 0; i < npages; i++) { 3691 for (int i = 0; i < npages; i++) {
3692 Page* p = evacuation_candidates_[i]; 3692 Page* p = evacuation_candidates_[i];
3693 if (!p->IsEvacuationCandidate()) continue; 3693 if (!p->IsEvacuationCandidate()) continue;
3694 p->Unlink(); 3694 p->Unlink();
3695 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3695 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
(...skipping 213 matching lines...)
3909 #undef X 3909 #undef X
3910 3910
3911 3911
3912 // Takes a word of mark bits. Returns the number of objects that start in the 3912 // Takes a word of mark bits. Returns the number of objects that start in the
3913 // range. Puts the offsets of the words in the supplied array. 3913 // range. Puts the offsets of the words in the supplied array.
3914 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) { 3914 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
3915 int objects = 0; 3915 int objects = 0;
3916 int offset = 0; 3916 int offset = 0;
3917 3917
3918 // No consecutive 1 bits. 3918 // No consecutive 1 bits.
3919 ASSERT((mark_bits & 0x180) != 0x180); 3919 DCHECK((mark_bits & 0x180) != 0x180);
3920 ASSERT((mark_bits & 0x18000) != 0x18000); 3920 DCHECK((mark_bits & 0x18000) != 0x18000);
3921 ASSERT((mark_bits & 0x1800000) != 0x1800000); 3921 DCHECK((mark_bits & 0x1800000) != 0x1800000);
3922 3922
3923 while (mark_bits != 0) { 3923 while (mark_bits != 0) {
3924 int byte = (mark_bits & 0xff); 3924 int byte = (mark_bits & 0xff);
3925 mark_bits >>= 8; 3925 mark_bits >>= 8;
3926 if (byte != 0) { 3926 if (byte != 0) {
3927 ASSERT(byte < kStartTableLines); // No consecutive 1 bits. 3927 DCHECK(byte < kStartTableLines); // No consecutive 1 bits.
3928 char* table = kStartTable + byte * kStartTableEntriesPerLine; 3928 char* table = kStartTable + byte * kStartTableEntriesPerLine;
3929 int objects_in_these_8_words = table[0]; 3929 int objects_in_these_8_words = table[0];
3930 ASSERT(objects_in_these_8_words != kStartTableInvalidLine); 3930 DCHECK(objects_in_these_8_words != kStartTableInvalidLine);
3931 ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine); 3931 DCHECK(objects_in_these_8_words < kStartTableEntriesPerLine);
3932 for (int i = 0; i < objects_in_these_8_words; i++) { 3932 for (int i = 0; i < objects_in_these_8_words; i++) {
3933 starts[objects++] = offset + table[1 + i]; 3933 starts[objects++] = offset + table[1 + i];
3934 } 3934 }
3935 } 3935 }
3936 offset += 8; 3936 offset += 8;
3937 } 3937 }
3938 return objects; 3938 return objects;
3939 } 3939 }
3940 3940
3941 3941
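MarkWordToObjectStarts above uses the byte-indexed kStartTable purely as a speed optimization; its contract is simply to report the bit index of every set mark bit, since each set bit is an object start and its index is the offset in pointer-sized words from the cell's base. The "no consecutive 1 bits" DCHECKs reflect that heap objects are at least two words long, so adjacent bits cannot both be object starts. A non-optimized, table-free sketch of the same contract:

    // Sketch only: table-free equivalent of MarkWordToObjectStarts.
    #include <cstdint>

    static int MarkWordToObjectStartsSlow(uint32_t mark_bits, int* starts) {
      int objects = 0;
      for (int bit = 0; bit < 32; bit++) {
        if ((mark_bits & (1u << bit)) != 0) {
          starts[objects++] = bit;  // offset in words from the cell base
        }
      }
      return objects;
    }
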
3942 static inline Address DigestFreeStart(Address approximate_free_start, 3942 static inline Address DigestFreeStart(Address approximate_free_start,
3943 uint32_t free_start_cell) { 3943 uint32_t free_start_cell) {
3944 ASSERT(free_start_cell != 0); 3944 DCHECK(free_start_cell != 0);
3945 3945
3946 // No consecutive 1 bits. 3946 // No consecutive 1 bits.
3947 ASSERT((free_start_cell & (free_start_cell << 1)) == 0); 3947 DCHECK((free_start_cell & (free_start_cell << 1)) == 0);
3948 3948
3949 int offsets[16]; 3949 int offsets[16];
3950 uint32_t cell = free_start_cell; 3950 uint32_t cell = free_start_cell;
3951 int offset_of_last_live; 3951 int offset_of_last_live;
3952 if ((cell & 0x80000000u) != 0) { 3952 if ((cell & 0x80000000u) != 0) {
3953 // This case would overflow below. 3953 // This case would overflow below.
3954 offset_of_last_live = 31; 3954 offset_of_last_live = 31;
3955 } else { 3955 } else {
3956 // Remove all but one bit, the most significant. This is an optimization 3956 // Remove all but one bit, the most significant. This is an optimization
3957 // that may or may not be worthwhile. 3957 // that may or may not be worthwhile.
3958 cell |= cell >> 16; 3958 cell |= cell >> 16;
3959 cell |= cell >> 8; 3959 cell |= cell >> 8;
3960 cell |= cell >> 4; 3960 cell |= cell >> 4;
3961 cell |= cell >> 2; 3961 cell |= cell >> 2;
3962 cell |= cell >> 1; 3962 cell |= cell >> 1;
3963 cell = (cell + 1) >> 1; 3963 cell = (cell + 1) >> 1;
3964 int live_objects = MarkWordToObjectStarts(cell, offsets); 3964 int live_objects = MarkWordToObjectStarts(cell, offsets);
3965 ASSERT(live_objects == 1); 3965 DCHECK(live_objects == 1);
3966 offset_of_last_live = offsets[live_objects - 1]; 3966 offset_of_last_live = offsets[live_objects - 1];
3967 } 3967 }
3968 Address last_live_start = 3968 Address last_live_start =
3969 approximate_free_start + offset_of_last_live * kPointerSize; 3969 approximate_free_start + offset_of_last_live * kPointerSize;
3970 HeapObject* last_live = HeapObject::FromAddress(last_live_start); 3970 HeapObject* last_live = HeapObject::FromAddress(last_live_start);
3971 Address free_start = last_live_start + last_live->Size(); 3971 Address free_start = last_live_start + last_live->Size();
3972 return free_start; 3972 return free_start;
3973 } 3973 }
3974 3974
3975 3975
3976 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) { 3976 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
3977 ASSERT(cell != 0); 3977 DCHECK(cell != 0);
3978 3978
3979 // No consecutive 1 bits. 3979 // No consecutive 1 bits.
3980 ASSERT((cell & (cell << 1)) == 0); 3980 DCHECK((cell & (cell << 1)) == 0);
3981 3981
3982 int offsets[16]; 3982 int offsets[16];
3983 if (cell == 0x80000000u) { // Avoid overflow below. 3983 if (cell == 0x80000000u) { // Avoid overflow below.
3984 return block_address + 31 * kPointerSize; 3984 return block_address + 31 * kPointerSize;
3985 } 3985 }
3986 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; 3986 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
3987 ASSERT((first_set_bit & cell) == first_set_bit); 3987 DCHECK((first_set_bit & cell) == first_set_bit);
3988 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); 3988 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
3989 ASSERT(live_objects == 1); 3989 DCHECK(live_objects == 1);
3990 USE(live_objects); 3990 USE(live_objects);
3991 return block_address + offsets[0] * kPointerSize; 3991 return block_address + offsets[0] * kPointerSize;
3992 } 3992 }
3993 3993
3994 3994
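DigestFreeStart and StartOfLiveObject above lean on two standard bit tricks: ((cell ^ (cell - 1)) + 1) >> 1 isolates the lowest set bit, and the shift-or cascade followed by (cell + 1) >> 1 isolates the highest set bit, which is why both functions special-case bit 31 to keep the +1 from overflowing. A small self-checking sketch of both tricks:

    // Sketch only: the bit tricks used by StartOfLiveObject and DigestFreeStart.
    #include <cassert>
    #include <cstdint>

    static uint32_t LowestSetBit(uint32_t cell) {
      // cell ^ (cell - 1) sets every bit up to and including the lowest 1.
      return ((cell ^ (cell - 1)) + 1) >> 1;
    }

    static uint32_t HighestSetBit(uint32_t cell) {
      // Smear the top set bit downwards, then round back up to a single bit.
      // Would overflow for bit 31, which the callers above handle separately.
      cell |= cell >> 16;
      cell |= cell >> 8;
      cell |= cell >> 4;
      cell |= cell >> 2;
      cell |= cell >> 1;
      return (cell + 1) >> 1;
    }

    int main() {
      assert(LowestSetBit(0x48u) == 0x08u);   // 0b1001000 -> 0b0001000
      assert(HighestSetBit(0x48u) == 0x40u);  // 0b1001000 -> 0b1000000
      return 0;
    }
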
3995 // Force instantiation of templatized SweepConservatively method for 3995 // Force instantiation of templatized SweepConservatively method for
3996 // SWEEP_ON_MAIN_THREAD mode. 3996 // SWEEP_ON_MAIN_THREAD mode.
3997 template int MarkCompactCollector:: 3997 template int MarkCompactCollector::
3998 SweepConservatively<MarkCompactCollector::SWEEP_ON_MAIN_THREAD>( 3998 SweepConservatively<MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(
3999 PagedSpace*, FreeList*, Page*); 3999 PagedSpace*, FreeList*, Page*);
(...skipping 10 matching lines...)
4010 // spaces have been put on the free list and the smaller ones have been 4010 // spaces have been put on the free list and the smaller ones have been
4011 // ignored and left untouched. A free space is always either ignored or put 4011 // ignored and left untouched. A free space is always either ignored or put
4012 // on the free list, never split up into two parts. This is important 4012 // on the free list, never split up into two parts. This is important
4013 // because it means that any FreeSpace maps left actually describe a region of 4013 // because it means that any FreeSpace maps left actually describe a region of
4014 // memory that can be ignored when scanning. Dead objects other than free 4014 // memory that can be ignored when scanning. Dead objects other than free
4015 // spaces will not contain the free space map. 4015 // spaces will not contain the free space map.
4016 template<MarkCompactCollector::SweepingParallelism mode> 4016 template<MarkCompactCollector::SweepingParallelism mode>
4017 int MarkCompactCollector::SweepConservatively(PagedSpace* space, 4017 int MarkCompactCollector::SweepConservatively(PagedSpace* space,
4018 FreeList* free_list, 4018 FreeList* free_list,
4019 Page* p) { 4019 Page* p) {
4020 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); 4020 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
4021 ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL && 4021 DCHECK((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
4022 free_list != NULL) || 4022 free_list != NULL) ||
4023 (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD && 4023 (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
4024 free_list == NULL)); 4024 free_list == NULL));
4025 4025
4026 intptr_t freed_bytes = 0; 4026 intptr_t freed_bytes = 0;
4027 intptr_t max_freed_bytes = 0; 4027 intptr_t max_freed_bytes = 0;
4028 size_t size = 0; 4028 size_t size = 0;
4029 4029
4030 // Skip over all the dead objects at the start of the page and mark them free. 4030 // Skip over all the dead objects at the start of the page and mark them free.
4031 Address cell_base = 0; 4031 Address cell_base = 0;
4032 MarkBit::CellType* cell = NULL; 4032 MarkBit::CellType* cell = NULL;
4033 MarkBitCellIterator it(p); 4033 MarkBitCellIterator it(p);
4034 for (; !it.Done(); it.Advance()) { 4034 for (; !it.Done(); it.Advance()) {
4035 cell_base = it.CurrentCellBase(); 4035 cell_base = it.CurrentCellBase();
4036 cell = it.CurrentCell(); 4036 cell = it.CurrentCell();
4037 if (*cell != 0) break; 4037 if (*cell != 0) break;
4038 } 4038 }
4039 4039
4040 if (it.Done()) { 4040 if (it.Done()) {
4041 size = p->area_end() - p->area_start(); 4041 size = p->area_end() - p->area_start();
4042 freed_bytes = Free<mode>(space, free_list, p->area_start(), 4042 freed_bytes = Free<mode>(space, free_list, p->area_start(),
4043 static_cast<int>(size)); 4043 static_cast<int>(size));
4044 max_freed_bytes = Max(freed_bytes, max_freed_bytes); 4044 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
4045 ASSERT_EQ(0, p->LiveBytes()); 4045 DCHECK_EQ(0, p->LiveBytes());
4046 if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) { 4046 if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
4047 // When concurrent sweeping is active, the page will be marked after 4047 // When concurrent sweeping is active, the page will be marked after
4048 // sweeping by the main thread. 4048 // sweeping by the main thread.
4049 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); 4049 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
4050 } else { 4050 } else {
4051 p->MarkSweptConservatively(); 4051 p->MarkSweptConservatively();
4052 } 4052 }
4053 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); 4053 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
4054 } 4054 }
4055 4055
(...skipping 63 matching lines...)
4119 4119
4120 4120
4121 int MarkCompactCollector::SweepInParallel(PagedSpace* space, 4121 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
4122 int required_freed_bytes) { 4122 int required_freed_bytes) {
4123 int max_freed = 0; 4123 int max_freed = 0;
4124 int max_freed_overall = 0; 4124 int max_freed_overall = 0;
4125 PageIterator it(space); 4125 PageIterator it(space);
4126 while (it.has_next()) { 4126 while (it.has_next()) {
4127 Page* p = it.next(); 4127 Page* p = it.next();
4128 max_freed = SweepInParallel(p, space); 4128 max_freed = SweepInParallel(p, space);
4129 ASSERT(max_freed >= 0); 4129 DCHECK(max_freed >= 0);
4130 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { 4130 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
4131 return max_freed; 4131 return max_freed;
4132 } 4132 }
4133 max_freed_overall = Max(max_freed, max_freed_overall); 4133 max_freed_overall = Max(max_freed, max_freed_overall);
4134 if (p == space->end_of_unswept_pages()) break; 4134 if (p == space->end_of_unswept_pages()) break;
4135 } 4135 }
4136 return max_freed_overall; 4136 return max_freed_overall;
4137 } 4137 }
4138 4138
4139 4139
(...skipping 29 matching lines...)
4169 space->set_end_of_unswept_pages(space->FirstPage()); 4169 space->set_end_of_unswept_pages(space->FirstPage());
4170 4170
4171 PageIterator it(space); 4171 PageIterator it(space);
4172 4172
4173 int pages_swept = 0; 4173 int pages_swept = 0;
4174 bool unused_page_present = false; 4174 bool unused_page_present = false;
4175 bool parallel_sweeping_active = false; 4175 bool parallel_sweeping_active = false;
4176 4176
4177 while (it.has_next()) { 4177 while (it.has_next()) {
4178 Page* p = it.next(); 4178 Page* p = it.next();
4179 ASSERT(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); 4179 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
4180 4180
4181 // Clear sweeping flags indicating that marking bits are still intact. 4181 // Clear sweeping flags indicating that marking bits are still intact.
4182 p->ClearSweptPrecisely(); 4182 p->ClearSweptPrecisely();
4183 p->ClearSweptConservatively(); 4183 p->ClearSweptConservatively();
4184 4184
4185 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) || 4185 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
4186 p->IsEvacuationCandidate()) { 4186 p->IsEvacuationCandidate()) {
4187 // Will be processed in EvacuateNewSpaceAndCandidates. 4187 // Will be processed in EvacuateNewSpaceAndCandidates.
4188 ASSERT(evacuation_candidates_.length() > 0); 4188 DCHECK(evacuation_candidates_.length() > 0);
4189 continue; 4189 continue;
4190 } 4190 }
4191 4191
4192 // One unused page is kept; all further ones are released before sweeping them. 4192 // One unused page is kept; all further ones are released before sweeping them.
4193 if (p->LiveBytes() == 0) { 4193 if (p->LiveBytes() == 0) {
4194 if (unused_page_present) { 4194 if (unused_page_present) {
4195 if (FLAG_gc_verbose) { 4195 if (FLAG_gc_verbose) {
4196 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", 4196 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
4197 reinterpret_cast<intptr_t>(p)); 4197 reinterpret_cast<intptr_t>(p));
4198 } 4198 }
(...skipping 191 matching lines...)
4390 while (it.has_next()) { 4390 while (it.has_next()) {
4391 Page* p = it.next(); 4391 Page* p = it.next();
4392 if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) { 4392 if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
4393 p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE); 4393 p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
4394 if (space->swept_precisely()) { 4394 if (space->swept_precisely()) {
4395 p->MarkSweptPrecisely(); 4395 p->MarkSweptPrecisely();
4396 } else { 4396 } else {
4397 p->MarkSweptConservatively(); 4397 p->MarkSweptConservatively();
4398 } 4398 }
4399 } 4399 }
4400 ASSERT(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); 4400 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
4401 } 4401 }
4402 } 4402 }
4403 4403
4404 4404
4405 void MarkCompactCollector::ParallelSweepSpacesComplete() { 4405 void MarkCompactCollector::ParallelSweepSpacesComplete() {
4406 ParallelSweepSpaceComplete(heap()->old_pointer_space()); 4406 ParallelSweepSpaceComplete(heap()->old_pointer_space());
4407 ParallelSweepSpaceComplete(heap()->old_data_space()); 4407 ParallelSweepSpaceComplete(heap()->old_data_space());
4408 } 4408 }
4409 4409
4410 4410
(...skipping 53 matching lines...)
4464 AdditionMode mode) { 4464 AdditionMode mode) {
4465 SlotsBuffer* buffer = *buffer_address; 4465 SlotsBuffer* buffer = *buffer_address;
4466 if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) { 4466 if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
4467 if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) { 4467 if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
4468 allocator->DeallocateChain(buffer_address); 4468 allocator->DeallocateChain(buffer_address);
4469 return false; 4469 return false;
4470 } 4470 }
4471 buffer = allocator->AllocateBuffer(buffer); 4471 buffer = allocator->AllocateBuffer(buffer);
4472 *buffer_address = buffer; 4472 *buffer_address = buffer;
4473 } 4473 }
4474 ASSERT(buffer->HasSpaceForTypedSlot()); 4474 DCHECK(buffer->HasSpaceForTypedSlot());
4475 buffer->Add(reinterpret_cast<ObjectSlot>(type)); 4475 buffer->Add(reinterpret_cast<ObjectSlot>(type));
4476 buffer->Add(reinterpret_cast<ObjectSlot>(addr)); 4476 buffer->Add(reinterpret_cast<ObjectSlot>(addr));
4477 return true; 4477 return true;
4478 } 4478 }
4479 4479
4480 4480
4481 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) { 4481 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
4482 if (RelocInfo::IsCodeTarget(rmode)) { 4482 if (RelocInfo::IsCodeTarget(rmode)) {
4483 return SlotsBuffer::CODE_TARGET_SLOT; 4483 return SlotsBuffer::CODE_TARGET_SLOT;
4484 } else if (RelocInfo::IsEmbeddedObject(rmode)) { 4484 } else if (RelocInfo::IsEmbeddedObject(rmode)) {
(...skipping 52 matching lines...)
4537 SlotsBuffer::CODE_ENTRY_SLOT, 4537 SlotsBuffer::CODE_ENTRY_SLOT,
4538 slot, 4538 slot,
4539 SlotsBuffer::FAIL_ON_OVERFLOW)) { 4539 SlotsBuffer::FAIL_ON_OVERFLOW)) {
4540 EvictEvacuationCandidate(target_page); 4540 EvictEvacuationCandidate(target_page);
4541 } 4541 }
4542 } 4542 }
4543 } 4543 }
4544 4544
4545 4545
4546 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) { 4546 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
4547 ASSERT(heap()->gc_state() == Heap::MARK_COMPACT); 4547 DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
4548 if (is_compacting()) { 4548 if (is_compacting()) {
4549 Code* host = isolate()->inner_pointer_to_code_cache()-> 4549 Code* host = isolate()->inner_pointer_to_code_cache()->
4550 GcSafeFindCodeForInnerPointer(pc); 4550 GcSafeFindCodeForInnerPointer(pc);
4551 MarkBit mark_bit = Marking::MarkBitFrom(host); 4551 MarkBit mark_bit = Marking::MarkBitFrom(host);
4552 if (Marking::IsBlack(mark_bit)) { 4552 if (Marking::IsBlack(mark_bit)) {
4553 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); 4553 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
4554 RecordRelocSlot(&rinfo, target); 4554 RecordRelocSlot(&rinfo, target);
4555 } 4555 }
4556 } 4556 }
4557 } 4557 }
4558 4558
4559 4559
4560 static inline SlotsBuffer::SlotType DecodeSlotType( 4560 static inline SlotsBuffer::SlotType DecodeSlotType(
4561 SlotsBuffer::ObjectSlot slot) { 4561 SlotsBuffer::ObjectSlot slot) {
4562 return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot)); 4562 return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
4563 } 4563 }
4564 4564
4565 4565
4566 void SlotsBuffer::UpdateSlots(Heap* heap) { 4566 void SlotsBuffer::UpdateSlots(Heap* heap) {
4567 PointersUpdatingVisitor v(heap); 4567 PointersUpdatingVisitor v(heap);
4568 4568
4569 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) { 4569 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4570 ObjectSlot slot = slots_[slot_idx]; 4570 ObjectSlot slot = slots_[slot_idx];
4571 if (!IsTypedSlot(slot)) { 4571 if (!IsTypedSlot(slot)) {
4572 PointersUpdatingVisitor::UpdateSlot(heap, slot); 4572 PointersUpdatingVisitor::UpdateSlot(heap, slot);
4573 } else { 4573 } else {
4574 ++slot_idx; 4574 ++slot_idx;
4575 ASSERT(slot_idx < idx_); 4575 DCHECK(slot_idx < idx_);
4576 UpdateSlot(heap->isolate(), 4576 UpdateSlot(heap->isolate(),
4577 &v, 4577 &v,
4578 DecodeSlotType(slot), 4578 DecodeSlotType(slot),
4579 reinterpret_cast<Address>(slots_[slot_idx])); 4579 reinterpret_cast<Address>(slots_[slot_idx]));
4580 } 4580 }
4581 } 4581 }
4582 } 4582 }
4583 4583
4584 4584
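A note for readers of UpdateSlots above and UpdateSlotsWithFilter below: the buffer stores an untyped slot as a single entry (the slot's address), while a typed slot occupies two consecutive entries, the SlotType smuggled through an ObjectSlot followed by the address it applies to (see SlotsBuffer::AddTo above). That pairing is why both loops bump slot_idx an extra time after a typed entry. A minimal sketch of the encoding with simplified, hypothetical types:

    // Sketch only: typed entries are written as [encoded type, address] pairs.
    #include <cstdint>
    #include <vector>

    using ObjectSlot = void**;
    enum SlotType { CODE_TARGET_SLOT = 0, EMBEDDED_OBJECT_SLOT = 1 };

    static void AddTypedSlot(std::vector<ObjectSlot>* buffer,
                             SlotType type, void* addr) {
      buffer->push_back(
          reinterpret_cast<ObjectSlot>(static_cast<intptr_t>(type)));
      buffer->push_back(reinterpret_cast<ObjectSlot>(addr));
    }
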
4585 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) { 4585 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
4586 PointersUpdatingVisitor v(heap); 4586 PointersUpdatingVisitor v(heap);
4587 4587
4588 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) { 4588 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4589 ObjectSlot slot = slots_[slot_idx]; 4589 ObjectSlot slot = slots_[slot_idx];
4590 if (!IsTypedSlot(slot)) { 4590 if (!IsTypedSlot(slot)) {
4591 if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) { 4591 if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
4592 PointersUpdatingVisitor::UpdateSlot(heap, slot); 4592 PointersUpdatingVisitor::UpdateSlot(heap, slot);
4593 } 4593 }
4594 } else { 4594 } else {
4595 ++slot_idx; 4595 ++slot_idx;
4596 ASSERT(slot_idx < idx_); 4596 DCHECK(slot_idx < idx_);
4597 Address pc = reinterpret_cast<Address>(slots_[slot_idx]); 4597 Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
4598 if (!IsOnInvalidatedCodeObject(pc)) { 4598 if (!IsOnInvalidatedCodeObject(pc)) {
4599 UpdateSlot(heap->isolate(), 4599 UpdateSlot(heap->isolate(),
4600 &v, 4600 &v,
4601 DecodeSlotType(slot), 4601 DecodeSlotType(slot),
4602 reinterpret_cast<Address>(slots_[slot_idx])); 4602 reinterpret_cast<Address>(slots_[slot_idx]));
4603 } 4603 }
4604 } 4604 }
4605 } 4605 }
4606 } 4606 }
(...skipping 14 matching lines...)
4621 while (buffer != NULL) { 4621 while (buffer != NULL) {
4622 SlotsBuffer* next_buffer = buffer->next(); 4622 SlotsBuffer* next_buffer = buffer->next();
4623 DeallocateBuffer(buffer); 4623 DeallocateBuffer(buffer);
4624 buffer = next_buffer; 4624 buffer = next_buffer;
4625 } 4625 }
4626 *buffer_address = NULL; 4626 *buffer_address = NULL;
4627 } 4627 }
4628 4628
4629 4629
4630 } } // namespace v8::internal 4630 } } // namespace v8::internal