Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(75)

Side by Side Diff: src/mark-compact.cc

Issue 185653004: Experimental parser: merge to r19637 (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/parser
Patch Set: Created 6 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/mark-compact.h ('k') | src/messages.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
49 49
50 const char* Marking::kWhiteBitPattern = "00"; 50 const char* Marking::kWhiteBitPattern = "00";
51 const char* Marking::kBlackBitPattern = "10"; 51 const char* Marking::kBlackBitPattern = "10";
52 const char* Marking::kGreyBitPattern = "11"; 52 const char* Marking::kGreyBitPattern = "11";
53 const char* Marking::kImpossibleBitPattern = "01"; 53 const char* Marking::kImpossibleBitPattern = "01";
54 54
55 55
56 // ------------------------------------------------------------------------- 56 // -------------------------------------------------------------------------
57 // MarkCompactCollector 57 // MarkCompactCollector
58 58
59 MarkCompactCollector::MarkCompactCollector() : // NOLINT 59 MarkCompactCollector::MarkCompactCollector(Heap* heap) : // NOLINT
60 #ifdef DEBUG 60 #ifdef DEBUG
61 state_(IDLE), 61 state_(IDLE),
62 #endif 62 #endif
63 sweep_precisely_(false), 63 sweep_precisely_(false),
64 reduce_memory_footprint_(false), 64 reduce_memory_footprint_(false),
65 abort_incremental_marking_(false), 65 abort_incremental_marking_(false),
66 marking_parity_(ODD_MARKING_PARITY), 66 marking_parity_(ODD_MARKING_PARITY),
67 compacting_(false), 67 compacting_(false),
68 was_marked_incrementally_(false), 68 was_marked_incrementally_(false),
69 sweeping_pending_(false), 69 sweeping_pending_(false),
70 pending_sweeper_jobs_semaphore_(0),
70 sequential_sweeping_(false), 71 sequential_sweeping_(false),
71 tracer_(NULL), 72 tracer_(NULL),
72 migration_slots_buffer_(NULL), 73 migration_slots_buffer_(NULL),
73 heap_(NULL), 74 heap_(heap),
74 code_flusher_(NULL), 75 code_flusher_(NULL),
75 encountered_weak_collections_(NULL), 76 encountered_weak_collections_(NULL),
76 have_code_to_deoptimize_(false) { } 77 have_code_to_deoptimize_(false) { }
77 78
78 #ifdef VERIFY_HEAP 79 #ifdef VERIFY_HEAP
79 class VerifyMarkingVisitor: public ObjectVisitor { 80 class VerifyMarkingVisitor: public ObjectVisitor {
80 public: 81 public:
81 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} 82 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
82 83
83 void VisitPointers(Object** start, Object** end) { 84 void VisitPointers(Object** start, Object** end) {
84 for (Object** current = start; current < end; current++) { 85 for (Object** current = start; current < end; current++) {
85 if ((*current)->IsHeapObject()) { 86 if ((*current)->IsHeapObject()) {
86 HeapObject* object = HeapObject::cast(*current); 87 HeapObject* object = HeapObject::cast(*current);
87 CHECK(heap_->mark_compact_collector()->IsMarked(object)); 88 CHECK(heap_->mark_compact_collector()->IsMarked(object));
88 } 89 }
89 } 90 }
90 } 91 }
91 92
92 void VisitEmbeddedPointer(RelocInfo* rinfo) { 93 void VisitEmbeddedPointer(RelocInfo* rinfo) {
93 ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); 94 ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
94 if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(), 95 if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
95 rinfo->target_object())) {
96 Object* p = rinfo->target_object(); 96 Object* p = rinfo->target_object();
97 VisitPointer(&p); 97 VisitPointer(&p);
98 } 98 }
99 } 99 }
100 100
101 void VisitCell(RelocInfo* rinfo) { 101 void VisitCell(RelocInfo* rinfo) {
102 Code* code = rinfo->host(); 102 Code* code = rinfo->host();
103 ASSERT(rinfo->rmode() == RelocInfo::CELL); 103 ASSERT(rinfo->rmode() == RelocInfo::CELL);
104 if (!Code::IsWeakEmbeddedObject(code->kind(), rinfo->target_cell())) { 104 if (!code->IsWeakObject(rinfo->target_cell())) {
105 ObjectVisitor::VisitCell(rinfo); 105 ObjectVisitor::VisitCell(rinfo);
106 } 106 }
107 } 107 }
108 108
109 private: 109 private:
110 Heap* heap_; 110 Heap* heap_;
111 }; 111 };
112 112
113 113
114 static void VerifyMarking(Heap* heap, Address bottom, Address top) { 114 static void VerifyMarking(Heap* heap, Address bottom, Address top) {
(...skipping 226 matching lines...) Expand 10 before | Expand all | Expand 10 after
341 HeapObjectIterator it(heap->code_space()); 341 HeapObjectIterator it(heap->code_space());
342 342
343 for (Object* object = it.Next(); object != NULL; object = it.Next()) { 343 for (Object* object = it.Next(); object != NULL; object = it.Next()) {
344 VerifyNativeContextSeparationVisitor visitor; 344 VerifyNativeContextSeparationVisitor visitor;
345 Code::cast(object)->CodeIterateBody(&visitor); 345 Code::cast(object)->CodeIterateBody(&visitor);
346 } 346 }
347 } 347 }
348 #endif 348 #endif
349 349
350 350
351 void MarkCompactCollector::SetUp() {
352 free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
353 free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
354 }
355
356
351 void MarkCompactCollector::TearDown() { 357 void MarkCompactCollector::TearDown() {
352 AbortCompaction(); 358 AbortCompaction();
353 } 359 }
354 360
355 361
356 void MarkCompactCollector::AddEvacuationCandidate(Page* p) { 362 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
357 p->MarkEvacuationCandidate(); 363 p->MarkEvacuationCandidate();
358 evacuation_candidates_.Add(p); 364 evacuation_candidates_.Add(p);
359 } 365 }
360 366
(...skipping 195 matching lines...) Expand 10 before | Expand all | Expand 10 after
556 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { 562 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
557 MarkBit mark_bit = Marking::MarkBitFrom(obj); 563 MarkBit mark_bit = Marking::MarkBitFrom(obj);
558 mark_bit.Clear(); 564 mark_bit.Clear();
559 mark_bit.Next().Clear(); 565 mark_bit.Next().Clear();
560 Page::FromAddress(obj->address())->ResetProgressBar(); 566 Page::FromAddress(obj->address())->ResetProgressBar();
561 Page::FromAddress(obj->address())->ResetLiveBytes(); 567 Page::FromAddress(obj->address())->ResetLiveBytes();
562 } 568 }
563 } 569 }
564 570
565 571
572 class MarkCompactCollector::SweeperTask : public v8::Task {
573 public:
574 SweeperTask(Heap* heap, PagedSpace* space)
575 : heap_(heap), space_(space) {}
576
577 virtual ~SweeperTask() {}
578
579 private:
580 // v8::Task overrides.
581 virtual void Run() V8_OVERRIDE {
582 heap_->mark_compact_collector()->SweepInParallel(space_);
583 heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
584 }
585
586 Heap* heap_;
587 PagedSpace* space_;
588
589 DISALLOW_COPY_AND_ASSIGN(SweeperTask);
590 };
591
592
566 void MarkCompactCollector::StartSweeperThreads() { 593 void MarkCompactCollector::StartSweeperThreads() {
594 // TODO(hpayer): This check is just used for debugging purposes and
595 // should be removed or turned into an assert after investigating the
596 // crash in concurrent sweeping.
597 CHECK(free_list_old_pointer_space_.get()->IsEmpty());
598 CHECK(free_list_old_data_space_.get()->IsEmpty());
567 sweeping_pending_ = true; 599 sweeping_pending_ = true;
568 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { 600 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
569 isolate()->sweeper_threads()[i]->StartSweeping(); 601 isolate()->sweeper_threads()[i]->StartSweeping();
570 } 602 }
603 if (FLAG_job_based_sweeping) {
604 V8::GetCurrentPlatform()->CallOnBackgroundThread(
605 new SweeperTask(heap(), heap()->old_data_space()),
606 v8::Platform::kShortRunningTask);
607 V8::GetCurrentPlatform()->CallOnBackgroundThread(
608 new SweeperTask(heap(), heap()->old_pointer_space()),
609 v8::Platform::kShortRunningTask);
610 }
571 } 611 }
572 612
573 613
574 void MarkCompactCollector::WaitUntilSweepingCompleted() { 614 void MarkCompactCollector::WaitUntilSweepingCompleted() {
575 ASSERT(sweeping_pending_ == true); 615 ASSERT(sweeping_pending_ == true);
576 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { 616 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
577 isolate()->sweeper_threads()[i]->WaitForSweeperThread(); 617 isolate()->sweeper_threads()[i]->WaitForSweeperThread();
578 } 618 }
619 if (FLAG_job_based_sweeping) {
620 // Wait twice for both jobs.
621 pending_sweeper_jobs_semaphore_.Wait();
622 pending_sweeper_jobs_semaphore_.Wait();
623 }
624 ParallelSweepSpacesComplete();
579 sweeping_pending_ = false; 625 sweeping_pending_ = false;
580 StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE)); 626 RefillFreeLists(heap()->paged_space(OLD_DATA_SPACE));
581 StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE)); 627 RefillFreeLists(heap()->paged_space(OLD_POINTER_SPACE));
582 heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes(); 628 heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
583 heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes(); 629 heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
584 } 630 }
585 631
586 632
587 intptr_t MarkCompactCollector:: 633 intptr_t MarkCompactCollector::RefillFreeLists(PagedSpace* space) {
588 StealMemoryFromSweeperThreads(PagedSpace* space) { 634 FreeList* free_list;
589 intptr_t freed_bytes = 0; 635
590 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { 636 if (space == heap()->old_pointer_space()) {
591 freed_bytes += isolate()->sweeper_threads()[i]->StealMemory(space); 637 free_list = free_list_old_pointer_space_.get();
638 } else if (space == heap()->old_data_space()) {
639 free_list = free_list_old_data_space_.get();
640 } else {
641 // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
642 // to only refill them for old data and pointer spaces.
643 return 0;
592 } 644 }
645
646 intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
593 space->AddToAccountingStats(freed_bytes); 647 space->AddToAccountingStats(freed_bytes);
594 space->DecrementUnsweptFreeBytes(freed_bytes); 648 space->DecrementUnsweptFreeBytes(freed_bytes);
595 return freed_bytes; 649 return freed_bytes;
596 } 650 }
597 651
598 652
599 bool MarkCompactCollector::AreSweeperThreadsActivated() { 653 bool MarkCompactCollector::AreSweeperThreadsActivated() {
600 return isolate()->sweeper_threads() != NULL; 654 return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
601 } 655 }
602 656
603 657
604 bool MarkCompactCollector::IsConcurrentSweepingInProgress() { 658 bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
605 return sweeping_pending_; 659 return sweeping_pending_;
606 } 660 }
607 661
608 662
609 bool Marking::TransferMark(Address old_start, Address new_start) { 663 bool Marking::TransferMark(Address old_start, Address new_start) {
610 // This is only used when resizing an object. 664 // This is only used when resizing an object.
(...skipping 1981 matching lines...) Expand 10 before | Expand all | Expand 10 after
2592 2646
2593 int new_number_of_transitions = 0; 2647 int new_number_of_transitions = 0;
2594 const int header = Map::kProtoTransitionHeaderSize; 2648 const int header = Map::kProtoTransitionHeaderSize;
2595 const int proto_offset = header + Map::kProtoTransitionPrototypeOffset; 2649 const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2596 const int map_offset = header + Map::kProtoTransitionMapOffset; 2650 const int map_offset = header + Map::kProtoTransitionMapOffset;
2597 const int step = Map::kProtoTransitionElementsPerEntry; 2651 const int step = Map::kProtoTransitionElementsPerEntry;
2598 for (int i = 0; i < number_of_transitions; i++) { 2652 for (int i = 0; i < number_of_transitions; i++) {
2599 Object* prototype = prototype_transitions->get(proto_offset + i * step); 2653 Object* prototype = prototype_transitions->get(proto_offset + i * step);
2600 Object* cached_map = prototype_transitions->get(map_offset + i * step); 2654 Object* cached_map = prototype_transitions->get(map_offset + i * step);
2601 if (IsMarked(prototype) && IsMarked(cached_map)) { 2655 if (IsMarked(prototype) && IsMarked(cached_map)) {
2656 ASSERT(!prototype->IsUndefined());
2602 int proto_index = proto_offset + new_number_of_transitions * step; 2657 int proto_index = proto_offset + new_number_of_transitions * step;
2603 int map_index = map_offset + new_number_of_transitions * step; 2658 int map_index = map_offset + new_number_of_transitions * step;
2604 if (new_number_of_transitions != i) { 2659 if (new_number_of_transitions != i) {
2605 prototype_transitions->set( 2660 prototype_transitions->set(
2606 proto_index, 2661 proto_index,
2607 prototype, 2662 prototype,
2608 UPDATE_WRITE_BARRIER); 2663 UPDATE_WRITE_BARRIER);
2609 prototype_transitions->set( 2664 prototype_transitions->set(
2610 map_index, 2665 map_index,
2611 cached_map, 2666 cached_map,
(...skipping 154 matching lines...) Expand 10 before | Expand all | Expand 10 after
2766 // pointers to new space. 2821 // pointers to new space.
2767 void MarkCompactCollector::MigrateObject(Address dst, 2822 void MarkCompactCollector::MigrateObject(Address dst,
2768 Address src, 2823 Address src,
2769 int size, 2824 int size,
2770 AllocationSpace dest) { 2825 AllocationSpace dest) {
2771 HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler(); 2826 HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
2772 if (heap_profiler->is_tracking_object_moves()) { 2827 if (heap_profiler->is_tracking_object_moves()) {
2773 heap_profiler->ObjectMoveEvent(src, dst, size); 2828 heap_profiler->ObjectMoveEvent(src, dst, size);
2774 } 2829 }
2775 ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest)); 2830 ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
2776 ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize); 2831 ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
2777 if (dest == OLD_POINTER_SPACE) { 2832 if (dest == OLD_POINTER_SPACE) {
2778 Address src_slot = src; 2833 Address src_slot = src;
2779 Address dst_slot = dst; 2834 Address dst_slot = dst;
2780 ASSERT(IsAligned(size, kPointerSize)); 2835 ASSERT(IsAligned(size, kPointerSize));
2781 2836
2782 for (int remaining = size / kPointerSize; remaining > 0; remaining--) { 2837 for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2783 Object* value = Memory::Object_at(src_slot); 2838 Object* value = Memory::Object_at(src_slot);
2784 2839
2785 Memory::Object_at(dst_slot) = value; 2840 Memory::Object_at(dst_slot) = value;
2786 2841
(...skipping 153 matching lines...) Expand 10 before | Expand all | Expand 10 after
2940 return String::cast(map_word.ToForwardingAddress()); 2995 return String::cast(map_word.ToForwardingAddress());
2941 } 2996 }
2942 2997
2943 return String::cast(*p); 2998 return String::cast(*p);
2944 } 2999 }
2945 3000
2946 3001
2947 bool MarkCompactCollector::TryPromoteObject(HeapObject* object, 3002 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
2948 int object_size) { 3003 int object_size) {
2949 // TODO(hpayer): Replace that check with an assert. 3004 // TODO(hpayer): Replace that check with an assert.
2950 CHECK(object_size <= Page::kMaxNonCodeHeapObjectSize); 3005 CHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2951 3006
2952 OldSpace* target_space = heap()->TargetSpace(object); 3007 OldSpace* target_space = heap()->TargetSpace(object);
2953 3008
2954 ASSERT(target_space == heap()->old_pointer_space() || 3009 ASSERT(target_space == heap()->old_pointer_space() ||
2955 target_space == heap()->old_data_space()); 3010 target_space == heap()->old_data_space());
2956 Object* result; 3011 Object* result;
2957 MaybeObject* maybe_result = target_space->AllocateRaw(object_size); 3012 MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
2958 if (maybe_result->ToObject(&result)) { 3013 if (maybe_result->ToObject(&result)) {
2959 HeapObject* target = HeapObject::cast(result); 3014 HeapObject* target = HeapObject::cast(result);
2960 MigrateObject(target->address(), 3015 MigrateObject(target->address(),
(...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after
3047 *cell = 0; 3102 *cell = 0;
3048 } 3103 }
3049 p->ResetLiveBytes(); 3104 p->ResetLiveBytes();
3050 } 3105 }
3051 3106
3052 3107
3053 void MarkCompactCollector::EvacuatePages() { 3108 void MarkCompactCollector::EvacuatePages() {
3054 int npages = evacuation_candidates_.length(); 3109 int npages = evacuation_candidates_.length();
3055 for (int i = 0; i < npages; i++) { 3110 for (int i = 0; i < npages; i++) {
3056 Page* p = evacuation_candidates_[i]; 3111 Page* p = evacuation_candidates_[i];
3057 ASSERT(p->IsEvacuationCandidate() || 3112 // TODO(hpayer): This check is just used for debugging purposes and
3058 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3113 // should be removed or turned into an assert after investigating the
3114 // crash in concurrent sweeping.
3115 CHECK(p->IsEvacuationCandidate() ||
3116 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3117 CHECK_EQ(static_cast<int>(p->parallel_sweeping()), 0);
3059 if (p->IsEvacuationCandidate()) { 3118 if (p->IsEvacuationCandidate()) {
3060 // During compaction we might have to request a new page. 3119 // During compaction we might have to request a new page.
3061 // Check that space still have room for that. 3120 // Check that space still have room for that.
3062 if (static_cast<PagedSpace*>(p->owner())->CanExpand()) { 3121 if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
3063 EvacuateLiveObjectsFromPage(p); 3122 EvacuateLiveObjectsFromPage(p);
3064 } else { 3123 } else {
3065 // Without room for expansion evacuation is not guaranteed to succeed. 3124 // Without room for expansion evacuation is not guaranteed to succeed.
3066 // Pessimistically abandon unevacuated pages. 3125 // Pessimistically abandon unevacuated pages.
3067 for (int j = i; j < npages; j++) { 3126 for (int j = i; j < npages; j++) {
3068 Page* page = evacuation_candidates_[j]; 3127 Page* page = evacuation_candidates_[j];
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after
3139 SWEEP_AND_VISIT_LIVE_OBJECTS 3198 SWEEP_AND_VISIT_LIVE_OBJECTS
3140 }; 3199 };
3141 3200
3142 3201
3143 enum SkipListRebuildingMode { 3202 enum SkipListRebuildingMode {
3144 REBUILD_SKIP_LIST, 3203 REBUILD_SKIP_LIST,
3145 IGNORE_SKIP_LIST 3204 IGNORE_SKIP_LIST
3146 }; 3205 };
3147 3206
3148 3207
3208 enum FreeSpaceTreatmentMode {
3209 IGNORE_FREE_SPACE,
3210 ZAP_FREE_SPACE
3211 };
3212
3213
3149 // Sweep a space precisely. After this has been done the space can 3214 // Sweep a space precisely. After this has been done the space can
3150 // be iterated precisely, hitting only the live objects. Code space 3215 // be iterated precisely, hitting only the live objects. Code space
3151 // is always swept precisely because we want to be able to iterate 3216 // is always swept precisely because we want to be able to iterate
3152 // over it. Map space is swept precisely, because it is not compacted. 3217 // over it. Map space is swept precisely, because it is not compacted.
3153 // Slots in live objects pointing into evacuation candidates are updated 3218 // Slots in live objects pointing into evacuation candidates are updated
3154 // if requested. 3219 // if requested.
3155 template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode> 3220 template<SweepingMode sweeping_mode,
3221 SkipListRebuildingMode skip_list_mode,
3222 FreeSpaceTreatmentMode free_space_mode>
3156 static void SweepPrecisely(PagedSpace* space, 3223 static void SweepPrecisely(PagedSpace* space,
3157 Page* p, 3224 Page* p,
3158 ObjectVisitor* v) { 3225 ObjectVisitor* v) {
3159 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); 3226 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3160 ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST, 3227 ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3161 space->identity() == CODE_SPACE); 3228 space->identity() == CODE_SPACE);
3162 ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); 3229 ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3163 3230
3164 double start_time = 0.0; 3231 double start_time = 0.0;
3165 if (FLAG_print_cumulative_gc_stat) { 3232 if (FLAG_print_cumulative_gc_stat) {
(...skipping 13 matching lines...) Expand all
3179 } 3246 }
3180 3247
3181 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { 3248 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3182 Address cell_base = it.CurrentCellBase(); 3249 Address cell_base = it.CurrentCellBase();
3183 MarkBit::CellType* cell = it.CurrentCell(); 3250 MarkBit::CellType* cell = it.CurrentCell();
3184 int live_objects = MarkWordToObjectStarts(*cell, offsets); 3251 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3185 int live_index = 0; 3252 int live_index = 0;
3186 for ( ; live_objects != 0; live_objects--) { 3253 for ( ; live_objects != 0; live_objects--) {
3187 Address free_end = cell_base + offsets[live_index++] * kPointerSize; 3254 Address free_end = cell_base + offsets[live_index++] * kPointerSize;
3188 if (free_end != free_start) { 3255 if (free_end != free_start) {
3256 if (free_space_mode == ZAP_FREE_SPACE) {
3257 memset(free_start, 0xcc, static_cast<int>(free_end - free_start));
3258 }
3189 space->Free(free_start, static_cast<int>(free_end - free_start)); 3259 space->Free(free_start, static_cast<int>(free_end - free_start));
3190 #ifdef ENABLE_GDB_JIT_INTERFACE 3260 #ifdef ENABLE_GDB_JIT_INTERFACE
3191 if (FLAG_gdbjit && space->identity() == CODE_SPACE) { 3261 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3192 GDBJITInterface::RemoveCodeRange(free_start, free_end); 3262 GDBJITInterface::RemoveCodeRange(free_start, free_end);
3193 } 3263 }
3194 #endif 3264 #endif
3195 } 3265 }
3196 HeapObject* live_object = HeapObject::FromAddress(free_end); 3266 HeapObject* live_object = HeapObject::FromAddress(free_end);
3197 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object))); 3267 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3198 Map* map = live_object->map(); 3268 Map* map = live_object->map();
(...skipping 11 matching lines...) Expand all
3210 skip_list->AddObject(free_end, size); 3280 skip_list->AddObject(free_end, size);
3211 curr_region = new_region_end; 3281 curr_region = new_region_end;
3212 } 3282 }
3213 } 3283 }
3214 free_start = free_end + size; 3284 free_start = free_end + size;
3215 } 3285 }
3216 // Clear marking bits for current cell. 3286 // Clear marking bits for current cell.
3217 *cell = 0; 3287 *cell = 0;
3218 } 3288 }
3219 if (free_start != p->area_end()) { 3289 if (free_start != p->area_end()) {
3290 if (free_space_mode == ZAP_FREE_SPACE) {
3291 memset(free_start, 0xcc, static_cast<int>(p->area_end() - free_start));
3292 }
3220 space->Free(free_start, static_cast<int>(p->area_end() - free_start)); 3293 space->Free(free_start, static_cast<int>(p->area_end() - free_start));
3221 #ifdef ENABLE_GDB_JIT_INTERFACE 3294 #ifdef ENABLE_GDB_JIT_INTERFACE
3222 if (FLAG_gdbjit && space->identity() == CODE_SPACE) { 3295 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3223 GDBJITInterface::RemoveCodeRange(free_start, p->area_end()); 3296 GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
3224 } 3297 }
3225 #endif 3298 #endif
3226 } 3299 }
3227 p->ResetLiveBytes(); 3300 p->ResetLiveBytes();
3228 if (FLAG_print_cumulative_gc_stat) { 3301 if (FLAG_print_cumulative_gc_stat) {
3229 space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time); 3302 space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
(...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after
3355 3428
3356 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { 3429 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3357 Heap::RelocationLock relocation_lock(heap()); 3430 Heap::RelocationLock relocation_lock(heap());
3358 3431
3359 bool code_slots_filtering_required; 3432 bool code_slots_filtering_required;
3360 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); 3433 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
3361 code_slots_filtering_required = MarkInvalidatedCode(); 3434 code_slots_filtering_required = MarkInvalidatedCode();
3362 EvacuateNewSpace(); 3435 EvacuateNewSpace();
3363 } 3436 }
3364 3437
3365 // We have to traverse our allocation sites scratchpad which contains raw
3366 // pointers before we move objects. During new space evacuation we
3367 // gathered pretenuring statistics. The found allocation sites may not be
3368 // valid after compacting old space.
3369 heap()->ProcessPretenuringFeedback();
3370
3371
3372 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES); 3438 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
3373 EvacuatePages(); 3439 EvacuatePages();
3374 } 3440 }
3375 3441
3376 // Second pass: find pointers to new space and update them. 3442 // Second pass: find pointers to new space and update them.
3377 PointersUpdatingVisitor updating_visitor(heap()); 3443 PointersUpdatingVisitor updating_visitor(heap());
3378 3444
3379 { GCTracer::Scope gc_scope(tracer_, 3445 { GCTracer::Scope gc_scope(tracer_,
3380 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS); 3446 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
3381 // Update pointers in to space. 3447 // Update pointers in to space.
(...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after
3462 reinterpret_cast<intptr_t>(p)); 3528 reinterpret_cast<intptr_t>(p));
3463 } 3529 }
3464 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3530 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3465 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); 3531 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3466 3532
3467 switch (space->identity()) { 3533 switch (space->identity()) {
3468 case OLD_DATA_SPACE: 3534 case OLD_DATA_SPACE:
3469 SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p); 3535 SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
3470 break; 3536 break;
3471 case OLD_POINTER_SPACE: 3537 case OLD_POINTER_SPACE:
3472 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>( 3538 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
3539 IGNORE_SKIP_LIST,
3540 IGNORE_FREE_SPACE>(
3473 space, p, &updating_visitor); 3541 space, p, &updating_visitor);
3474 break; 3542 break;
3475 case CODE_SPACE: 3543 case CODE_SPACE:
3476 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>( 3544 if (FLAG_zap_code_space) {
3477 space, p, &updating_visitor); 3545 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
3546 REBUILD_SKIP_LIST,
3547 ZAP_FREE_SPACE>(
3548 space, p, &updating_visitor);
3549 } else {
3550 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
3551 REBUILD_SKIP_LIST,
3552 IGNORE_FREE_SPACE>(
3553 space, p, &updating_visitor);
3554 }
3478 break; 3555 break;
3479 default: 3556 default:
3480 UNREACHABLE(); 3557 UNREACHABLE();
3481 break; 3558 break;
3482 } 3559 }
3483 } 3560 }
3484 } 3561 }
3485 } 3562 }
3486 3563
3487 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS); 3564 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
(...skipping 391 matching lines...) Expand 10 before | Expand all | Expand 10 after
3879 // spaces have been put on the free list and the smaller ones have been 3956 // spaces have been put on the free list and the smaller ones have been
3880 // ignored and left untouched. A free space is always either ignored or put 3957 // ignored and left untouched. A free space is always either ignored or put
3881 // on the free list, never split up into two parts. This is important 3958 // on the free list, never split up into two parts. This is important
3882 // because it means that any FreeSpace maps left actually describe a region of 3959 // because it means that any FreeSpace maps left actually describe a region of
3883 // memory that can be ignored when scanning. Dead objects other than free 3960 // memory that can be ignored when scanning. Dead objects other than free
3884 // spaces will not contain the free space map. 3961 // spaces will not contain the free space map.
3885 template<MarkCompactCollector::SweepingParallelism mode> 3962 template<MarkCompactCollector::SweepingParallelism mode>
3886 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, 3963 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
3887 FreeList* free_list, 3964 FreeList* free_list,
3888 Page* p) { 3965 Page* p) {
3889 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); 3966 // TODO(hpayer): This check is just used for debugging purposes and
3967 // should be removed or turned into an assert after investigating the
3968 // crash in concurrent sweeping.
3969 CHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
3890 ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL && 3970 ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
3891 free_list != NULL) || 3971 free_list != NULL) ||
3892 (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY && 3972 (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
3893 free_list == NULL)); 3973 free_list == NULL));
3894 3974
3895 p->MarkSweptConservatively(); 3975 // When parallel sweeping is active, the page will be marked after
3976 // sweeping by the main thread.
3977 if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) {
3978 p->MarkSweptConservatively();
3979 }
3896 3980
3897 intptr_t freed_bytes = 0; 3981 intptr_t freed_bytes = 0;
3898 size_t size = 0; 3982 size_t size = 0;
3899 3983
3900 // Skip over all the dead objects at the start of the page and mark them free. 3984 // Skip over all the dead objects at the start of the page and mark them free.
3901 Address cell_base = 0; 3985 Address cell_base = 0;
3902 MarkBit::CellType* cell = NULL; 3986 MarkBit::CellType* cell = NULL;
3903 MarkBitCellIterator it(p); 3987 MarkBitCellIterator it(p);
3904 for (; !it.Done(); it.Advance()) { 3988 for (; !it.Done(); it.Advance()) {
3905 cell_base = it.CurrentCellBase(); 3989 cell_base = it.CurrentCellBase();
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
3963 free_start = DigestFreeStart(free_start, free_start_cell); 4047 free_start = DigestFreeStart(free_start, free_start_cell);
3964 freed_bytes += Free<mode>(space, free_list, free_start, 4048 freed_bytes += Free<mode>(space, free_list, free_start,
3965 static_cast<int>(p->area_end() - free_start)); 4049 static_cast<int>(p->area_end() - free_start));
3966 } 4050 }
3967 4051
3968 p->ResetLiveBytes(); 4052 p->ResetLiveBytes();
3969 return freed_bytes; 4053 return freed_bytes;
3970 } 4054 }
3971 4055
3972 4056
3973 void MarkCompactCollector::SweepInParallel(PagedSpace* space, 4057 void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
3974 FreeList* private_free_list,
3975 FreeList* free_list) {
3976 PageIterator it(space); 4058 PageIterator it(space);
4059 FreeList* free_list = space == heap()->old_pointer_space()
4060 ? free_list_old_pointer_space_.get()
4061 : free_list_old_data_space_.get();
4062 FreeList private_free_list(space);
3977 while (it.has_next()) { 4063 while (it.has_next()) {
3978 Page* p = it.next(); 4064 Page* p = it.next();
3979 4065
3980 if (p->TryParallelSweeping()) { 4066 if (p->TryParallelSweeping()) {
3981 SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p); 4067 SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
3982 free_list->Concatenate(private_free_list); 4068 free_list->Concatenate(&private_free_list);
3983 } 4069 }
3984 } 4070 }
3985 } 4071 }
3986 4072
3987 4073
3988 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { 4074 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
3989 space->set_was_swept_conservatively(sweeper == CONSERVATIVE || 4075 space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
3990 sweeper == LAZY_CONSERVATIVE || 4076 sweeper == LAZY_CONSERVATIVE ||
3991 sweeper == PARALLEL_CONSERVATIVE || 4077 sweeper == PARALLEL_CONSERVATIVE ||
3992 sweeper == CONCURRENT_CONSERVATIVE); 4078 sweeper == CONCURRENT_CONSERVATIVE);
3993 space->ClearStats(); 4079 space->ClearStats();
3994 4080
3995 PageIterator it(space); 4081 PageIterator it(space);
3996 4082
3997 int pages_swept = 0; 4083 int pages_swept = 0;
3998 bool lazy_sweeping_active = false; 4084 bool lazy_sweeping_active = false;
3999 bool unused_page_present = false; 4085 bool unused_page_present = false;
4000 bool parallel_sweeping_active = false; 4086 bool parallel_sweeping_active = false;
4001 4087
4002 while (it.has_next()) { 4088 while (it.has_next()) {
4003 Page* p = it.next(); 4089 Page* p = it.next();
4004 4090
4005 ASSERT(p->parallel_sweeping() == 0); 4091 ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
4006 ASSERT(!p->IsEvacuationCandidate()); 4092 ASSERT(!p->IsEvacuationCandidate());
4007 4093
4008 // Clear sweeping flags indicating that marking bits are still intact. 4094 // Clear sweeping flags indicating that marking bits are still intact.
4009 p->ClearSweptPrecisely(); 4095 p->ClearSweptPrecisely();
4010 p->ClearSweptConservatively(); 4096 p->ClearSweptConservatively();
4011 4097
4012 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { 4098 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
4013 // Will be processed in EvacuateNewSpaceAndCandidates. 4099 // Will be processed in EvacuateNewSpaceAndCandidates.
4014 ASSERT(evacuation_candidates_.length() > 0); 4100 ASSERT(evacuation_candidates_.length() > 0);
4015 continue; 4101 continue;
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after
4068 reinterpret_cast<intptr_t>(p)); 4154 reinterpret_cast<intptr_t>(p));
4069 } 4155 }
4070 SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p); 4156 SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4071 pages_swept++; 4157 pages_swept++;
4072 parallel_sweeping_active = true; 4158 parallel_sweeping_active = true;
4073 } else { 4159 } else {
4074 if (FLAG_gc_verbose) { 4160 if (FLAG_gc_verbose) {
4075 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n", 4161 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
4076 reinterpret_cast<intptr_t>(p)); 4162 reinterpret_cast<intptr_t>(p));
4077 } 4163 }
4078 p->set_parallel_sweeping(1); 4164 p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
4079 space->IncreaseUnsweptFreeBytes(p); 4165 space->IncreaseUnsweptFreeBytes(p);
4080 } 4166 }
4081 break; 4167 break;
4082 } 4168 }
4083 case PRECISE: { 4169 case PRECISE: {
4084 if (FLAG_gc_verbose) { 4170 if (FLAG_gc_verbose) {
4085 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", 4171 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
4086 reinterpret_cast<intptr_t>(p)); 4172 reinterpret_cast<intptr_t>(p));
4087 } 4173 }
4088 if (space->identity() == CODE_SPACE) { 4174 if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
4089 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL); 4175 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
4176 space, p, NULL);
4177 } else if (space->identity() == CODE_SPACE) {
4178 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
4179 space, p, NULL);
4090 } else { 4180 } else {
4091 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL); 4181 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
4182 space, p, NULL);
4092 } 4183 }
4093 pages_swept++; 4184 pages_swept++;
4094 break; 4185 break;
4095 } 4186 }
4096 default: { 4187 default: {
4097 UNREACHABLE(); 4188 UNREACHABLE();
4098 } 4189 }
4099 } 4190 }
4100 } 4191 }
4101 4192
4102 if (FLAG_gc_verbose) { 4193 if (FLAG_gc_verbose) {
4103 PrintF("SweepSpace: %s (%d pages swept)\n", 4194 PrintF("SweepSpace: %s (%d pages swept)\n",
4104 AllocationSpaceName(space->identity()), 4195 AllocationSpaceName(space->identity()),
4105 pages_swept); 4196 pages_swept);
4106 } 4197 }
4107 4198
4108 // Give pages that are queued to be freed back to the OS. 4199 // Give pages that are queued to be freed back to the OS.
4109 heap()->FreeQueuedChunks(); 4200 heap()->FreeQueuedChunks();
4110 } 4201 }
4111 4202
4112 4203
4113 void MarkCompactCollector::SweepSpaces() { 4204 void MarkCompactCollector::SweepSpaces() {
4114 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); 4205 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
4115 #ifdef DEBUG 4206 #ifdef DEBUG
4116 state_ = SWEEP_SPACES; 4207 state_ = SWEEP_SPACES;
4117 #endif 4208 #endif
4118 SweeperType how_to_sweep = 4209 SweeperType how_to_sweep =
4119 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; 4210 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
4120 if (isolate()->num_sweeper_threads() > 0) { 4211 if (AreSweeperThreadsActivated()) {
4121 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE; 4212 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
4122 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE; 4213 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
4123 } 4214 }
4124 if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
4125 if (sweep_precisely_) how_to_sweep = PRECISE; 4215 if (sweep_precisely_) how_to_sweep = PRECISE;
4126 4216
4127 // Unlink evacuation candidates before sweeper threads access the list of 4217 // Unlink evacuation candidates before sweeper threads access the list of
4128 // pages to avoid race condition. 4218 // pages to avoid race condition.
4129 UnlinkEvacuationCandidates(); 4219 UnlinkEvacuationCandidates();
4130 4220
4131 // Noncompacting collections simply sweep the spaces to clear the mark 4221 // Noncompacting collections simply sweep the spaces to clear the mark
4132 // bits and free the nonlive blocks (for old and map spaces). We sweep 4222 // bits and free the nonlive blocks (for old and map spaces). We sweep
4133 // the map space last because freeing non-live maps overwrites them and 4223 // the map space last because freeing non-live maps overwrites them and
4134 // the other spaces rely on possibly non-live maps to get the sizes for 4224 // the other spaces rely on possibly non-live maps to get the sizes for
(...skipping 26 matching lines...) Expand all
4161 SweepSpace(heap()->map_space(), PRECISE); 4251 SweepSpace(heap()->map_space(), PRECISE);
4162 4252
4163 // Deallocate unmarked objects and clear marked bits for marked objects. 4253 // Deallocate unmarked objects and clear marked bits for marked objects.
4164 heap_->lo_space()->FreeUnmarkedObjects(); 4254 heap_->lo_space()->FreeUnmarkedObjects();
4165 4255
4166 // Deallocate evacuated candidate pages. 4256 // Deallocate evacuated candidate pages.
4167 ReleaseEvacuationCandidates(); 4257 ReleaseEvacuationCandidates();
4168 } 4258 }
4169 4259
4170 4260
4261 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
4262 PageIterator it(space);
4263 while (it.has_next()) {
4264 Page* p = it.next();
4265 if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_IN_PROGRESS) {
4266 p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE);
4267 p->MarkSweptConservatively();
4268 }
4269 }
4270 }
4271
4272
4273 void MarkCompactCollector::ParallelSweepSpacesComplete() {
4274 ParallelSweepSpaceComplete(heap()->old_pointer_space());
4275 ParallelSweepSpaceComplete(heap()->old_data_space());
4276 }
4277
4278
4171 void MarkCompactCollector::EnableCodeFlushing(bool enable) { 4279 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
4172 #ifdef ENABLE_DEBUGGER_SUPPORT 4280 #ifdef ENABLE_DEBUGGER_SUPPORT
4173 if (isolate()->debug()->IsLoaded() || 4281 if (isolate()->debug()->IsLoaded() ||
4174 isolate()->debug()->has_break_points()) { 4282 isolate()->debug()->has_break_points()) {
4175 enable = false; 4283 enable = false;
4176 } 4284 }
4177 #endif 4285 #endif
4178 4286
4179 if (enable) { 4287 if (enable) {
4180 if (code_flusher_ != NULL) return; 4288 if (code_flusher_ != NULL) return;
(...skipping 188 matching lines...) Expand 10 before | Expand all | Expand 10 after
4369 while (buffer != NULL) { 4477 while (buffer != NULL) {
4370 SlotsBuffer* next_buffer = buffer->next(); 4478 SlotsBuffer* next_buffer = buffer->next();
4371 DeallocateBuffer(buffer); 4479 DeallocateBuffer(buffer);
4372 buffer = next_buffer; 4480 buffer = next_buffer;
4373 } 4481 }
4374 *buffer_address = NULL; 4482 *buffer_address = NULL;
4375 } 4483 }
4376 4484
4377 4485
4378 } } // namespace v8::internal 4486 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/mark-compact.h ('k') | src/messages.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698