OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 40 matching lines...) |
51 page_parallel_job_semaphore_(0), | 51 page_parallel_job_semaphore_(0), |
52 #ifdef DEBUG | 52 #ifdef DEBUG |
53 state_(IDLE), | 53 state_(IDLE), |
54 #endif | 54 #endif |
55 marking_parity_(ODD_MARKING_PARITY), | 55 marking_parity_(ODD_MARKING_PARITY), |
56 was_marked_incrementally_(false), | 56 was_marked_incrementally_(false), |
57 evacuation_(false), | 57 evacuation_(false), |
58 compacting_(false), | 58 compacting_(false), |
59 black_allocation_(false), | 59 black_allocation_(false), |
60 have_code_to_deoptimize_(false), | 60 have_code_to_deoptimize_(false), |
61 marking_deque_(heap), | |
61 code_flusher_(nullptr), | 62 code_flusher_(nullptr), |
62 sweeper_(heap) { | 63 sweeper_(heap) { |
63 } | 64 } |
64 | 65 |
65 #ifdef VERIFY_HEAP | 66 #ifdef VERIFY_HEAP |
66 class VerifyMarkingVisitor : public ObjectVisitor { | 67 class VerifyMarkingVisitor : public ObjectVisitor { |
67 public: | 68 public: |
68 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} | 69 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} |
69 | 70 |
70 void VisitPointers(Object** start, Object** end) override { | 71 void VisitPointers(Object** start, Object** end) override { |
(...skipping 2037 matching lines...) |
2108 } | 2109 } |
2109 | 2110 |
2110 void MarkingDeque::SetUp() { | 2111 void MarkingDeque::SetUp() { |
2111 backing_store_ = new base::VirtualMemory(kMaxSize); | 2112 backing_store_ = new base::VirtualMemory(kMaxSize); |
2112 backing_store_committed_size_ = 0; | 2113 backing_store_committed_size_ = 0; |
2113 if (backing_store_ == nullptr) { | 2114 if (backing_store_ == nullptr) { |
2114 V8::FatalProcessOutOfMemory("MarkingDeque::SetUp"); | 2115 V8::FatalProcessOutOfMemory("MarkingDeque::SetUp"); |
2115 } | 2116 } |
2116 } | 2117 } |
2117 | 2118 |
2118 void MarkingDeque::TearDown() { delete backing_store_; } | 2119 void MarkingDeque::TearDown() { |
2120 CancelOrWaitForUncommitTask(); | |
2121 delete backing_store_; | |
2122 } | |
2119 | 2123 |
2120 void MarkingDeque::StartUsing() { | 2124 void MarkingDeque::StartUsing() { |
2125 base::LockGuard<base::Mutex> guard(&mutex_); | |
2121 if (in_use_) { | 2126 if (in_use_) { |
2122 // This can happen in mark-compact GC if the incremental marker already | 2127 // This can happen in mark-compact GC if the incremental marker already |
2123 // started using the marking deque. | 2128 // started using the marking deque. |
Michael Lippautz (2016/10/24 13:34:03): nit: This comment requires updating. |
2124 return; | 2129 return; |
2125 } | 2130 } |
2126 in_use_ = true; | 2131 in_use_ = true; |
2127 EnsureCommitted(); | 2132 EnsureCommitted(); |
2128 array_ = reinterpret_cast<HeapObject**>(backing_store_->address()); | 2133 array_ = reinterpret_cast<HeapObject**>(backing_store_->address()); |
2129 size_t size = FLAG_force_marking_deque_overflows | 2134 size_t size = FLAG_force_marking_deque_overflows |
2130 ? 64 * kPointerSize | 2135 ? 64 * kPointerSize |
2131 : backing_store_committed_size_; | 2136 : backing_store_committed_size_; |
2132 DCHECK( | 2137 DCHECK( |
2133 base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize))); | 2138 base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize))); |
2134 mask_ = static_cast<int>((size / kPointerSize) - 1); | 2139 mask_ = static_cast<int>((size / kPointerSize) - 1); |
2135 top_ = bottom_ = 0; | 2140 top_ = bottom_ = 0; |
2136 overflowed_ = false; | 2141 overflowed_ = false; |
2137 } | 2142 } |
2138 | 2143 |
2139 void MarkingDeque::StopUsing() { | 2144 void MarkingDeque::StopUsing() { |
2145 base::LockGuard<base::Mutex> guard(&mutex_); | |
2140 DCHECK(IsEmpty()); | 2146 DCHECK(IsEmpty()); |
2141 DCHECK(!overflowed_); | 2147 DCHECK(!overflowed_); |
2142 top_ = bottom_ = mask_ = 0; | 2148 top_ = bottom_ = mask_ = 0; |
2143 Uncommit(); | |
2144 in_use_ = false; | 2149 in_use_ = false; |
2150 if (FLAG_concurrent_sweeping) { | |
2151 StartUncommitTask(); | |
2152 } else { | |
2153 Uncommit(); | |
2154 } | |
2145 } | 2155 } |
2146 | 2156 |
2147 void MarkingDeque::Clear() { | 2157 void MarkingDeque::Clear() { |
2148 DCHECK(in_use_); | 2158 DCHECK(in_use_); |
2149 top_ = bottom_ = 0; | 2159 top_ = bottom_ = 0; |
2150 overflowed_ = false; | 2160 overflowed_ = false; |
2151 } | 2161 } |
2152 | 2162 |
2153 void MarkingDeque::Uncommit() { | 2163 void MarkingDeque::Uncommit() { |
2154 DCHECK(in_use_); | 2164 DCHECK(!in_use_); |
2155 bool success = backing_store_->Uncommit(backing_store_->address(), | 2165 bool success = backing_store_->Uncommit(backing_store_->address(), |
2156 backing_store_committed_size_); | 2166 backing_store_committed_size_); |
2157 backing_store_committed_size_ = 0; | 2167 backing_store_committed_size_ = 0; |
2158 CHECK(success); | 2168 CHECK(success); |
2159 } | 2169 } |
2160 | 2170 |
2161 void MarkingDeque::EnsureCommitted() { | 2171 void MarkingDeque::EnsureCommitted() { |
2162 DCHECK(in_use_); | 2172 DCHECK(in_use_); |
2163 if (backing_store_committed_size_ > 0) return; | 2173 if (backing_store_committed_size_ > 0) return; |
2164 | 2174 |
2165 for (size_t size = kMaxSize; size >= kMinSize; size /= 2) { | 2175 for (size_t size = kMaxSize; size >= kMinSize; size /= 2) { |
2166 if (backing_store_->Commit(backing_store_->address(), size, false)) { | 2176 if (backing_store_->Commit(backing_store_->address(), size, false)) { |
2167 backing_store_committed_size_ = size; | 2177 backing_store_committed_size_ = size; |
2168 break; | 2178 break; |
2169 } | 2179 } |
2170 } | 2180 } |
2171 if (backing_store_committed_size_ == 0) { | 2181 if (backing_store_committed_size_ == 0) { |
2172 V8::FatalProcessOutOfMemory("MarkingDeque::EnsureCommitted"); | 2182 V8::FatalProcessOutOfMemory("MarkingDeque::EnsureCommitted"); |
2173 } | 2183 } |
2174 } | 2184 } |
2175 | 2185 |
2186 void MarkingDeque::StartUncommitTask() { | |
2187 if (!uncommit_task_pending_) { | |
2188 UncommitTask* task = new UncommitTask(heap_->isolate(), this); | |
2189 uncommit_task_id_ = task->id(); | |
2190 uncommit_task_pending_ = true; | |
2191 V8::GetCurrentPlatform()->CallOnBackgroundThread( | |
2192 task, v8::Platform::kShortRunningTask); | |
2193 } | |
2194 } | |
2195 | |
2196 void MarkingDeque::CancelOrWaitForUncommitTask() { | |
2197 base::LockGuard<base::Mutex> guard(&mutex_); | |
2198 if (!uncommit_task_pending_ || | |
2199 heap_->isolate()->cancelable_task_manager()->TryAbort( | |
2200 uncommit_task_id_)) { | |
2201 return; | |
2202 } | |
2203 while (uncommit_task_pending_) { | |
2204 uncommit_task_barrier_.Wait(&mutex_); | |
2205 } | |
2206 } | |
2207 | |
2176 class MarkCompactCollector::ObjectStatsVisitor | 2208 class MarkCompactCollector::ObjectStatsVisitor |
2177 : public MarkCompactCollector::HeapObjectVisitor { | 2209 : public MarkCompactCollector::HeapObjectVisitor { |
2178 public: | 2210 public: |
2179 ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats, | 2211 ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats, |
2180 ObjectStats* dead_stats) | 2212 ObjectStats* dead_stats) |
2181 : live_collector_(heap, live_stats), dead_collector_(heap, dead_stats) { | 2213 : live_collector_(heap, live_stats), dead_collector_(heap, dead_stats) { |
2182 DCHECK_NOT_NULL(live_stats); | 2214 DCHECK_NOT_NULL(live_stats); |
2183 DCHECK_NOT_NULL(dead_stats); | 2215 DCHECK_NOT_NULL(dead_stats); |
2184 // Global objects are roots and thus recorded as live. | 2216 // Global objects are roots and thus recorded as live. |
2185 live_collector_.CollectGlobalStatistics(); | 2217 live_collector_.CollectGlobalStatistics(); |
(...skipping 1706 matching lines...) |
3892 // The target is always in old space, we don't have to record the slot in | 3924 // The target is always in old space, we don't have to record the slot in |
3893 // the old-to-new remembered set. | 3925 // the old-to-new remembered set. |
3894 DCHECK(!heap()->InNewSpace(target)); | 3926 DCHECK(!heap()->InNewSpace(target)); |
3895 RecordRelocSlot(host, &rinfo, target); | 3927 RecordRelocSlot(host, &rinfo, target); |
3896 } | 3928 } |
3897 } | 3929 } |
3898 } | 3930 } |
3899 | 3931 |
3900 } // namespace internal | 3932 } // namespace internal |
3901 } // namespace v8 | 3933 } // namespace v8 |
OLD | NEW |