OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 40 matching lines...)
51 page_parallel_job_semaphore_(0), | 51 page_parallel_job_semaphore_(0), |
52 #ifdef DEBUG | 52 #ifdef DEBUG |
53 state_(IDLE), | 53 state_(IDLE), |
54 #endif | 54 #endif |
55 marking_parity_(ODD_MARKING_PARITY), | 55 marking_parity_(ODD_MARKING_PARITY), |
56 was_marked_incrementally_(false), | 56 was_marked_incrementally_(false), |
57 evacuation_(false), | 57 evacuation_(false), |
58 compacting_(false), | 58 compacting_(false), |
59 black_allocation_(false), | 59 black_allocation_(false), |
60 have_code_to_deoptimize_(false), | 60 have_code_to_deoptimize_(false), |
| 61 marking_deque_(heap), |
61 code_flusher_(nullptr), | 62 code_flusher_(nullptr), |
62 sweeper_(heap) { | 63 sweeper_(heap) { |
63 } | 64 } |
64 | 65 |
65 #ifdef VERIFY_HEAP | 66 #ifdef VERIFY_HEAP |
66 class VerifyMarkingVisitor : public ObjectVisitor { | 67 class VerifyMarkingVisitor : public ObjectVisitor { |
67 public: | 68 public: |
68 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} | 69 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} |
69 | 70 |
70 void VisitPointers(Object** start, Object** end) override { | 71 void VisitPointers(Object** start, Object** end) override { |
(...skipping 2039 matching lines...)
2110 } | 2111 } |
2111 | 2112 |
2112 void MarkingDeque::SetUp() { | 2113 void MarkingDeque::SetUp() { |
2113 backing_store_ = new base::VirtualMemory(kMaxSize); | 2114 backing_store_ = new base::VirtualMemory(kMaxSize); |
2114 backing_store_committed_size_ = 0; | 2115 backing_store_committed_size_ = 0; |
2115 if (backing_store_ == nullptr) { | 2116 if (backing_store_ == nullptr) { |
2116 V8::FatalProcessOutOfMemory("MarkingDeque::SetUp"); | 2117 V8::FatalProcessOutOfMemory("MarkingDeque::SetUp"); |
2117 } | 2118 } |
2118 } | 2119 } |
2119 | 2120 |
2120 void MarkingDeque::TearDown() { delete backing_store_; } | 2121 void MarkingDeque::TearDown() { |
| 2122 CancelOrWaitForUncommitTask(); |
| 2123 delete backing_store_; |
| 2124 } |
2121 | 2125 |
2122 void MarkingDeque::StartUsing() { | 2126 void MarkingDeque::StartUsing() { |
| 2127 base::LockGuard<base::Mutex> guard(&mutex_); |
2123 if (in_use_) { | 2128 if (in_use_) { |
2124 // This can happen in mark-compact GC if the incremental marker already | 2129 // This can happen in mark-compact GC if the incremental marker already |
2125 // started using the marking deque. | 2130 // started using the marking deque. |
2126 return; | 2131 return; |
2127 } | 2132 } |
2128 in_use_ = true; | 2133 in_use_ = true; |
2129 EnsureCommitted(); | 2134 EnsureCommitted(); |
2130 array_ = reinterpret_cast<HeapObject**>(backing_store_->address()); | 2135 array_ = reinterpret_cast<HeapObject**>(backing_store_->address()); |
2131 size_t size = FLAG_force_marking_deque_overflows | 2136 size_t size = FLAG_force_marking_deque_overflows |
2132 ? 64 * kPointerSize | 2137 ? 64 * kPointerSize |
2133 : backing_store_committed_size_; | 2138 : backing_store_committed_size_; |
2134 DCHECK( | 2139 DCHECK( |
2135 base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize))); | 2140 base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize))); |
2136 mask_ = static_cast<int>((size / kPointerSize) - 1); | 2141 mask_ = static_cast<int>((size / kPointerSize) - 1); |
2137 top_ = bottom_ = 0; | 2142 top_ = bottom_ = 0; |
2138 overflowed_ = false; | 2143 overflowed_ = false; |
2139 } | 2144 } |
2140 | 2145 |
2141 void MarkingDeque::StopUsing() { | 2146 void MarkingDeque::StopUsing() { |
| 2147 base::LockGuard<base::Mutex> guard(&mutex_); |
2142 DCHECK(IsEmpty()); | 2148 DCHECK(IsEmpty()); |
2143 DCHECK(!overflowed_); | 2149 DCHECK(!overflowed_); |
2144 top_ = bottom_ = mask_ = 0; | 2150 top_ = bottom_ = mask_ = 0; |
2145 Uncommit(); | |
2146 in_use_ = false; | 2151 in_use_ = false; |
| 2152 if (FLAG_concurrent_sweeping) { |
| 2153 StartUncommitTask(); |
| 2154 } else { |
| 2155 Uncommit(); |
| 2156 } |
2147 } | 2157 } |
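
Note: the locking and task bookkeeping introduced above use MarkingDeque members (mutex_, uncommit_task_pending_, uncommit_task_id_, uncommit_task_barrier_, heap_) whose declarations live in src/heap/mark-compact.h and are not part of this hunk. A minimal sketch of what the header side could look like, with types inferred from how the fields are used in this file (the exact types and names of anything not appearing above are assumptions):

  // Assumed additions to class MarkingDeque in src/heap/mark-compact.h (sketch only).
  class UncommitTask;                              // Defined in mark-compact.cc.
  base::Mutex mutex_;                              // Guards in_use_ and the task state.
  base::ConditionVariable uncommit_task_barrier_;  // Notified when the task finishes.
  uint32_t uncommit_task_id_;                      // Id of the currently posted task.
  bool uncommit_task_pending_;                     // True while an uncommit task is posted.
  Heap* heap_;                                     // Used to reach the isolate and platform.
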
2148 | 2158 |
2149 void MarkingDeque::Clear() { | 2159 void MarkingDeque::Clear() { |
2150 DCHECK(in_use_); | 2160 DCHECK(in_use_); |
2151 top_ = bottom_ = 0; | 2161 top_ = bottom_ = 0; |
2152 overflowed_ = false; | 2162 overflowed_ = false; |
2153 } | 2163 } |
2154 | 2164 |
2155 void MarkingDeque::Uncommit() { | 2165 void MarkingDeque::Uncommit() { |
2156 DCHECK(in_use_); | 2166 DCHECK(!in_use_); |
2157 bool success = backing_store_->Uncommit(backing_store_->address(), | 2167 bool success = backing_store_->Uncommit(backing_store_->address(), |
2158 backing_store_committed_size_); | 2168 backing_store_committed_size_); |
2159 backing_store_committed_size_ = 0; | 2169 backing_store_committed_size_ = 0; |
2160 CHECK(success); | 2170 CHECK(success); |
2161 } | 2171 } |
2162 | 2172 |
2163 void MarkingDeque::EnsureCommitted() { | 2173 void MarkingDeque::EnsureCommitted() { |
2164 DCHECK(in_use_); | 2174 DCHECK(in_use_); |
2165 if (backing_store_committed_size_ > 0) return; | 2175 if (backing_store_committed_size_ > 0) return; |
2166 | 2176 |
2167 for (size_t size = kMaxSize; size >= kMinSize; size /= 2) { | 2177 for (size_t size = kMaxSize; size >= kMinSize; size /= 2) { |
2168 if (backing_store_->Commit(backing_store_->address(), size, false)) { | 2178 if (backing_store_->Commit(backing_store_->address(), size, false)) { |
2169 backing_store_committed_size_ = size; | 2179 backing_store_committed_size_ = size; |
2170 break; | 2180 break; |
2171 } | 2181 } |
2172 } | 2182 } |
2173 if (backing_store_committed_size_ == 0) { | 2183 if (backing_store_committed_size_ == 0) { |
2174 V8::FatalProcessOutOfMemory("MarkingDeque::EnsureCommitted"); | 2184 V8::FatalProcessOutOfMemory("MarkingDeque::EnsureCommitted"); |
2175 } | 2185 } |
2176 } | 2186 } |
2177 | 2187 |
| 2188 void MarkingDeque::StartUncommitTask() { |
| 2189 if (!uncommit_task_pending_) { |
| 2190 UncommitTask* task = new UncommitTask(heap_->isolate(), this); |
| 2191 uncommit_task_id_ = task->id(); |
| 2192 uncommit_task_pending_ = true; |
| 2193 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 2194 task, v8::Platform::kShortRunningTask); |
| 2195 } |
| 2196 } |
| 2197 |
| 2198 void MarkingDeque::CancelOrWaitForUncommitTask() { |
| 2199 base::LockGuard<base::Mutex> guard(&mutex_); |
| 2200 if (!uncommit_task_pending_ || |
| 2201 heap_->isolate()->cancelable_task_manager()->TryAbort( |
| 2202 uncommit_task_id_) != CancelableTaskManager::kTaskRunning) { |
| 2203 return; |
| 2204 } |
| 2205 while (uncommit_task_pending_) { |
| 2206 uncommit_task_barrier_.Wait(&mutex_); |
| 2207 } |
| 2208 } |
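
The UncommitTask referenced by StartUncommitTask() is not visible in this excerpt; its definition presumably sits in one of the skipped regions of the file. For the wait loop in CancelOrWaitForUncommitTask() to terminate, the task has to clear uncommit_task_pending_ and notify uncommit_task_barrier_ under the same mutex. A minimal sketch of how such a task could look, assuming the CancelableTask base class V8 uses for background work at this revision (the class layout and any member names not appearing above are assumptions):

  class MarkingDeque::UncommitTask : public CancelableTask {
   public:
    UncommitTask(Isolate* isolate, MarkingDeque* deque)
        : CancelableTask(isolate), deque_(deque) {}

   private:
    void RunInternal() override {
      base::LockGuard<base::Mutex> guard(&deque_->mutex_);
      // StartUsing() may have taken the deque back since the task was posted;
      // only uncommit the backing store if it is still unused.
      if (!deque_->in_use_) {
        deque_->Uncommit();
      }
      deque_->uncommit_task_pending_ = false;
      // Wake up CancelOrWaitForUncommitTask(), which waits on this barrier.
      deque_->uncommit_task_barrier_.NotifyOne();
    }

    MarkingDeque* deque_;
  };

This moves the cost of decommitting the backing store off the main thread when concurrent sweeping is enabled, while TearDown() synchronizes with the task through CancelOrWaitForUncommitTask() before deleting the backing store.
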
| 2209 |
2178 class MarkCompactCollector::ObjectStatsVisitor | 2210 class MarkCompactCollector::ObjectStatsVisitor |
2179 : public MarkCompactCollector::HeapObjectVisitor { | 2211 : public MarkCompactCollector::HeapObjectVisitor { |
2180 public: | 2212 public: |
2181 ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats, | 2213 ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats, |
2182 ObjectStats* dead_stats) | 2214 ObjectStats* dead_stats) |
2183 : live_collector_(heap, live_stats), dead_collector_(heap, dead_stats) { | 2215 : live_collector_(heap, live_stats), dead_collector_(heap, dead_stats) { |
2184 DCHECK_NOT_NULL(live_stats); | 2216 DCHECK_NOT_NULL(live_stats); |
2185 DCHECK_NOT_NULL(dead_stats); | 2217 DCHECK_NOT_NULL(dead_stats); |
2186 // Global objects are roots and thus recorded as live. | 2218 // Global objects are roots and thus recorded as live. |
2187 live_collector_.CollectGlobalStatistics(); | 2219 live_collector_.CollectGlobalStatistics(); |
(...skipping 1706 matching lines...)
3894 // The target is always in old space, so we don't have to record the slot | 3926 // The target is always in old space, so we don't have to record the slot |
3895 // in the old-to-new remembered set. | 3927 // in the old-to-new remembered set. |
3896 DCHECK(!heap()->InNewSpace(target)); | 3928 DCHECK(!heap()->InNewSpace(target)); |
3897 RecordRelocSlot(host, &rinfo, target); | 3929 RecordRelocSlot(host, &rinfo, target); |
3898 } | 3930 } |
3899 } | 3931 } |
3900 } | 3932 } |
3901 | 3933 |
3902 } // namespace internal | 3934 } // namespace internal |
3903 } // namespace v8 | 3935 } // namespace v8 |