| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
| (...skipping 40 matching lines...) |
| 51 page_parallel_job_semaphore_(0), | 51 page_parallel_job_semaphore_(0), |
| 52 #ifdef DEBUG | 52 #ifdef DEBUG |
| 53 state_(IDLE), | 53 state_(IDLE), |
| 54 #endif | 54 #endif |
| 55 marking_parity_(ODD_MARKING_PARITY), | 55 marking_parity_(ODD_MARKING_PARITY), |
| 56 was_marked_incrementally_(false), | 56 was_marked_incrementally_(false), |
| 57 evacuation_(false), | 57 evacuation_(false), |
| 58 compacting_(false), | 58 compacting_(false), |
| 59 black_allocation_(false), | 59 black_allocation_(false), |
| 60 have_code_to_deoptimize_(false), | 60 have_code_to_deoptimize_(false), |
| 61 marking_deque_(heap), | |
| 62 code_flusher_(nullptr), | 61 code_flusher_(nullptr), |
| 63 sweeper_(heap) { | 62 sweeper_(heap) { |
| 64 } | 63 } |
| 65 | 64 |
| 66 #ifdef VERIFY_HEAP | 65 #ifdef VERIFY_HEAP |
| 67 class VerifyMarkingVisitor : public ObjectVisitor { | 66 class VerifyMarkingVisitor : public ObjectVisitor { |
| 68 public: | 67 public: |
| 69 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} | 68 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} |
| 70 | 69 |
| 71 void VisitPointers(Object** start, Object** end) override { | 70 void VisitPointers(Object** start, Object** end) override { |
| (...skipping 2037 matching lines...) |
| 2109 } | 2108 } |
| 2110 | 2109 |
| 2111 void MarkingDeque::SetUp() { | 2110 void MarkingDeque::SetUp() { |
| 2112 backing_store_ = new base::VirtualMemory(kMaxSize); | 2111 backing_store_ = new base::VirtualMemory(kMaxSize); |
| 2113 backing_store_committed_size_ = 0; | 2112 backing_store_committed_size_ = 0; |
| 2114 if (backing_store_ == nullptr) { | 2113 if (backing_store_ == nullptr) { |
| 2115 V8::FatalProcessOutOfMemory("MarkingDeque::SetUp"); | 2114 V8::FatalProcessOutOfMemory("MarkingDeque::SetUp"); |
| 2116 } | 2115 } |
| 2117 } | 2116 } |
| 2118 | 2117 |
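The SetUp/EnsureCommitted pair separates reserving address space (done once, for kMaxSize) from committing pages (done lazily, on first use). Below is a minimal POSIX sketch of that reserve-then-commit pattern; the sizes and the use of mmap/mprotect are illustrative stand-ins for what base::VirtualMemory does internally, and the code is Linux-specific.

```cpp
#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kMaxSize = 4 * 1024 * 1024;  // illustrative, not V8's value
  // Reserve: no accessible pages yet, just a contiguous address range.
  void* reservation = mmap(nullptr, kMaxSize, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (reservation == MAP_FAILED) return 1;  // analogous to the OOM bailout
  // Commit a prefix on first use, as EnsureCommitted does.
  const size_t kCommitSize = 64 * 1024;
  if (mprotect(reservation, kCommitSize, PROT_READ | PROT_WRITE) != 0)
    return 1;
  static_cast<char*>(reservation)[0] = 1;  // the committed prefix is usable
  std::printf("reserved %zu bytes, committed %zu\n", kMaxSize, kCommitSize);
  munmap(reservation, kMaxSize);
  return 0;
}
```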
| 2119 void MarkingDeque::TearDown() { | 2118 void MarkingDeque::TearDown() { delete backing_store_; } |
| 2120 CancelOrWaitForUncommitTask(); | |
| 2121 delete backing_store_; | |
| 2122 } | |
| 2123 | 2119 |
| 2124 void MarkingDeque::StartUsing() { | 2120 void MarkingDeque::StartUsing() { |
| 2125 base::LockGuard<base::Mutex> guard(&mutex_); | |
| 2126 if (in_use_) { | 2121 if (in_use_) { |
| 2127 // This can happen in mark-compact GC if the incremental marker already | 2122 // This can happen in mark-compact GC if the incremental marker already |
| 2128 // started using the marking deque. | 2123 // started using the marking deque. |
| 2129 return; | 2124 return; |
| 2130 } | 2125 } |
| 2131 in_use_ = true; | 2126 in_use_ = true; |
| 2132 EnsureCommitted(); | 2127 EnsureCommitted(); |
| 2133 array_ = reinterpret_cast<HeapObject**>(backing_store_->address()); | 2128 array_ = reinterpret_cast<HeapObject**>(backing_store_->address()); |
| 2134 size_t size = FLAG_force_marking_deque_overflows | 2129 size_t size = FLAG_force_marking_deque_overflows |
| 2135 ? 64 * kPointerSize | 2130 ? 64 * kPointerSize |
| 2136 : backing_store_committed_size_; | 2131 : backing_store_committed_size_; |
| 2137 DCHECK( | 2132 DCHECK( |
| 2138 base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize))); | 2133 base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize))); |
| 2139 mask_ = static_cast<int>((size / kPointerSize) - 1); | 2134 mask_ = static_cast<int>((size / kPointerSize) - 1); |
| 2140 top_ = bottom_ = 0; | 2135 top_ = bottom_ = 0; |
| 2141 overflowed_ = false; | 2136 overflowed_ = false; |
| 2142 } | 2137 } |
| 2143 | 2138 |
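StartUsing sizes the deque to a power-of-two number of pointer slots (the DCHECK above enforces this), so mask_ = size / kPointerSize - 1 lets ring-buffer indices wrap with a single bitwise AND instead of a modulo. A self-contained sketch of that indexing trick follows; RingBuffer and its int payload are illustrative, not V8 types.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

// With a power-of-two capacity, "& mask" replaces "% capacity" on wrap.
class RingBuffer {
 public:
  explicit RingBuffer(size_t capacity)
      : slots_(capacity), mask_(capacity - 1), top_(0), bottom_(0) {
    // Mirrors the DCHECK in StartUsing: capacity must be a power of two.
    assert(capacity != 0 && (capacity & (capacity - 1)) == 0);
  }

  bool Push(int value) {
    size_t next = (top_ + 1) & mask_;
    if (next == bottom_) return false;  // full: one slot stays free
    slots_[top_] = value;
    top_ = next;
    return true;
  }

  bool Pop(int* value) {
    if (top_ == bottom_) return false;  // empty
    top_ = (top_ - 1) & mask_;          // wraps even when top_ underflows
    *value = slots_[top_];
    return true;
  }

 private:
  std::vector<int> slots_;
  size_t mask_;
  size_t top_;
  size_t bottom_;
};

int main() {
  RingBuffer buffer(64);  // a power of two, like 64 * kPointerSize above
  buffer.Push(1);
  buffer.Push(2);
  int value;
  while (buffer.Pop(&value)) std::printf("%d\n", value);  // 2, then 1
  return 0;
}
```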
| 2144 void MarkingDeque::StopUsing() { | 2139 void MarkingDeque::StopUsing() { |
| 2145 base::LockGuard<base::Mutex> guard(&mutex_); | |
| 2146 DCHECK(IsEmpty()); | 2140 DCHECK(IsEmpty()); |
| 2147 DCHECK(!overflowed_); | 2141 DCHECK(!overflowed_); |
| 2148 top_ = bottom_ = mask_ = 0; | 2142 top_ = bottom_ = mask_ = 0; |
| 2143 Uncommit(); |
| 2149 in_use_ = false; | 2144 in_use_ = false; |
| 2150 if (FLAG_concurrent_sweeping) { | |
| 2151 StartUncommitTask(); | |
| 2152 } else { | |
| 2153 Uncommit(); | |
| 2154 } | |
| 2155 } | 2145 } |
| 2156 | 2146 |
| 2157 void MarkingDeque::Clear() { | 2147 void MarkingDeque::Clear() { |
| 2158 DCHECK(in_use_); | 2148 DCHECK(in_use_); |
| 2159 top_ = bottom_ = 0; | 2149 top_ = bottom_ = 0; |
| 2160 overflowed_ = false; | 2150 overflowed_ = false; |
| 2161 } | 2151 } |
| 2162 | 2152 |
| 2163 void MarkingDeque::Uncommit() { | 2153 void MarkingDeque::Uncommit() { |
| 2164 DCHECK(!in_use_); | 2154 DCHECK(in_use_); |
| 2165 bool success = backing_store_->Uncommit(backing_store_->address(), | 2155 bool success = backing_store_->Uncommit(backing_store_->address(), |
| 2166 backing_store_committed_size_); | 2156 backing_store_committed_size_); |
| 2167 backing_store_committed_size_ = 0; | 2157 backing_store_committed_size_ = 0; |
| 2168 CHECK(success); | 2158 CHECK(success); |
| 2169 } | 2159 } |
| 2170 | 2160 |
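With the background task gone, the NEW code uncommits synchronously from inside StopUsing, before in_use_ is cleared, which is why Uncommit's precondition flips from DCHECK(!in_use_) to DCHECK(in_use_). A tiny runnable model of that ordering, with assert standing in for DCHECK and every name illustrative:

```cpp
#include <cassert>
#include <cstddef>

struct DequeModel {
  bool in_use = false;
  size_t committed_bytes = 0;

  void Uncommit() {
    assert(in_use);       // NEW precondition: still in use when called
    committed_bytes = 0;
  }

  void StopUsing() {
    Uncommit();           // runs inline, before in_use is cleared
    in_use = false;
  }
};

int main() {
  DequeModel deque;
  deque.in_use = true;    // as StartUsing would do
  deque.StopUsing();
  return 0;
}
```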
| 2171 void MarkingDeque::EnsureCommitted() { | 2161 void MarkingDeque::EnsureCommitted() { |
| 2172 DCHECK(in_use_); | 2162 DCHECK(in_use_); |
| 2173 if (backing_store_committed_size_ > 0) return; | 2163 if (backing_store_committed_size_ > 0) return; |
| 2174 | 2164 |
| 2175 for (size_t size = kMaxSize; size >= kMinSize; size /= 2) { | 2165 for (size_t size = kMaxSize; size >= kMinSize; size /= 2) { |
| 2176 if (backing_store_->Commit(backing_store_->address(), size, false)) { | 2166 if (backing_store_->Commit(backing_store_->address(), size, false)) { |
| 2177 backing_store_committed_size_ = size; | 2167 backing_store_committed_size_ = size; |
| 2178 break; | 2168 break; |
| 2179 } | 2169 } |
| 2180 } | 2170 } |
| 2181 if (backing_store_committed_size_ == 0) { | 2171 if (backing_store_committed_size_ == 0) { |
| 2182 V8::FatalProcessOutOfMemory("MarkingDeque::EnsureCommitted"); | 2172 V8::FatalProcessOutOfMemory("MarkingDeque::EnsureCommitted"); |
| 2183 } | 2173 } |
| 2184 } | 2174 } |
| 2185 | 2175 |
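EnsureCommitted retries with a halving backoff: it first tries to commit kMaxSize, halves the request on each failure, and treats anything below kMinSize as fatal. Here is a runnable sketch of the same loop; TryCommit, CommitWithBackoff, and the byte counts are hypothetical stand-ins for base::VirtualMemory::Commit and V8's constants.

```cpp
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for base::VirtualMemory::Commit; it pretends
// only requests up to commit_limit can be satisfied.
static bool TryCommit(size_t bytes, size_t commit_limit) {
  return bytes <= commit_limit;
}

// Mirrors EnsureCommitted's backoff: start at the maximum size and halve
// until a commit succeeds or the minimum size fails too.
static size_t CommitWithBackoff(size_t max_size, size_t min_size,
                                size_t commit_limit) {
  for (size_t size = max_size; size >= min_size; size /= 2) {
    if (TryCommit(size, commit_limit)) return size;
  }
  std::fprintf(stderr, "out of memory\n");
  std::abort();  // the real code calls V8::FatalProcessOutOfMemory
}

int main() {
  // With only 96 KB available, 256 KB and 128 KB fail; 64 KB succeeds.
  size_t committed = CommitWithBackoff(256 * 1024, 4 * 1024, 96 * 1024);
  std::printf("committed %zu bytes\n", committed);
  return 0;
}
```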
| 2186 void MarkingDeque::StartUncommitTask() { | |
| 2187 if (!uncommit_task_pending_) { | |
| 2188 UncommitTask* task = new UncommitTask(heap_->isolate(), this); | |
| 2189 uncommit_task_id_ = task->id(); | |
| 2190 uncommit_task_pending_ = true; | |
| 2191 V8::GetCurrentPlatform()->CallOnBackgroundThread( | |
| 2192 task, v8::Platform::kShortRunningTask); | |
| 2193 } | |
| 2194 } | |
| 2195 | |
| 2196 void MarkingDeque::CancelOrWaitForUncommitTask() { | |
| 2197 base::LockGuard<base::Mutex> guard(&mutex_); | |
| 2198 if (!uncommit_task_pending_ || | |
| 2199 heap_->isolate()->cancelable_task_manager()->TryAbort( | |
| 2200 uncommit_task_id_)) { | |
| 2201 return; | |
| 2202 } | |
| 2203 while (uncommit_task_pending_) { | |
| 2204 uncommit_task_barrier_.Wait(&mutex_); | |
| 2205 } | |
| 2206 } | |
| 2207 | |
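The two functions deleted above implemented a cancel-or-wait handshake: StartUncommitTask posted a cancelable background task, and CancelOrWaitForUncommitTask either aborted it through the cancelable task manager or blocked on a condition variable until it finished. A rough reconstruction of that handshake on std::thread primitives, with every name (UncommitScheduler, pending_, and so on) illustrative rather than V8's:

```cpp
#include <condition_variable>
#include <mutex>
#include <thread>

class UncommitScheduler {
 public:
  void Start() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (pending_) return;  // like the !uncommit_task_pending_ check
    pending_ = true;
    cancelled_ = false;
    worker_ = std::thread([this] { Run(); });
  }

  // Like CancelOrWaitForUncommitTask: request cancellation, then block
  // until the worker clears pending_ and signals the condition variable.
  void CancelOrWait() {
    {
      std::unique_lock<std::mutex> lock(mutex_);
      cancelled_ = true;
      barrier_.wait(lock, [this] { return !pending_; });
    }
    if (worker_.joinable()) worker_.join();
  }

 private:
  void Run() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (!cancelled_) {
      // ... the real task would uncommit the backing store here ...
    }
    pending_ = false;
    barrier_.notify_all();
  }

  std::mutex mutex_;
  std::condition_variable barrier_;
  std::thread worker_;
  bool pending_ = false;
  bool cancelled_ = false;
};

int main() {
  UncommitScheduler scheduler;
  scheduler.Start();
  scheduler.CancelOrWait();  // after this returns, teardown is safe
  return 0;
}
```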
| 2208 class MarkCompactCollector::ObjectStatsVisitor | 2176 class MarkCompactCollector::ObjectStatsVisitor |
| 2209 : public MarkCompactCollector::HeapObjectVisitor { | 2177 : public MarkCompactCollector::HeapObjectVisitor { |
| 2210 public: | 2178 public: |
| 2211 ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats, | 2179 ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats, |
| 2212 ObjectStats* dead_stats) | 2180 ObjectStats* dead_stats) |
| 2213 : live_collector_(heap, live_stats), dead_collector_(heap, dead_stats) { | 2181 : live_collector_(heap, live_stats), dead_collector_(heap, dead_stats) { |
| 2214 DCHECK_NOT_NULL(live_stats); | 2182 DCHECK_NOT_NULL(live_stats); |
| 2215 DCHECK_NOT_NULL(dead_stats); | 2183 DCHECK_NOT_NULL(dead_stats); |
| 2216 // Global objects are roots and thus recorded as live. | 2184 // Global objects are roots and thus recorded as live. |
| 2217 live_collector_.CollectGlobalStatistics(); | 2185 live_collector_.CollectGlobalStatistics(); |
| (...skipping 1706 matching lines...) |
| 3924 // The target is always in old space, we don't have to record the slot in | 3892 // The target is always in old space, we don't have to record the slot in |
| 3925 // the old-to-new remembered set. | 3893 // the old-to-new remembered set. |
| 3926 DCHECK(!heap()->InNewSpace(target)); | 3894 DCHECK(!heap()->InNewSpace(target)); |
| 3927 RecordRelocSlot(host, &rinfo, target); | 3895 RecordRelocSlot(host, &rinfo, target); |
| 3928 } | 3896 } |
| 3929 } | 3897 } |
| 3930 } | 3898 } |
| 3931 | 3899 |
| 3932 } // namespace internal | 3900 } // namespace internal |
| 3933 } // namespace v8 | 3901 } // namespace v8 |