Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(256)

Side by Side Diff: third_party/WebKit/Source/platform/heap/ThreadState.cpp

Issue 1477023003: Refactor the Heap into ThreadHeap to prepare for per thread heaps Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Refactored Created 4 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 18 matching lines...) Expand all
29 */ 29 */
30 30
31 #include "platform/heap/ThreadState.h" 31 #include "platform/heap/ThreadState.h"
32 32
33 #include "platform/ScriptForbiddenScope.h" 33 #include "platform/ScriptForbiddenScope.h"
34 #include "platform/TraceEvent.h" 34 #include "platform/TraceEvent.h"
35 #include "platform/heap/BlinkGCMemoryDumpProvider.h" 35 #include "platform/heap/BlinkGCMemoryDumpProvider.h"
36 #include "platform/heap/CallbackStack.h" 36 #include "platform/heap/CallbackStack.h"
37 #include "platform/heap/Handle.h" 37 #include "platform/heap/Handle.h"
38 #include "platform/heap/Heap.h" 38 #include "platform/heap/Heap.h"
39 #include "platform/heap/HeapPage.h"
39 #include "platform/heap/MarkingVisitor.h" 40 #include "platform/heap/MarkingVisitor.h"
41 #include "platform/heap/PagePool.h"
40 #include "platform/heap/SafePoint.h" 42 #include "platform/heap/SafePoint.h"
41 #include "public/platform/Platform.h" 43 #include "public/platform/Platform.h"
42 #include "public/platform/WebMemoryAllocatorDump.h" 44 #include "public/platform/WebMemoryAllocatorDump.h"
43 #include "public/platform/WebProcessMemoryDump.h" 45 #include "public/platform/WebProcessMemoryDump.h"
44 #include "public/platform/WebScheduler.h" 46 #include "public/platform/WebScheduler.h"
45 #include "public/platform/WebThread.h" 47 #include "public/platform/WebThread.h"
46 #include "public/platform/WebTraceLocation.h" 48 #include "public/platform/WebTraceLocation.h"
47 #include "wtf/DataLog.h" 49 #include "wtf/DataLog.h"
48 #include "wtf/Partitions.h" 50 #include "wtf/Partitions.h"
49 #include "wtf/ThreadingPrimitives.h" 51 #include "wtf/ThreadingPrimitives.h"
50 52
51 #if OS(WIN) 53 #if OS(WIN)
52 #include <stddef.h> 54 #include <stddef.h>
53 #include <windows.h> 55 #include <windows.h>
54 #include <winnt.h> 56 #include <winnt.h>
55 #elif defined(__GLIBC__) 57 #elif defined(__GLIBC__)
56 extern "C" void* __libc_stack_end; // NOLINT 58 extern "C" void* __libc_stack_end; // NOLINT
57 #endif 59 #endif
58 60
59 #if defined(MEMORY_SANITIZER) 61 #if defined(MEMORY_SANITIZER)
60 #include <sanitizer/msan_interface.h> 62 #include <sanitizer/msan_interface.h>
61 #endif 63 #endif
62 64
63 #if OS(FREEBSD) 65 #if OS(FREEBSD)
64 #include <pthread_np.h> 66 #include <pthread_np.h>
65 #endif 67 #endif
66 68
67 namespace blink { 69 namespace blink {
68 70
// Aggregated allocation/marking statistics for one GC group's heap.
// All counters start at zero except the PartitionAlloc baseline, which is
// sampled eagerly so the first GC has a meaningful "size at last GC" value.
GCHeapStats::GCHeapStats()
    : m_allocatedSpace(0)
    , m_allocatedObjectSize(0)
    , m_objectSizeAtLastGC(0)
    , m_markedObjectSize(0)
    , m_markedObjectSizeAtLastCompleteSweep(0)
    , m_wrapperCount(0)
    , m_wrapperCountAtLastGC(0)
    , m_collectedWrapperCount(0)
    , m_partitionAllocSizeAtLastGC(WTF::Partitions::totalSizeOfCommittedPages())
    , m_estimatedMarkingTimePerByte(0.0)
{
}
84
85 double GCHeapStats::estimatedMarkingTime()
86 {
87 // Use 8 ms as initial estimated marking time.
88 // 8 ms is long enough for low-end mobile devices to mark common
89 // real-world object graphs.
90 if (m_estimatedMarkingTimePerByte == 0)
91 return 0.008;
92
93 // Assuming that the collection rate of this GC will be mostly equal to
94 // the collection rate of the last GC, estimate the marking time of this GC.
95 return m_estimatedMarkingTimePerByte * (allocatedObjectSize() + markedObject Size());
96 }
97
// Snapshots end-of-GC sizes and clears the per-cycle counters. Called at the
// start of a GC cycle so growth is measured relative to this point.
void GCHeapStats::reset()
{
    m_objectSizeAtLastGC = m_allocatedObjectSize + m_markedObjectSize;
    // NOTE(review): this samples the process-wide PartitionAlloc committed
    // size even though this stats object is per GC group — presumably fine
    // while there is a single group, but verify for per-thread heaps.
    m_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages();
    m_allocatedObjectSize = 0;
    m_markedObjectSize = 0;
    m_wrapperCountAtLastGC = m_wrapperCount;
    m_collectedWrapperCount = 0;
}
107
// Atomically bumps this group's allocated-object counter and mirrors the
// delta into the process-wide total kept by Heap.
void GCHeapStats::increaseAllocatedObjectSize(size_t delta)
{
    atomicAdd(&m_allocatedObjectSize, static_cast<long>(delta));
    Heap::increaseTotalAllocatedObjectSize(delta);
}
113
// Atomic counterpart of increaseAllocatedObjectSize(); also mirrored into
// the process-wide total.
void GCHeapStats::decreaseAllocatedObjectSize(size_t delta)
{
    atomicSubtract(&m_allocatedObjectSize, static_cast<long>(delta));
    Heap::decreaseTotalAllocatedObjectSize(delta);
}
119
// Atomically bumps the marked-object counter (updated during marking) and
// mirrors the delta into the process-wide total.
void GCHeapStats::increaseMarkedObjectSize(size_t delta)
{
    atomicAdd(&m_markedObjectSize, static_cast<long>(delta));
    Heap::increaseTotalMarkedObjectSize(delta);
}
125
// Atomically bumps the allocated-space counter (page-level memory, as
// opposed to object payload) and mirrors it into the process-wide total.
void GCHeapStats::increaseAllocatedSpace(size_t delta)
{
    atomicAdd(&m_allocatedSpace, static_cast<long>(delta));
    Heap::increaseTotalAllocatedSpace(delta);
}
131
// Atomic counterpart of increaseAllocatedSpace().
void GCHeapStats::decreaseAllocatedSpace(size_t delta)
{
    atomicSubtract(&m_allocatedSpace, static_cast<long>(delta));
    Heap::decreaseTotalAllocatedSpace(delta);
}
137
// Process-wide registry of every live GCGroup. Lazily constructed and
// intentionally leaked (DEFINE_STATIC_LOCAL) so it survives shutdown order.
HashSet<GCGroup*>& GCGroup::all()
{
    DEFINE_STATIC_LOCAL(HashSet<GCGroup*>, groups, ());
    return groups;
}
143
// A GCGroup owns the state shared by all threads that GC together: the
// page-region lookup tree and the negative-lookup cache.
GCGroup::GCGroup()
    : m_regionTree(nullptr)
    , m_heapDoesNotContainCache(adoptPtr(new HeapDoesNotContainCache))
{
}
149
// Out-of-line so the OwnPtr members' destructors see complete types.
GCGroup::~GCGroup()
{
}
153
// Invalidates the "address is definitely not in the heap" cache; needed
// whenever heap pages are added or removed.
void GCGroup::flushHeapDoesNotContainCache()
{
    m_heapDoesNotContainCache->flush();
}
158
// A GC group shared by multiple threads: owns the safe-point barrier used
// to park the other threads for GC, plus the free/orphaned page pools.
// Registers itself in the global GCGroup registry.
// NOTE(review): the FreePagePool is per group; sharing it across all groups
// was suggested in review — TODO confirm intended design.
MultiThreadGCGroup::MultiThreadGCGroup()
    : m_safePointBarrier(adoptPtr(new SafePointBarrier(this)))
    , m_freePagePool(adoptPtr(new FreePagePool))
    , m_orphanedPagePool(adoptPtr(new OrphanedPagePool))
{
    GCGroup::all().add(this);
}
166
// Unregisters from the global GCGroup registry.
MultiThreadGCGroup::~MultiThreadGCGroup()
{
    GCGroup::all().remove(this);
}
171
// Adds a thread to this group under the attach mutex, which serializes
// attach/detach against GC.
void MultiThreadGCGroup::attach(ThreadState* thread)
{
    MutexLocker locker(m_threadAttachMutex);
    m_threads.add(thread);
}
177
// Removes a thread from this group. Uses a safe-point-aware locker so that,
// if another thread already holds the mutex while preparing a GC, this
// thread parks instead of deadlocking. The thread's heap is cleaned up
// (via cleanupCallback) before it is dropped from the set.
void MultiThreadGCGroup::detach(ThreadState* thread)
{
    SafePointAwareMutexLocker locker(m_threadAttachMutex, BlinkGC::NoHeapPointersOnStack);
    thread->cleanupCallback();
    ASSERT(m_threads.contains(thread));
    m_threads.remove(thread);
}
185
// Parks every other thread in the group at a safe point; returns whether
// parking succeeded (per SafePointBarrier::parkOthers).
bool MultiThreadGCGroup::park()
{
    return m_safePointBarrier->parkOthers();
}
190
// Releases threads previously parked by park().
void MultiThreadGCGroup::resume()
{
    m_safePointBarrier->resumeOthers();
}
195
196 bool MultiThreadGCGroup::isParked() const
haraken 2016/02/12 11:28:52 Add #if ENABLE(ASSERT).
haraken 2016/02/12 11:28:52 isParked => isAtSafePoint
keishi 2016/02/29 06:02:33 Done.
keishi 2016/02/29 06:02:33 Done.
197 {
198 for (ThreadState* state : m_threads) {
199 if (!state->isAtSafePoint())
200 return false;
201 }
202 return true;
203 }
204
// Explicit lock of the attach mutex; paired with unlockThreadAttachMutex().
void MultiThreadGCGroup::lockThreadAttachMutex()
{
    m_threadAttachMutex.lock();
}
209
// Counterpart of lockThreadAttachMutex().
void MultiThreadGCGroup::unlockThreadAttachMutex()
{
    m_threadAttachMutex.unlock();
}
214
#if ENABLE(ASSERT)
// Debug helper: asks each attached thread whether it owns a heap page
// containing |address|; returns the first match or null.
BasePage* MultiThreadGCGroup::findPageFromAddress(Address address)
{
    for (ThreadState* state : m_threads) {
        if (BasePage* page = state->findPageFromAddress(address))
            return page;
    }
    return nullptr;
}
#endif
225
// Runs the per-thread pre-GC hook on every thread in the group.
void MultiThreadGCGroup::preGC()
{
    for (ThreadState* state : m_threads) {
        state->preGC();
    }
}
232
// Runs the per-thread post-GC hook (sweep scheduling etc.) on every thread.
void MultiThreadGCGroup::postGC(BlinkGC::GCType gcType)
{
    for (ThreadState* state : m_threads) {
        state->postGC(gcType);
    }
}
239
// Test-only: sums the object payload of all threads' heaps. Each thread is
// driven through the GC state sequence GCRunning -> EagerSweepScheduled ->
// Sweeping -> NoGCScheduled because makeConsistentForGC()/measurement are
// only valid in those states; the exact order matters.
size_t MultiThreadGCGroup::objectPayloadSizeForTesting()
{
    size_t objectPayloadSize = 0;
    for (ThreadState* state : m_threads) {
        state->setGCState(ThreadState::GCRunning);
        state->makeConsistentForGC();
        objectPayloadSize += state->objectPayloadSizeForTesting();
        state->setGCState(ThreadState::EagerSweepScheduled);
        state->setGCState(ThreadState::Sweeping);
        state->setGCState(ThreadState::NoGCScheduled);
    }
    return objectPayloadSize;
}
253
// Number of threads attached to this group. (Review suggested renaming to
// threadCount; name kept to preserve the interface.)
size_t MultiThreadGCGroup::size() const
{
    return m_threads.size();
}
258
// Traces all persistent roots visible to this group: the process-wide
// cross-thread persistents plus each attached thread's persistent region.
// NOTE: the trace-event label still says "ThreadState::" — leftover from the
// pre-refactor location of this code.
void MultiThreadGCGroup::visitPersistentRoots(Visitor* visitor)
{
    TRACE_EVENT0("blink_gc", "ThreadState::visitPersistentRoots");
    Heap::crossThreadPersistentRegion().tracePersistentNodes(visitor);

    for (ThreadState* state : m_threads) {
        state->visitPersistents(visitor);
    }
}
268
// Conservatively scans every attached thread's stack for heap pointers.
// NOTE: trace-event label retains the pre-refactor "ThreadState::" prefix.
void MultiThreadGCGroup::visitStackRoots(Visitor* visitor)
{
    TRACE_EVENT0("blink_gc", "ThreadState::visitStackRoots");
    for (ThreadState* state : m_threads) {
        state->visitStack(visitor);
    }
}
276
// Parks |threadState| if another thread has requested a safe point;
// thin forwarder to the group's barrier.
void MultiThreadGCGroup::checkAndPark(ThreadState* threadState, SafePointAwareMutexLocker* locker)
{
    m_safePointBarrier->checkAndPark(threadState, locker);
}
281
// Marks |threadState| as being at a safe point; forwarder to the barrier.
void MultiThreadGCGroup::enterSafePoint(ThreadState* threadState)
{
    m_safePointBarrier->enterSafePoint(threadState);
}
286
// Counterpart of enterSafePoint(); forwarder to the barrier.
void MultiThreadGCGroup::leaveSafePoint(ThreadState* threadState, SafePointAwareMutexLocker* locker)
{
    m_safePointBarrier->leaveSafePoint(threadState, locker);
}
291
// Tears down the shared heap machinery. Currently delegates to the global
// Heap::shutdown(); presumably becomes per-group once heaps are per thread
// — TODO confirm against the rest of the refactor.
void MultiThreadGCGroup::shutdown()
{
    Heap::shutdown();
}
296
297 void MultiThreadGCGroup::shutdownIfNecessary()
298 {
299 MutexLocker locker(m_threadAttachMutex);
300 if (m_threads.size() == 0 && Heap::s_shutdownCalled) {
301 shutdown();
haraken 2016/02/12 11:28:52 As commented in Heap::shutdown(), I think we need
302 delete this;
303 }
304 }
305
// Maps an arbitrary address to the heap page containing it, using the
// group's region tree. Only valid during GC (asserted). Orphaned pages are
// treated as not found so dead-thread memory is never traced.
BasePage* GCGroup::lookupPageForAddress(Address address)
{
    ASSERT(ThreadState::current()->isInGC());
    if (!m_regionTree)
        return nullptr;
    if (PageMemoryRegion* region = m_regionTree->lookup(address)) {
        BasePage* page = region->pageFromAddress(address);
        return page && !page->orphaned() ? page : nullptr;
    }
    return nullptr;
}
317
// Registers a newly reserved page region in the address-lookup tree.
// Guarded by the region-tree mutex (regions can be added/removed from
// multiple threads).
void GCGroup::addPageMemoryRegion(PageMemoryRegion* region)
{
    MutexLocker locker(m_regionTreeMutex);
    RegionTree::add(new RegionTree(region), &m_regionTree);
}
323
// Unregisters a page region from the lookup tree.
void GCGroup::removePageMemoryRegion(PageMemoryRegion* region)
{
    // Deletion of large objects (and thus their regions) can happen
    // concurrently on sweeper threads. Removal can also happen during thread
    // shutdown, but that case is safe. Regardless, we make all removals
    // mutually exclusive.
    MutexLocker locker(m_regionTreeMutex);
    RegionTree::remove(region, &m_regionTree);
}
333
// Per-process statics: the TLS slot holding each thread's ThreadState, the
// cached main-thread stack bounds (used for conservative stack scanning),
// and placement-new storage for the main thread's ThreadState so it needs
// no heap allocation during early startup.
WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr;
uintptr_t ThreadState::s_mainThreadStackStart = 0;
uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0;
uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
// Constructs the per-thread GC state and wires the thread into a GC group:
// the main thread creates the (shared) group; a non-main thread either
// creates its own group (per-thread-heap mode) or joins the main thread's.
ThreadState::ThreadState(bool perThreadHeapEnabled)
    : m_thread(currentThread())
    , m_persistentRegion(adoptPtr(new PersistentRegion()))
#if OS(WIN) && COMPILER(MSVC)
    , m_threadStackSize(0)
#endif
    , m_perThreadHeapEnabled(perThreadHeapEnabled)
    // Stack bounds start equal; m_endOfStack is refined as frames are entered.
    , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart()))
    , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart()))
    , m_safePointScopeMarker(nullptr)
    , m_atSafePoint(false)
    , m_interruptors()
    , m_sweepForbidden(false)
    , m_noAllocationCount(0)
    , m_gcForbiddenCount(0)
    , m_accumulatedSweepingTime(0)
    , m_vectorBackingHeapIndex(BlinkGC::Vector1HeapIndex)
    , m_currentHeapAges(0)
    , m_isTerminating(false)
    , m_gcMixinMarker(nullptr)
    , m_shouldFlushHeapDoesNotContainCache(false)
    , m_gcState(NoGCScheduled)
    , m_traceDOMWrappers(nullptr)
    , m_gcGroup(nullptr)
#if defined(ADDRESS_SANITIZER)
    , m_asanFakeStack(__asan_get_current_fake_stack())
#endif
#if defined(LEAK_SANITIZER)
    , m_disabledStaticPersistentsRegistration(0)
#endif
{
    ASSERT(checkThread());
    // Each thread constructs at most one ThreadState; publish it in TLS.
    ASSERT(!**s_threadSpecific);
    **s_threadSpecific = this;

    if (isMainThread()) {
        // Cache stack bounds for conservative scanning; trim one word so the
        // very first slot past the start is not scanned.
        s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - sizeof(void*);
        size_t underestimatedStackSize = StackFrameDepth::getUnderestimatedStackSize();
        if (underestimatedStackSize > sizeof(void*))
            s_mainThreadUnderestimatedStackSize = underestimatedStackSize - sizeof(void*);
        m_gcGroup = new MultiThreadGCGroup();
    } else if (m_perThreadHeapEnabled) {
        // Per-thread-heap mode: this thread GCs independently.
        m_gcGroup = new MultiThreadGCGroup();
    } else {
        // Default: share the main thread's GC group.
        m_gcGroup = ThreadState::mainThreadState()->gcGroup();
    }
    ASSERT(m_gcGroup);
    m_gcGroup->attach(this);

    // One heap per index; the large-object heap gets its own type.
    for (int heapIndex = 0; heapIndex < BlinkGC::LargeObjectHeapIndex; heapIndex++)
        m_heaps[heapIndex] = new NormalPageHeap(this, heapIndex);
    m_heaps[BlinkGC::LargeObjectHeapIndex] = new LargeObjectHeap(this, BlinkGC::LargeObjectHeapIndex);

    m_likelyToBePromptlyFreed = adoptArrayPtr(new int[likelyToBePromptlyFreedArraySize]);
    clearHeapAges();

    m_threadLocalWeakCallbackStack = new CallbackStack();
}
130 397
// Tears down per-thread state: frees the weak-callback stack and the heaps,
// clears the TLS slot, and (on the main thread) resets the cached stack
// bounds. Heap pages themselves are handed off earlier via cleanupPages().
ThreadState::~ThreadState()
{
    ASSERT(checkThread());
    delete m_threadLocalWeakCallbackStack;
    m_threadLocalWeakCallbackStack = nullptr;
    for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i)
        delete m_heaps[i];

    **s_threadSpecific = nullptr;
    if (isMainThread()) {
        s_mainThreadStackStart = 0;
        s_mainThreadUnderestimatedStackSize = 0;
    }
}
145 412
// Process-wide one-time initialization: allocates the TLS slot used to find
// the current thread's ThreadState. (The global SafePointBarrier moved into
// MultiThreadGCGroup in this refactor.)
void ThreadState::init()
{
    s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>();
}
151 417
// Process-wide shutdown hook. Intentionally empty:
void ThreadState::shutdown()
{
    // Thread-local storage shouldn't be disposed, so we don't call ~ThreadSpecific().
}
159 422
160 #if OS(WIN) && COMPILER(MSVC) 423 #if OS(WIN) && COMPILER(MSVC)
161 size_t ThreadState::threadStackSize() 424 size_t ThreadState::threadStackSize()
162 { 425 {
163 if (m_threadStackSize) 426 if (m_threadStackSize)
164 return m_threadStackSize; 427 return m_threadStackSize;
165 428
166 // Notice that we cannot use the TIB's StackLimit for the stack end, as it 429 // Notice that we cannot use the TIB's StackLimit for the stack end, as it
(...skipping 18 matching lines...) Expand all
185 // as off-limits and adjust the reported stack size accordingly. 448 // as off-limits and adjust the reported stack size accordingly.
186 // 449 //
187 // http://blogs.msdn.com/b/satyem/archive/2012/08/13/thread-s-stack-memory-m anagement.aspx 450 // http://blogs.msdn.com/b/satyem/archive/2012/08/13/thread-s-stack-memory-m anagement.aspx
188 // explains the details. 451 // explains the details.
189 RELEASE_ASSERT(m_threadStackSize > 4 * 0x1000); 452 RELEASE_ASSERT(m_threadStackSize > 4 * 0x1000);
190 m_threadStackSize -= 4 * 0x1000; 453 m_threadStackSize -= 4 * 0x1000;
191 return m_threadStackSize; 454 return m_threadStackSize;
192 } 455 }
193 #endif 456 #endif
194 457
// Constructs the main thread's ThreadState via placement-new into static
// storage (no heap allocation; the constructor registers it with a GC
// group). Must not be called after heap shutdown has begun.
void ThreadState::prepareForMainThread()
{
    RELEASE_ASSERT(!Heap::s_shutdownCalled);
    new(s_mainThreadStateStorage) ThreadState(false);
}
202 463
// Detaches the main thread at process shutdown: finish sweeping, leave the
// GC group (which runs cleanupCallback under a safe-point-aware lock),
// explicitly destroy the placement-new'd state, then let the group shut
// itself down if it is now empty.
void ThreadState::detachMainThread()
{
    ThreadState* state = mainThreadState();
    GCGroup* gcGroup = state->gcGroup();

    // 1. Finish sweeping.
    state->completeSweep();
    gcGroup->detach(state);
    // Placement-new'd into s_mainThreadStateStorage, so destroy explicitly.
    state->~ThreadState();
    gcGroup->shutdownIfNecessary();
}
226 479
// Creates and registers a ThreadState for the calling (non-main) thread.
// The object is owned via TLS and deleted in ThreadState::detach(). When
// |perThreadHeap| is true the thread gets its own GC group instead of
// joining the main thread's.
void ThreadState::prepareForCurrentThread(bool perThreadHeap)
{
    RELEASE_ASSERT(!Heap::s_shutdownCalled);
    new ThreadState(perThreadHeap);
}
246 485
// Hands each of this thread's heaps' pages over for cleanup (orphaning them
// so later global GCs won't trace this thread's memory).
void ThreadState::cleanupPages()
{
    ASSERT(checkThread());
    for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i)
        m_heaps[i]->cleanupPages();
}
253 492
// Per-thread heap teardown, invoked from GCGroup::detach() while the
// safe-point-aware attach lock is held. The main thread only orphans its
// pages; a worker thread additionally runs termination GCs until its
// persistent count stabilizes. Statement order here is load-bearing.
// NOTE(review): for m_perThreadHeapEnabled threads the early-return path
// probably applies too (no other thread shares the heap) — TODO confirm.
void ThreadState::cleanupCallback()
{
    if (isMainThread()) {
        cleanupPages();
        return;
    }

    // Finish sweeping.
    completeSweep();

    // From here on ignore all conservatively discovered
    // pointers into the heap owned by this thread.
    m_isTerminating = true;

    // Set the terminate flag on all heap pages of this thread. This is used to
    // ensure we don't trace pages on other threads that are not part of the
    // thread local GC.
    prepareForThreadStateTermination();

    Heap::crossThreadPersistentRegion().prepareForThreadStateTermination(this);

    // Do thread local GC's as long as the count of thread local Persistents
    // changes and is above zero.
    int oldCount = -1;
    int currentCount = persistentRegion()->numberOfPersistents();
    ASSERT(currentCount >= 0);
    while (currentCount != oldCount) {
        Heap::collectGarbageForTerminatingThread(this);
        oldCount = currentCount;
        currentCount = persistentRegion()->numberOfPersistents();
    }
    // We should not have any persistents left when getting to this point,
    // if we have it is probably a bug so adding a debug ASSERT to catch this.
    ASSERT(!currentCount);
    // All of pre-finalizers should be consumed.
    ASSERT(m_orderedPreFinalizers.isEmpty());
    RELEASE_ASSERT(gcState() == NoGCScheduled);

    // Add pages to the orphaned page pool to ensure any global GCs from this point
    // on will not trace objects on this thread's heaps.
    cleanupPages();
}
305 535
// Detaches the calling (non-main) thread: leaves the GC group (which runs
// cleanupCallback), deletes the heap-allocated ThreadState, and lets the
// group destroy itself if this was the last thread after heap shutdown.
void ThreadState::detach()
{
    ThreadState* state = current();
    GCGroup* gcGroup = state->gcGroup();
    gcGroup->detach(state);
    RELEASE_ASSERT(state->gcState() == ThreadState::NoGCScheduled);
    delete state;
    gcGroup->shutdownIfNecessary();
}
330 545
331 NO_SANITIZE_ADDRESS 546 NO_SANITIZE_ADDRESS
332 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) 547 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr)
333 { 548 {
334 #if defined(ADDRESS_SANITIZER) 549 #if defined(ADDRESS_SANITIZER)
335 Address* start = reinterpret_cast<Address*>(m_startOfStack); 550 Address* start = reinterpret_cast<Address*>(m_startOfStack);
336 Address* end = reinterpret_cast<Address*>(m_endOfStack); 551 Address* end = reinterpret_cast<Address*>(m_endOfStack);
337 Address* fakeFrameStart = nullptr; 552 Address* fakeFrameStart = nullptr;
338 Address* fakeFrameEnd = nullptr; 553 Address* fakeFrameEnd = nullptr;
(...skipping 134 matching lines...) Expand 10 before | Expand all | Expand 10 after
473 ScriptForbiddenIfMainThreadScope scriptForbiddenScope; 688 ScriptForbiddenIfMainThreadScope scriptForbiddenScope;
474 689
475 // Disallow allocation during weak processing. 690 // Disallow allocation during weak processing.
476 // It would be technically safe to allow allocations, but it is unsafe 691 // It would be technically safe to allow allocations, but it is unsafe
477 // to mutate an object graph in a way in which a dead object gets 692 // to mutate an object graph in a way in which a dead object gets
478 // resurrected or mutate a HashTable (because HashTable's weak processing 693 // resurrected or mutate a HashTable (because HashTable's weak processing
479 // assumes that the HashTable hasn't been mutated since the latest marking). 694 // assumes that the HashTable hasn't been mutated since the latest marking).
480 // Due to the complexity, we just forbid allocations. 695 // Due to the complexity, we just forbid allocations.
481 NoAllocationScope noAllocationScope(this); 696 NoAllocationScope noAllocationScope(this);
482 697
483 MarkingVisitor<Visitor::WeakProcessing> weakProcessingVisitor; 698 MarkingVisitor<Visitor::WeakProcessing> weakProcessingVisitor(nullptr);
484 699
485 // Perform thread-specific weak processing. 700 // Perform thread-specific weak processing.
486 while (popAndInvokeThreadLocalWeakCallback(&weakProcessingVisitor)) { } 701 while (popAndInvokeThreadLocalWeakCallback(&weakProcessingVisitor)) { }
487 702
488 if (isMainThread()) { 703 if (isMainThread()) {
489 double timeForThreadLocalWeakProcessing = WTF::currentTimeMS() - startTi me; 704 double timeForThreadLocalWeakProcessing = WTF::currentTimeMS() - startTi me;
490 Platform::current()->histogramCustomCounts("BlinkGC.timeForThreadLocalWe akProcessing", timeForThreadLocalWeakProcessing, 1, 10 * 1000, 50); 705 Platform::current()->histogramCustomCounts("BlinkGC.timeForThreadLocalWe akProcessing", timeForThreadLocalWeakProcessing, 1, 10 * 1000, 50);
491 } 706 }
492 } 707 }
493 708
494 size_t ThreadState::totalMemorySize() 709 size_t ThreadState::totalMemorySize()
495 { 710 {
496 return Heap::allocatedObjectSize() + Heap::markedObjectSize() + WTF::Partiti ons::totalSizeOfCommittedPages(); 711 return gcGroup()->heapStats().allocatedObjectSize() + gcGroup()->heapStats() .markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages();
497 } 712 }
498 713
// Estimates how much of |estimationBaseSize| is still live, by assuming the
// heap size retained per wrapper (DOM persistent handle) at the last GC and
// subtracting the share held by wrappers collected since then. Clamped to 0.
size_t ThreadState::estimatedLiveSize(size_t estimationBaseSize, size_t sizeAtLastGC)
{
    if (gcGroup()->heapStats().wrapperCountAtLastGC() == 0) {
        // We'll reach here only before hitting the first GC.
        return 0;
    }

    // (estimated size) = (estimation base size) - (heap size at the last GC) / (# of persistent handles at the last GC) * (# of persistent handles collected since the last GC);
    size_t sizeRetainedByCollectedPersistents = static_cast<size_t>(1.0 * sizeAtLastGC / gcGroup()->heapStats().wrapperCountAtLastGC() * gcGroup()->heapStats().collectedWrapperCount());
    if (estimationBaseSize < sizeRetainedByCollectedPersistents)
        return 0;
    return estimationBaseSize - sizeRetainedByCollectedPersistents;
}
512 727
// Ratio of the current Oilpan heap size to its estimated live size; used by
// GC scheduling heuristics. Returns an artificially high rate (100) when
// the live estimate is zero so a GC gets triggered.
double ThreadState::heapGrowingRate()
{
    size_t currentSize = gcGroup()->heapStats().allocatedObjectSize() + gcGroup()->heapStats().markedObjectSize();
    size_t estimatedSize = estimatedLiveSize(gcGroup()->heapStats().markedObjectSizeAtLastCompleteSweep(), gcGroup()->heapStats().markedObjectSizeAtLastCompleteSweep());

    // If the estimatedSize is 0, we set a high growing rate to trigger a GC.
    double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100;
    TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapEstimatedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_MAX)));
    TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapGrowingRate", static_cast<int>(100 * growingRate));
    return growingRate;
}
524 739
525 double ThreadState::partitionAllocGrowingRate() 740 double ThreadState::partitionAllocGrowingRate()
526 { 741 {
527 size_t currentSize = WTF::Partitions::totalSizeOfCommittedPages(); 742 size_t currentSize = WTF::Partitions::totalSizeOfCommittedPages();
528 size_t estimatedSize = estimatedLiveSize(currentSize, Heap::partitionAllocSi zeAtLastGC()); 743 size_t estimatedSize = estimatedLiveSize(currentSize, gcGroup()->heapStats() .partitionAllocSizeAtLastGC());
529 744
530 // If the estimatedSize is 0, we set a high growing rate to trigger a GC. 745 // If the estimatedSize is 0, we set a high growing rate to trigger a GC.
531 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100; 746 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100;
532 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocEstimatedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_M AX))); 747 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocEstimatedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_M AX)));
533 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocGrowingRate", static_cast<int>(100 * growingRate)); 748 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocGrowingRate", static_cast<int>(100 * growingRate));
534 return growingRate; 749 return growingRate;
535 } 750 }
536 751
537 // TODO(haraken): We should improve the GC heuristics. The heuristics affect 752 // TODO(haraken): We should improve the GC heuristics. The heuristics affect
538 // performance significantly. 753 // performance significantly.
539 bool ThreadState::judgeGCThreshold(size_t totalMemorySizeThreshold, double heapG rowingRateThreshold) 754 bool ThreadState::judgeGCThreshold(size_t totalMemorySizeThreshold, double heapG rowingRateThreshold)
540 { 755 {
541 // If the allocated object size or the total memory size is small, don't tri gger a GC. 756 // If the allocated object size or the total memory size is small, don't tri gger a GC.
542 if (Heap::allocatedObjectSize() < 100 * 1024 || totalMemorySize() < totalMem orySizeThreshold) 757 if (gcGroup()->heapStats().allocatedObjectSize() < 100 * 1024 || totalMemory Size() < totalMemorySizeThreshold)
543 return false; 758 return false;
544 // If the growing rate of Oilpan's heap or PartitionAlloc is high enough, 759 // If the growing rate of Oilpan's heap or PartitionAlloc is high enough,
545 // trigger a GC. 760 // trigger a GC.
546 #if PRINT_HEAP_STATS 761 #if PRINT_HEAP_STATS
547 dataLogF("heapGrowingRate=%.1lf, partitionAllocGrowingRate=%.1lf\n", heapGro wingRate(), partitionAllocGrowingRate()); 762 dataLogF("heapGrowingRate=%.1lf, partitionAllocGrowingRate=%.1lf\n", heapGro wingRate(), partitionAllocGrowingRate());
548 #endif 763 #endif
549 return heapGrowingRate() >= heapGrowingRateThreshold || partitionAllocGrowin gRate() >= heapGrowingRateThreshold; 764 return heapGrowingRate() >= heapGrowingRateThreshold || partitionAllocGrowin gRate() >= heapGrowingRateThreshold;
550 } 765 }
551 766
552 bool ThreadState::shouldScheduleIdleGC() 767 bool ThreadState::shouldScheduleIdleGC()
(...skipping 187 matching lines...) Expand 10 before | Expand all | Expand 10 after
740 void ThreadState::performIdleGC(double deadlineSeconds) 955 void ThreadState::performIdleGC(double deadlineSeconds)
741 { 956 {
742 ASSERT(checkThread()); 957 ASSERT(checkThread());
743 ASSERT(isMainThread()); 958 ASSERT(isMainThread());
744 ASSERT(Platform::current()->currentThread()->scheduler()); 959 ASSERT(Platform::current()->currentThread()->scheduler());
745 960
746 if (gcState() != IdleGCScheduled) 961 if (gcState() != IdleGCScheduled)
747 return; 962 return;
748 963
749 double idleDeltaInSeconds = deadlineSeconds - Platform::current()->monotonic allyIncreasingTimeSeconds(); 964 double idleDeltaInSeconds = deadlineSeconds - Platform::current()->monotonic allyIncreasingTimeSeconds();
750 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", Heap::estimatedMarkingTime()); 965 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", ThreadState::current()->gcGroup()-> heapStats().estimatedMarkingTime());
751 if (idleDeltaInSeconds <= Heap::estimatedMarkingTime() && !Platform::current ()->currentThread()->scheduler()->canExceedIdleDeadlineIfRequired()) { 966 if (idleDeltaInSeconds <= ThreadState::current()->gcGroup()->heapStats().est imatedMarkingTime() && !Platform::current()->currentThread()->scheduler()->canEx ceedIdleDeadlineIfRequired()) {
752 // If marking is estimated to take longer than the deadline and we can't 967 // If marking is estimated to take longer than the deadline and we can't
753 // exceed the deadline, then reschedule for the next idle period. 968 // exceed the deadline, then reschedule for the next idle period.
754 scheduleIdleGC(); 969 scheduleIdleGC();
755 return; 970 return;
756 } 971 }
757 972
758 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep , BlinkGC::IdleGC); 973 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep , BlinkGC::IdleGC);
759 } 974 }
760 975
761 void ThreadState::performIdleLazySweep(double deadlineSeconds) 976 void ThreadState::performIdleLazySweep(double deadlineSeconds)
(...skipping 181 matching lines...) Expand 10 before | Expand all | Expand 10 after
943 // Idle time GC will be scheduled by Blink Scheduler. 1158 // Idle time GC will be scheduled by Blink Scheduler.
944 break; 1159 break;
945 default: 1160 default:
946 break; 1161 break;
947 } 1162 }
948 } 1163 }
949 1164
950 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() 1165 void ThreadState::flushHeapDoesNotContainCacheIfNeeded()
951 { 1166 {
952 if (m_shouldFlushHeapDoesNotContainCache) { 1167 if (m_shouldFlushHeapDoesNotContainCache) {
953 Heap::flushHeapDoesNotContainCache(); 1168 gcGroup()->flushHeapDoesNotContainCache();
954 m_shouldFlushHeapDoesNotContainCache = false; 1169 m_shouldFlushHeapDoesNotContainCache = false;
955 } 1170 }
956 } 1171 }
957 1172
958 void ThreadState::makeConsistentForGC() 1173 void ThreadState::makeConsistentForGC()
959 { 1174 {
960 ASSERT(isInGC()); 1175 ASSERT(isInGC());
961 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); 1176 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC");
962 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) 1177 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i)
963 m_heaps[i]->makeConsistentForGC(); 1178 m_heaps[i]->makeConsistentForGC();
(...skipping 152 matching lines...) Expand 10 before | Expand all | Expand 10 after
1116 postSweep(); 1331 postSweep();
1117 } 1332 }
1118 1333
1119 void ThreadState::postSweep() 1334 void ThreadState::postSweep()
1120 { 1335 {
1121 ASSERT(checkThread()); 1336 ASSERT(checkThread());
1122 Heap::reportMemoryUsageForTracing(); 1337 Heap::reportMemoryUsageForTracing();
1123 1338
1124 if (isMainThread()) { 1339 if (isMainThread()) {
1125 double collectionRate = 0; 1340 double collectionRate = 0;
1126 if (Heap::objectSizeAtLastGC() > 0) 1341 if (gcGroup()->heapStats().objectSizeAtLastGC() > 0)
1127 collectionRate = 1 - 1.0 * Heap::markedObjectSize() / Heap::objectSi zeAtLastGC(); 1342 collectionRate = 1 - 1.0 * gcGroup()->heapStats().markedObjectSize() / gcGroup()->heapStats().objectSizeAtLastGC();
1128 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::coll ectionRate", static_cast<int>(100 * collectionRate)); 1343 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::coll ectionRate", static_cast<int>(100 * collectionRate));
1129 1344
1130 #if PRINT_HEAP_STATS 1345 #if PRINT_HEAP_STATS
1131 dataLogF("ThreadState::postSweep (collectionRate=%d%%)\n", static_cast<i nt>(100 * collectionRate)); 1346 dataLogF("ThreadState::postSweep (collectionRate=%d%%)\n", static_cast<i nt>(100 * collectionRate));
1132 #endif 1347 #endif
1133 1348
1134 // Heap::markedObjectSize() may be underestimated here if any other 1349 // Heap::markedObjectSize() may be underestimated here if any other
1135 // thread has not yet finished lazy sweeping. 1350 // thread has not yet finished lazy sweeping.
1136 Heap::setMarkedObjectSizeAtLastCompleteSweep(Heap::markedObjectSize()); 1351 gcGroup()->heapStats().setMarkedObjectSizeAtLastCompleteSweep(gcGroup()- >heapStats().markedObjectSize());
1137 1352
1138 Platform::current()->histogramCustomCounts("BlinkGC.ObjectSizeBeforeGC", Heap::objectSizeAtLastGC() / 1024, 1, 4 * 1024 * 1024, 50); 1353 Platform::current()->histogramCustomCounts("BlinkGC.ObjectSizeBeforeGC", gcGroup()->heapStats().objectSizeAtLastGC() / 1024, 1, 4 * 1024 * 1024, 50);
1139 Platform::current()->histogramCustomCounts("BlinkGC.ObjectSizeAfterGC", Heap::markedObjectSize() / 1024, 1, 4 * 1024 * 1024, 50); 1354 Platform::current()->histogramCustomCounts("BlinkGC.ObjectSizeAfterGC", gcGroup()->heapStats().markedObjectSize() / 1024, 1, 4 * 1024 * 1024, 50);
1140 Platform::current()->histogramCustomCounts("BlinkGC.CollectionRate", sta tic_cast<int>(100 * collectionRate), 1, 100, 20); 1355 Platform::current()->histogramCustomCounts("BlinkGC.CollectionRate", sta tic_cast<int>(100 * collectionRate), 1, 100, 20);
1141 Platform::current()->histogramCustomCounts("BlinkGC.TimeForSweepingAllOb jects", m_accumulatedSweepingTime, 1, 10 * 1000, 50); 1356 Platform::current()->histogramCustomCounts("BlinkGC.TimeForSweepingAllOb jects", m_accumulatedSweepingTime, 1, 10 * 1000, 50);
1142 } 1357 }
1143 1358
1144 switch (gcState()) { 1359 switch (gcState()) {
1145 case Sweeping: 1360 case Sweeping:
1146 setGCState(NoGCScheduled); 1361 setGCState(NoGCScheduled);
1147 break; 1362 break;
1148 case SweepingAndPreciseGCScheduled: 1363 case SweepingAndPreciseGCScheduled:
1149 setGCState(PreciseGCScheduled); 1364 setGCState(PreciseGCScheduled);
(...skipping 26 matching lines...) Expand all
1176 #endif 1391 #endif
1177 1392
1178 size_t ThreadState::objectPayloadSizeForTesting() 1393 size_t ThreadState::objectPayloadSizeForTesting()
1179 { 1394 {
1180 size_t objectPayloadSize = 0; 1395 size_t objectPayloadSize = 0;
1181 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) 1396 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i)
1182 objectPayloadSize += m_heaps[i]->objectPayloadSizeForTesting(); 1397 objectPayloadSize += m_heaps[i]->objectPayloadSizeForTesting();
1183 return objectPayloadSize; 1398 return objectPayloadSize;
1184 } 1399 }
1185 1400
1186 bool ThreadState::stopThreads()
1187 {
1188 return s_safePointBarrier->parkOthers();
1189 }
1190
1191 void ThreadState::resumeThreads()
1192 {
1193 s_safePointBarrier->resumeOthers();
1194 }
1195
1196 void ThreadState::safePoint(BlinkGC::StackState stackState) 1401 void ThreadState::safePoint(BlinkGC::StackState stackState)
1197 { 1402 {
1198 ASSERT(checkThread()); 1403 ASSERT(checkThread());
1199 Heap::reportMemoryUsageForTracing(); 1404 Heap::reportMemoryUsageForTracing();
1200 1405
1201 runScheduledGC(stackState); 1406 runScheduledGC(stackState);
1202 ASSERT(!m_atSafePoint); 1407 ASSERT(!m_atSafePoint);
1203 m_stackState = stackState; 1408 m_stackState = stackState;
1204 m_atSafePoint = true; 1409 m_atSafePoint = true;
1205 s_safePointBarrier->checkAndPark(this); 1410 m_gcGroup->checkAndPark(this, nullptr);
1206 m_atSafePoint = false; 1411 m_atSafePoint = false;
1207 m_stackState = BlinkGC::HeapPointersOnStack; 1412 m_stackState = BlinkGC::HeapPointersOnStack;
1208 preSweep(); 1413 preSweep();
1209 } 1414 }
1210 1415
1211 #ifdef ADDRESS_SANITIZER 1416 #ifdef ADDRESS_SANITIZER
1212 // When we are running under AddressSanitizer with detect_stack_use_after_return =1 1417 // When we are running under AddressSanitizer with detect_stack_use_after_return =1
1213 // then stack marker obtained from SafePointScope will point into a fake stack. 1418 // then stack marker obtained from SafePointScope will point into a fake stack.
1214 // Detect this case by checking if it falls in between current stack frame 1419 // Detect this case by checking if it falls in between current stack frame
1215 // and stack start and use an arbitrary high enough value for it. 1420 // and stack start and use an arbitrary high enough value for it.
(...skipping 23 matching lines...) Expand all
1239 #ifdef ADDRESS_SANITIZER 1444 #ifdef ADDRESS_SANITIZER
1240 if (stackState == BlinkGC::HeapPointersOnStack) 1445 if (stackState == BlinkGC::HeapPointersOnStack)
1241 scopeMarker = adjustScopeMarkerForAdressSanitizer(scopeMarker); 1446 scopeMarker = adjustScopeMarkerForAdressSanitizer(scopeMarker);
1242 #endif 1447 #endif
1243 ASSERT(stackState == BlinkGC::NoHeapPointersOnStack || scopeMarker); 1448 ASSERT(stackState == BlinkGC::NoHeapPointersOnStack || scopeMarker);
1244 runScheduledGC(stackState); 1449 runScheduledGC(stackState);
1245 ASSERT(!m_atSafePoint); 1450 ASSERT(!m_atSafePoint);
1246 m_atSafePoint = true; 1451 m_atSafePoint = true;
1247 m_stackState = stackState; 1452 m_stackState = stackState;
1248 m_safePointScopeMarker = scopeMarker; 1453 m_safePointScopeMarker = scopeMarker;
1249 s_safePointBarrier->enterSafePoint(this); 1454 m_gcGroup->enterSafePoint(this);
1250 } 1455 }
1251 1456
1252 void ThreadState::leaveSafePoint(SafePointAwareMutexLocker* locker) 1457 void ThreadState::leaveSafePoint(SafePointAwareMutexLocker* locker)
1253 { 1458 {
1254 ASSERT(checkThread()); 1459 ASSERT(checkThread());
1255 ASSERT(m_atSafePoint); 1460 ASSERT(m_atSafePoint);
1256 s_safePointBarrier->leaveSafePoint(this, locker); 1461 m_gcGroup->leaveSafePoint(this, locker);
1257 m_atSafePoint = false; 1462 m_atSafePoint = false;
1258 m_stackState = BlinkGC::HeapPointersOnStack; 1463 m_stackState = BlinkGC::HeapPointersOnStack;
1259 clearSafePointScopeMarker(); 1464 clearSafePointScopeMarker();
1260 preSweep(); 1465 preSweep();
1261 } 1466 }
1262 1467
1263 void ThreadState::copyStackUntilSafePointScope() 1468 void ThreadState::copyStackUntilSafePointScope()
1264 { 1469 {
1265 if (!m_safePointScopeMarker || m_stackState == BlinkGC::NoHeapPointersOnStac k) 1470 if (!m_safePointScopeMarker || m_stackState == BlinkGC::NoHeapPointersOnStac k)
1266 return; 1471 return;
(...skipping 17 matching lines...) Expand all
1284 for (size_t i = 0; i < slotCount; ++i) { 1489 for (size_t i = 0; i < slotCount; ++i) {
1285 m_safePointStackCopy[i] = from[i]; 1490 m_safePointStackCopy[i] = from[i];
1286 } 1491 }
1287 } 1492 }
1288 1493
1289 void ThreadState::addInterruptor(PassOwnPtr<BlinkGCInterruptor> interruptor) 1494 void ThreadState::addInterruptor(PassOwnPtr<BlinkGCInterruptor> interruptor)
1290 { 1495 {
1291 ASSERT(checkThread()); 1496 ASSERT(checkThread());
1292 SafePointScope scope(BlinkGC::HeapPointersOnStack); 1497 SafePointScope scope(BlinkGC::HeapPointersOnStack);
1293 { 1498 {
1294 MutexLocker locker(threadAttachMutex()); 1499 MutexLocker locker(gcGroup()->threadAttachMutex());
1295 m_interruptors.append(interruptor); 1500 m_interruptors.append(interruptor);
1296 } 1501 }
1297 } 1502 }
1298 1503
1299 void ThreadState::removeInterruptor(BlinkGCInterruptor* interruptor) 1504 void ThreadState::removeInterruptor(BlinkGCInterruptor* interruptor)
1300 { 1505 {
1301 ASSERT(checkThread()); 1506 ASSERT(checkThread());
1302 SafePointScope scope(BlinkGC::HeapPointersOnStack); 1507 SafePointScope scope(BlinkGC::HeapPointersOnStack);
1303 { 1508 {
1304 MutexLocker locker(threadAttachMutex()); 1509 MutexLocker locker(gcGroup()->threadAttachMutex());
1305 size_t index = m_interruptors.find(interruptor); 1510 size_t index = m_interruptors.find(interruptor);
1306 RELEASE_ASSERT(index != kNotFound); 1511 RELEASE_ASSERT(index != kNotFound);
1307 m_interruptors.remove(index); 1512 m_interruptors.remove(index);
1308 } 1513 }
1309 } 1514 }
1310 1515
1311 #if defined(LEAK_SANITIZER) 1516 #if defined(LEAK_SANITIZER)
1312 void ThreadState::registerStaticPersistentNode(PersistentNode* node) 1517 void ThreadState::registerStaticPersistentNode(PersistentNode* node)
1313 { 1518 {
1314 if (m_disabledStaticPersistentsRegistration) 1519 if (m_disabledStaticPersistentsRegistration)
(...skipping 22 matching lines...) Expand all
1337 m_disabledStaticPersistentsRegistration--; 1542 m_disabledStaticPersistentsRegistration--;
1338 } 1543 }
1339 #endif 1544 #endif
1340 1545
1341 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads() 1546 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads()
1342 { 1547 {
1343 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ()); 1548 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ());
1344 return threads; 1549 return threads;
1345 } 1550 }
1346 1551
1347 void ThreadState::lockThreadAttachMutex()
1348 {
1349 threadAttachMutex().lock();
1350 }
1351
1352 void ThreadState::unlockThreadAttachMutex()
1353 {
1354 threadAttachMutex().unlock();
1355 }
1356
1357 void ThreadState::invokePreFinalizers() 1552 void ThreadState::invokePreFinalizers()
1358 { 1553 {
1359 ASSERT(checkThread()); 1554 ASSERT(checkThread());
1360 ASSERT(!sweepForbidden()); 1555 ASSERT(!sweepForbidden());
1361 TRACE_EVENT0("blink_gc", "ThreadState::invokePreFinalizers"); 1556 TRACE_EVENT0("blink_gc", "ThreadState::invokePreFinalizers");
1362 1557
1363 double startTime = WTF::currentTimeMS(); 1558 double startTime = WTF::currentTimeMS();
1364 if (!m_orderedPreFinalizers.isEmpty()) { 1559 if (!m_orderedPreFinalizers.isEmpty()) {
1365 SweepForbiddenScope sweepForbidden(this); 1560 SweepForbiddenScope sweepForbidden(this);
1366 ScriptForbiddenIfMainThreadScope scriptForbidden; 1561 ScriptForbiddenIfMainThreadScope scriptForbidden;
(...skipping 141 matching lines...) Expand 10 before | Expand all | Expand 10 after
1508 threadDump->addScalar("live_count", "objects", totalLiveCount); 1703 threadDump->addScalar("live_count", "objects", totalLiveCount);
1509 threadDump->addScalar("dead_count", "objects", totalDeadCount); 1704 threadDump->addScalar("dead_count", "objects", totalDeadCount);
1510 threadDump->addScalar("live_size", "bytes", totalLiveSize); 1705 threadDump->addScalar("live_size", "bytes", totalLiveSize);
1511 threadDump->addScalar("dead_size", "bytes", totalDeadSize); 1706 threadDump->addScalar("dead_size", "bytes", totalDeadSize);
1512 1707
1513 WebMemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->c reateMemoryAllocatorDumpForCurrentGC(heapsDumpName); 1708 WebMemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->c reateMemoryAllocatorDumpForCurrentGC(heapsDumpName);
1514 WebMemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()- >createMemoryAllocatorDumpForCurrentGC(classesDumpName); 1709 WebMemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()- >createMemoryAllocatorDumpForCurrentGC(classesDumpName);
1515 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwners hipEdge(classesDump->guid(), heapsDump->guid()); 1710 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwners hipEdge(classesDump->guid(), heapsDump->guid());
1516 } 1711 }
1517 1712
1713 ThreadState* ThreadState::forObject(const void* object)
haraken 2016/02/12 11:28:52 forObject => fromObject (for consistency with from
keishi 2016/02/29 06:02:34 Done.
1714 {
1715 if (!object)
1716 return nullptr;
1717 BasePage* page = pageFromObject(object);
1718 ASSERT(page);
1719 ASSERT(page->heap());
1720 return page->heap()->threadState();
1721 }
1722
1518 } // namespace blink 1723 } // namespace blink
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698