Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(170)

Side by Side Diff: third_party/WebKit/Source/platform/heap/ThreadState.cpp

Issue 1477023003: Refactor the Heap into ThreadHeap to prepare for per thread heaps Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 19 matching lines...) Expand all
30 30
31 #include "config.h" 31 #include "config.h"
32 #include "platform/heap/ThreadState.h" 32 #include "platform/heap/ThreadState.h"
33 33
34 #include "platform/ScriptForbiddenScope.h" 34 #include "platform/ScriptForbiddenScope.h"
35 #include "platform/TraceEvent.h" 35 #include "platform/TraceEvent.h"
36 #include "platform/heap/BlinkGCMemoryDumpProvider.h" 36 #include "platform/heap/BlinkGCMemoryDumpProvider.h"
37 #include "platform/heap/CallbackStack.h" 37 #include "platform/heap/CallbackStack.h"
38 #include "platform/heap/Handle.h" 38 #include "platform/heap/Handle.h"
39 #include "platform/heap/Heap.h" 39 #include "platform/heap/Heap.h"
40 #include "platform/heap/PagePool.h"
40 #include "platform/heap/MarkingVisitor.h" 41 #include "platform/heap/MarkingVisitor.h"
41 #include "platform/heap/SafePoint.h" 42 #include "platform/heap/SafePoint.h"
43 #include "platform/heap/PageMemory.h"
42 #include "public/platform/Platform.h" 44 #include "public/platform/Platform.h"
43 #include "public/platform/WebMemoryAllocatorDump.h" 45 #include "public/platform/WebMemoryAllocatorDump.h"
44 #include "public/platform/WebProcessMemoryDump.h" 46 #include "public/platform/WebProcessMemoryDump.h"
45 #include "public/platform/WebScheduler.h" 47 #include "public/platform/WebScheduler.h"
46 #include "public/platform/WebThread.h" 48 #include "public/platform/WebThread.h"
47 #include "public/platform/WebTraceLocation.h" 49 #include "public/platform/WebTraceLocation.h"
48 #include "wtf/DataLog.h" 50 #include "wtf/DataLog.h"
49 #include "wtf/Partitions.h" 51 #include "wtf/Partitions.h"
50 #include "wtf/ThreadingPrimitives.h" 52 #include "wtf/ThreadingPrimitives.h"
51 53
52 #if OS(WIN) 54 #if OS(WIN)
53 #include <stddef.h> 55 #include <stddef.h>
54 #include <windows.h> 56 #include <windows.h>
55 #include <winnt.h> 57 #include <winnt.h>
56 #elif defined(__GLIBC__) 58 #elif defined(__GLIBC__)
57 extern "C" void* __libc_stack_end; // NOLINT 59 extern "C" void* __libc_stack_end; // NOLINT
58 #endif 60 #endif
59 61
60 #if defined(MEMORY_SANITIZER) 62 #if defined(MEMORY_SANITIZER)
61 #include <sanitizer/msan_interface.h> 63 #include <sanitizer/msan_interface.h>
62 #endif 64 #endif
63 65
64 #if OS(FREEBSD) 66 #if OS(FREEBSD)
65 #include <pthread_np.h> 67 #include <pthread_np.h>
66 #endif 68 #endif
67 69
68 namespace blink { 70 namespace blink {
69 71
70 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr; 72 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr;
 73 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecificTerminating = nullptr;
71 uintptr_t ThreadState::s_mainThreadStackStart = 0; 74 uintptr_t ThreadState::s_mainThreadStackStart = 0;
72 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0; 75 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0;
73 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; 76 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
74 SafePointBarrier* ThreadState::s_safePointBarrier = nullptr; 77 SafePointBarrier* ThreadState::s_safePointBarrier = nullptr;
75 78
76 RecursiveMutex& ThreadState::threadAttachMutex() 79 RecursiveMutex& ThreadState::threadAttachMutex()
77 { 80 {
 78 AtomicallyInitializedStaticReference(RecursiveMutex, mutex, (new RecursiveMutex)); 81 AtomicallyInitializedStaticReference(RecursiveMutex, mutex, (new RecursiveMutex));
79 return mutex; 82 return mutex;
80 } 83 }
81 84
82 ThreadState::ThreadState() 85 ThreadState::ThreadState()
83 : m_thread(currentThread()) 86 : m_thread(currentThread())
87 , m_isolated(false)
84 , m_persistentRegion(adoptPtr(new PersistentRegion())) 88 , m_persistentRegion(adoptPtr(new PersistentRegion()))
89 , m_xThreadPersistentRegion(adoptPtr(new XThreadPersistentRegion()))
85 #if OS(WIN) && COMPILER(MSVC) 90 #if OS(WIN) && COMPILER(MSVC)
86 , m_threadStackSize(0) 91 , m_threadStackSize(0)
87 #endif 92 #endif
88 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart( ))) 93 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart( )))
89 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart()) ) 94 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart()) )
90 , m_safePointScopeMarker(nullptr) 95 , m_safePointScopeMarker(nullptr)
91 , m_atSafePoint(false) 96 , m_atSafePoint(false)
92 , m_interruptors() 97 , m_interruptors()
93 , m_sweepForbidden(false) 98 , m_sweepForbidden(false)
94 , m_noAllocationCount(0) 99 , m_noAllocationCount(0)
95 , m_gcForbiddenCount(0) 100 , m_gcForbiddenCount(0)
96 , m_accumulatedSweepingTime(0) 101 , m_accumulatedSweepingTime(0)
97 , m_vectorBackingHeapIndex(BlinkGC::Vector1HeapIndex) 102 , m_vectorBackingHeapIndex(BlinkGC::Vector1HeapIndex)
98 , m_currentHeapAges(0) 103 , m_currentHeapAges(0)
99 , m_isTerminating(false) 104 , m_isTerminating(false)
100 , m_gcMixinMarker(nullptr) 105 , m_gcMixinMarker(nullptr)
101 , m_shouldFlushHeapDoesNotContainCache(false) 106 , m_shouldFlushHeapDoesNotContainCache(false)
102 , m_gcState(NoGCScheduled) 107 , m_gcState(NoGCScheduled)
103 , m_traceDOMWrappers(nullptr) 108 , m_traceDOMWrappers(nullptr)
104 #if defined(ADDRESS_SANITIZER) 109 #if defined(ADDRESS_SANITIZER)
105 , m_asanFakeStack(__asan_get_current_fake_stack()) 110 , m_asanFakeStack(__asan_get_current_fake_stack())
106 #endif 111 #endif
112 , m_markingStack(adoptPtr(new CallbackStack()))
113 , m_postMarkingCallbackStack(adoptPtr(new CallbackStack()))
114 , m_globalWeakCallbackStack(adoptPtr(new CallbackStack()))
115 , m_ephemeronStack(adoptPtr(new CallbackStack()))
116 , m_freePagePool(adoptPtr(new FreePagePool()))
117 , m_allocatedObjectSize(0)
118 , m_objectSizeAtLastGC(0)
119 , m_markedObjectSize(0)
120 , m_markedObjectSizeAtLastCompleteSweep(0)
121 , m_wrapperCount(0)
122 , m_wrapperCountAtLastGC(0)
123 , m_collectedWrapperCount(0)
124 , m_partitionAllocSizeAtLastGC(WTF::Partitions::totalSizeOfCommittedPages())
125 , m_estimatedMarkingTimePerByte(0.0)
126 #if ENABLE(ASSERT)
127 , m_gcGeneration(1)
128 #endif
129 , m_heapDoesNotContainCache(adoptPtr(new HeapDoesNotContainCache()))
130 , m_orphanedPagePool(adoptPtr(new OrphanedPagePool()))
131 , m_regionTree(nullptr)
107 { 132 {
108 ASSERT(checkThread()); 133 ASSERT(checkThread());
109 ASSERT(!**s_threadSpecific); 134 ASSERT(!**s_threadSpecific);
110 **s_threadSpecific = this; 135 **s_threadSpecific = this;
111 136
112 if (isMainThread()) { 137 if (isMainThread()) {
113 s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - s izeof(void*); 138 s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - s izeof(void*);
114 size_t underestimatedStackSize = StackFrameDepth::getUnderestimatedStack Size(); 139 size_t underestimatedStackSize = StackFrameDepth::getUnderestimatedStack Size();
115 if (underestimatedStackSize > sizeof(void*)) 140 if (underestimatedStackSize > sizeof(void*))
116 s_mainThreadUnderestimatedStackSize = underestimatedStackSize - size of(void*); 141 s_mainThreadUnderestimatedStackSize = underestimatedStackSize - size of(void*);
(...skipping 15 matching lines...) Expand all
132 delete m_threadLocalWeakCallbackStack; 157 delete m_threadLocalWeakCallbackStack;
133 m_threadLocalWeakCallbackStack = nullptr; 158 m_threadLocalWeakCallbackStack = nullptr;
134 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) 159 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i)
135 delete m_heaps[i]; 160 delete m_heaps[i];
136 161
137 **s_threadSpecific = nullptr; 162 **s_threadSpecific = nullptr;
138 if (isMainThread()) { 163 if (isMainThread()) {
139 s_mainThreadStackStart = 0; 164 s_mainThreadStackStart = 0;
140 s_mainThreadUnderestimatedStackSize = 0; 165 s_mainThreadUnderestimatedStackSize = 0;
141 } 166 }
167 ASSERT(!**s_threadSpecificTerminating);
168 **s_threadSpecificTerminating = this;
haraken 2015/11/30 02:54:42 I wonder why we need this.
keishi 2016/01/06 05:35:33 Removed
142 } 169 }
143 170
144 void ThreadState::init() 171 void ThreadState::init()
145 { 172 {
146 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>(); 173 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>();
174 s_threadSpecificTerminating = new WTF::ThreadSpecific<ThreadState*>();
147 s_safePointBarrier = new SafePointBarrier; 175 s_safePointBarrier = new SafePointBarrier;
148 } 176 }
149 177
150 void ThreadState::shutdown() 178 void ThreadState::shutdown()
151 { 179 {
152 delete s_safePointBarrier; 180 delete s_safePointBarrier;
153 s_safePointBarrier = nullptr; 181 s_safePointBarrier = nullptr;
154 182
 155 // Thread-local storage shouldn't be disposed, so we don't call ~ThreadSpecific(). 183 // Thread-local storage shouldn't be disposed, so we don't call ~ThreadSpecific().
156 } 184 }
(...skipping 28 matching lines...) Expand all
185 // http://blogs.msdn.com/b/satyem/archive/2012/08/13/thread-s-stack-memory-m anagement.aspx 213 // http://blogs.msdn.com/b/satyem/archive/2012/08/13/thread-s-stack-memory-m anagement.aspx
186 // explains the details. 214 // explains the details.
187 RELEASE_ASSERT(m_threadStackSize > 4 * 0x1000); 215 RELEASE_ASSERT(m_threadStackSize > 4 * 0x1000);
188 m_threadStackSize -= 4 * 0x1000; 216 m_threadStackSize -= 4 * 0x1000;
189 return m_threadStackSize; 217 return m_threadStackSize;
190 } 218 }
191 #endif 219 #endif
192 220
193 void ThreadState::attachMainThread() 221 void ThreadState::attachMainThread()
194 { 222 {
195 RELEASE_ASSERT(!Heap::s_shutdownCalled); 223 //RELEASE_ASSERT(!m_shutdownCalled);
196 MutexLocker locker(threadAttachMutex()); 224 MutexLocker locker(threadAttachMutex());
197 ThreadState* state = new(s_mainThreadStateStorage) ThreadState(); 225 ThreadState* state = new(s_mainThreadStateStorage) ThreadState();
198 attachedThreads().add(state); 226 attachedThreads().add(state);
199 } 227 }
200 228
201 void ThreadState::detachMainThread() 229 void ThreadState::detachMainThread()
202 { 230 {
203 // Enter a safe point before trying to acquire threadAttachMutex 231 // Enter a safe point before trying to acquire threadAttachMutex
204 // to avoid dead lock if another thread is preparing for GC, has acquired 232 // to avoid dead lock if another thread is preparing for GC, has acquired
205 // threadAttachMutex and waiting for other threads to pause or reach a 233 // threadAttachMutex and waiting for other threads to pause or reach a
(...skipping 13 matching lines...) Expand all
219 attachedThreads().remove(state); 247 attachedThreads().remove(state);
220 state->~ThreadState(); 248 state->~ThreadState();
221 } 249 }
222 shutdownHeapIfNecessary(); 250 shutdownHeapIfNecessary();
223 } 251 }
224 252
225 void ThreadState::shutdownHeapIfNecessary() 253 void ThreadState::shutdownHeapIfNecessary()
226 { 254 {
227 // We don't need to enter a safe point before acquiring threadAttachMutex 255 // We don't need to enter a safe point before acquiring threadAttachMutex
228 // because this thread is already detached. 256 // because this thread is already detached.
229 257 ASSERT(!ThreadState::current() && ThreadState::terminating());
230 MutexLocker locker(threadAttachMutex()); 258 MutexLocker locker(threadAttachMutex());
231 // We start shutting down the heap if there is no running thread 259 // We start shutting down the heap if there is no running thread
232 // and Heap::shutdown() is already called. 260 // and Heap::shutdown() is already called.
 233 if (!attachedThreads().size() && Heap::s_shutdownCalled) 261 if (!attachedThreads().size()/* && ThreadState::terminating()->shutdownCalled()*/)
234 Heap::doShutdown(); 262 Heap::doShutdown();
263 //ThreadState::terminating()->setShutdownCalled(true);
235 } 264 }
236 265
237 void ThreadState::attach() 266 void ThreadState::attach()
238 { 267 {
239 RELEASE_ASSERT(!Heap::s_shutdownCalled); 268 //RELEASE_ASSERT(!m_shutdownCalled);
240 MutexLocker locker(threadAttachMutex()); 269 MutexLocker locker(threadAttachMutex());
241 ThreadState* state = new ThreadState(); 270 ThreadState* state = new ThreadState();
242 attachedThreads().add(state); 271 attachedThreads().add(state);
243 } 272 }
244 273
245 void ThreadState::cleanupPages() 274 void ThreadState::cleanupPages()
246 { 275 {
247 ASSERT(checkThread()); 276 ASSERT(checkThread());
248 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) 277 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i)
249 m_heaps[i]->cleanupPages(); 278 m_heaps[i]->cleanupPages();
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
308 RELEASE_ASSERT(state->gcState() == ThreadState::NoGCScheduled); 337 RELEASE_ASSERT(state->gcState() == ThreadState::NoGCScheduled);
309 delete state; 338 delete state;
310 shutdownHeapIfNecessary(); 339 shutdownHeapIfNecessary();
311 } 340 }
312 341
313 void ThreadState::visitPersistentRoots(Visitor* visitor) 342 void ThreadState::visitPersistentRoots(Visitor* visitor)
314 { 343 {
315 TRACE_EVENT0("blink_gc", "ThreadState::visitPersistentRoots"); 344 TRACE_EVENT0("blink_gc", "ThreadState::visitPersistentRoots");
316 crossThreadPersistentRegion().tracePersistentNodes(visitor); 345 crossThreadPersistentRegion().tracePersistentNodes(visitor);
317 346
318 for (ThreadState* state : attachedThreads()) 347 for (ThreadState* state : attachedThreads()) {
319 state->visitPersistents(visitor); 348 if (!state->isolated())
haraken 2015/11/30 02:54:42 As commented somewhere, I recommend you not add is
keishi 2016/01/06 05:35:33 Done.
349 state->visitPersistents(visitor);
350 }
320 } 351 }
321 352
322 void ThreadState::visitStackRoots(Visitor* visitor) 353 void ThreadState::visitStackRoots(Visitor* visitor)
323 { 354 {
324 TRACE_EVENT0("blink_gc", "ThreadState::visitStackRoots"); 355 TRACE_EVENT0("blink_gc", "ThreadState::visitStackRoots");
325 for (ThreadState* state : attachedThreads()) 356 for (ThreadState* state : attachedThreads()) {
326 state->visitStack(visitor); 357 if (!state->isolated())
358 state->visitStack(visitor);
359 }
327 } 360 }
328 361
329 NO_SANITIZE_ADDRESS 362 NO_SANITIZE_ADDRESS
330 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) 363 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr)
331 { 364 {
332 #if defined(ADDRESS_SANITIZER) 365 #if defined(ADDRESS_SANITIZER)
333 Address* start = reinterpret_cast<Address*>(m_startOfStack); 366 Address* start = reinterpret_cast<Address*>(m_startOfStack);
334 Address* end = reinterpret_cast<Address*>(m_endOfStack); 367 Address* end = reinterpret_cast<Address*>(m_endOfStack);
335 Address* fakeFrameStart = nullptr; 368 Address* fakeFrameStart = nullptr;
336 Address* fakeFrameEnd = nullptr; 369 Address* fakeFrameEnd = nullptr;
(...skipping 155 matching lines...) Expand 10 before | Expand all | Expand 10 after
492 } 525 }
493 526
494 CrossThreadPersistentRegion& ThreadState::crossThreadPersistentRegion() 527 CrossThreadPersistentRegion& ThreadState::crossThreadPersistentRegion()
495 { 528 {
496 AtomicallyInitializedStaticReference(CrossThreadPersistentRegion, persistent Region, new CrossThreadPersistentRegion()); 529 AtomicallyInitializedStaticReference(CrossThreadPersistentRegion, persistent Region, new CrossThreadPersistentRegion());
497 return persistentRegion; 530 return persistentRegion;
498 } 531 }
499 532
500 size_t ThreadState::totalMemorySize() 533 size_t ThreadState::totalMemorySize()
501 { 534 {
 502 return Heap::allocatedObjectSize() + Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages(); 535 return allocatedObjectSize() + markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages();
503 } 536 }
504 537
505 size_t ThreadState::estimatedLiveSize(size_t estimationBaseSize, size_t sizeAtLa stGC) 538 size_t ThreadState::estimatedLiveSize(size_t estimationBaseSize, size_t sizeAtLa stGC)
506 { 539 {
507 if (Heap::wrapperCountAtLastGC() == 0) { 540 if (wrapperCountAtLastGC() == 0) {
508 // We'll reach here only before hitting the first GC. 541 // We'll reach here only before hitting the first GC.
509 return 0; 542 return 0;
510 } 543 }
511 544
512 // (estimated size) = (estimation base size) - (heap size at the last GC) / (# of persistent handles at the last GC) * (# of persistent handles collected si nce the last GC); 545 // (estimated size) = (estimation base size) - (heap size at the last GC) / (# of persistent handles at the last GC) * (# of persistent handles collected si nce the last GC);
 513 size_t sizeRetainedByCollectedPersistents = static_cast<size_t>(1.0 * sizeAtLastGC / Heap::wrapperCountAtLastGC() * Heap::collectedWrapperCount()); 546 size_t sizeRetainedByCollectedPersistents = static_cast<size_t>(1.0 * sizeAtLastGC / wrapperCountAtLastGC() * collectedWrapperCount());
514 if (estimationBaseSize < sizeRetainedByCollectedPersistents) 547 if (estimationBaseSize < sizeRetainedByCollectedPersistents)
515 return 0; 548 return 0;
516 return estimationBaseSize - sizeRetainedByCollectedPersistents; 549 return estimationBaseSize - sizeRetainedByCollectedPersistents;
517 } 550 }
518 551
519 double ThreadState::heapGrowingRate() 552 double ThreadState::heapGrowingRate()
520 { 553 {
521 size_t currentSize = Heap::allocatedObjectSize() + Heap::markedObjectSize(); 554 size_t currentSize = allocatedObjectSize() + markedObjectSize();
 522 size_t estimatedSize = estimatedLiveSize(Heap::markedObjectSizeAtLastCompleteSweep(), Heap::markedObjectSizeAtLastCompleteSweep()); 555 size_t estimatedSize = estimatedLiveSize(markedObjectSizeAtLastCompleteSweep(), markedObjectSizeAtLastCompleteSweep());
523 556
524 // If the estimatedSize is 0, we set a high growing rate to trigger a GC. 557 // If the estimatedSize is 0, we set a high growing rate to trigger a GC.
525 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100; 558 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100;
526 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapEsti matedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_MAX))); 559 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapEsti matedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_MAX)));
527 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapGrow ingRate", static_cast<int>(100 * growingRate)); 560 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapGrow ingRate", static_cast<int>(100 * growingRate));
528 return growingRate; 561 return growingRate;
529 } 562 }
530 563
531 double ThreadState::partitionAllocGrowingRate() 564 double ThreadState::partitionAllocGrowingRate()
532 { 565 {
533 size_t currentSize = WTF::Partitions::totalSizeOfCommittedPages(); 566 size_t currentSize = WTF::Partitions::totalSizeOfCommittedPages();
 534 size_t estimatedSize = estimatedLiveSize(currentSize, Heap::partitionAllocSizeAtLastGC()); 567 size_t estimatedSize = estimatedLiveSize(currentSize, partitionAllocSizeAtLastGC());
535 568
536 // If the estimatedSize is 0, we set a high growing rate to trigger a GC. 569 // If the estimatedSize is 0, we set a high growing rate to trigger a GC.
537 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100; 570 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100;
538 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocEstimatedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_M AX))); 571 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocEstimatedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_M AX)));
539 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocGrowingRate", static_cast<int>(100 * growingRate)); 572 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocGrowingRate", static_cast<int>(100 * growingRate));
540 return growingRate; 573 return growingRate;
541 } 574 }
542 575
543 // TODO(haraken): We should improve the GC heuristics. The heuristics affect 576 // TODO(haraken): We should improve the GC heuristics. The heuristics affect
544 // performance significantly. 577 // performance significantly.
545 bool ThreadState::judgeGCThreshold(size_t totalMemorySizeThreshold, double heapG rowingRateThreshold) 578 bool ThreadState::judgeGCThreshold(size_t totalMemorySizeThreshold, double heapG rowingRateThreshold)
546 { 579 {
547 // If the allocated object size or the total memory size is small, don't tri gger a GC. 580 // If the allocated object size or the total memory size is small, don't tri gger a GC.
548 if (Heap::allocatedObjectSize() < 100 * 1024 || totalMemorySize() < totalMem orySizeThreshold) 581 if (allocatedObjectSize() < 100 * 1024 || totalMemorySize() < totalMemorySiz eThreshold)
549 return false; 582 return false;
550 // If the growing rate of Oilpan's heap or PartitionAlloc is high enough, 583 // If the growing rate of Oilpan's heap or PartitionAlloc is high enough,
551 // trigger a GC. 584 // trigger a GC.
552 #if PRINT_HEAP_STATS 585 #if PRINT_HEAP_STATS
553 dataLogF("heapGrowingRate=%.1lf, partitionAllocGrowingRate=%.1lf\n", heapGro wingRate(), partitionAllocGrowingRate()); 586 dataLogF("heapGrowingRate=%.1lf, partitionAllocGrowingRate=%.1lf\n", heapGro wingRate(), partitionAllocGrowingRate());
554 #endif 587 #endif
555 return heapGrowingRate() >= heapGrowingRateThreshold || partitionAllocGrowin gRate() >= heapGrowingRateThreshold; 588 return heapGrowingRate() >= heapGrowingRateThreshold || partitionAllocGrowin gRate() >= heapGrowingRateThreshold;
556 } 589 }
557 590
558 bool ThreadState::shouldScheduleIdleGC() 591 bool ThreadState::shouldScheduleIdleGC()
(...skipping 24 matching lines...) Expand all
583 bool ThreadState::shouldForceMemoryPressureGC() 616 bool ThreadState::shouldForceMemoryPressureGC()
584 { 617 {
585 if (totalMemorySize() < 300 * 1024 * 1024) 618 if (totalMemorySize() < 300 * 1024 * 1024)
586 return false; 619 return false;
587 return judgeGCThreshold(0, 1.5); 620 return judgeGCThreshold(0, 1.5);
588 } 621 }
589 622
590 void ThreadState::scheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType gcType) 623 void ThreadState::scheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType gcType)
591 { 624 {
592 ASSERT(checkThread()); 625 ASSERT(checkThread());
593 Heap::reportMemoryUsageForTracing(); 626 reportMemoryUsageForTracing();
594 627
595 #if PRINT_HEAP_STATS 628 #if PRINT_HEAP_STATS
596 dataLogF("ThreadState::scheduleV8FollowupGCIfNeeded (gcType=%s)\n", gcType = = BlinkGC::V8MajorGC ? "MajorGC" : "MinorGC"); 629 dataLogF("ThreadState::scheduleV8FollowupGCIfNeeded (gcType=%s)\n", gcType = = BlinkGC::V8MajorGC ? "MajorGC" : "MinorGC");
597 #endif 630 #endif
598 631
599 if (isGCForbidden()) 632 if (isGCForbidden())
600 return; 633 return;
601 634
602 // This completeSweep() will do nothing in common cases since we've 635 // This completeSweep() will do nothing in common cases since we've
603 // called completeSweep() before V8 starts minor/major GCs. 636 // called completeSweep() before V8 starts minor/major GCs.
604 completeSweep(); 637 completeSweep();
605 ASSERT(!isSweepingInProgress()); 638 ASSERT(!isSweepingInProgress());
606 ASSERT(!sweepForbidden()); 639 ASSERT(!sweepForbidden());
607 640
608 // TODO(haraken): Consider if we should trigger a memory pressure GC 641 // TODO(haraken): Consider if we should trigger a memory pressure GC
609 // for V8 minor GCs as well. 642 // for V8 minor GCs as well.
610 if (gcType == BlinkGC::V8MajorGC && shouldForceMemoryPressureGC()) { 643 if (gcType == BlinkGC::V8MajorGC && shouldForceMemoryPressureGC()) {
611 #if PRINT_HEAP_STATS 644 #if PRINT_HEAP_STATS
612 dataLogF("Scheduled MemoryPressureGC\n"); 645 dataLogF("Scheduled MemoryPressureGC\n");
613 #endif 646 #endif
614 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSwe ep, BlinkGC::MemoryPressureGC); 647 if (isolated())
648 Heap::collectGarbageForIsolatedThread(this);
haraken 2015/11/30 02:54:42 At the moment it is okay to ignore lazy sweeping (
keishi 2016/01/06 05:35:33 Done.
649 else
650 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithou tSweep, BlinkGC::MemoryPressureGC);
615 return; 651 return;
616 } 652 }
617 if (shouldScheduleV8FollowupGC()) { 653 if (shouldScheduleV8FollowupGC()) {
618 #if PRINT_HEAP_STATS 654 #if PRINT_HEAP_STATS
619 dataLogF("Scheduled PreciseGC\n"); 655 dataLogF("Scheduled PreciseGC\n");
620 #endif 656 #endif
621 schedulePreciseGC(); 657 schedulePreciseGC();
622 return; 658 return;
623 } 659 }
624 if (gcType == BlinkGC::V8MajorGC) { 660 if (gcType == BlinkGC::V8MajorGC) {
(...skipping 17 matching lines...) Expand all
642 completeSweep(); 678 completeSweep();
643 679
644 // The fact that the PageNavigation GC is scheduled means that there is 680 // The fact that the PageNavigation GC is scheduled means that there is
645 // a dead frame. In common cases, a sequence of Oilpan's GC => V8 GC => 681 // a dead frame. In common cases, a sequence of Oilpan's GC => V8 GC =>
646 // Oilpan's GC is needed to collect the dead frame. So we force the 682 // Oilpan's GC is needed to collect the dead frame. So we force the
647 // PageNavigation GC before running the V8 GC. 683 // PageNavigation GC before running the V8 GC.
648 if (gcState() == PageNavigationGCScheduled) { 684 if (gcState() == PageNavigationGCScheduled) {
649 #if PRINT_HEAP_STATS 685 #if PRINT_HEAP_STATS
650 dataLogF("Scheduled PageNavigationGC\n"); 686 dataLogF("Scheduled PageNavigationGC\n");
651 #endif 687 #endif
652 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::PageNavigationGC); 688 if (isolated())
689 Heap::collectGarbageForIsolatedThread(this);
690 else
691 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithSw eep, BlinkGC::PageNavigationGC);
653 } 692 }
654 } 693 }
655 694
656 void ThreadState::schedulePageNavigationGCIfNeeded(float estimatedRemovalRatio) 695 void ThreadState::schedulePageNavigationGCIfNeeded(float estimatedRemovalRatio)
657 { 696 {
658 ASSERT(checkThread()); 697 ASSERT(checkThread());
659 Heap::reportMemoryUsageForTracing(); 698 reportMemoryUsageForTracing();
660 699
661 #if PRINT_HEAP_STATS 700 #if PRINT_HEAP_STATS
662 dataLogF("ThreadState::schedulePageNavigationGCIfNeeded (estimatedRemovalRat io=%.2lf)\n", estimatedRemovalRatio); 701 dataLogF("ThreadState::schedulePageNavigationGCIfNeeded (estimatedRemovalRat io=%.2lf)\n", estimatedRemovalRatio);
663 #endif 702 #endif
664 703
665 if (isGCForbidden()) 704 if (isGCForbidden())
666 return; 705 return;
667 706
668 // Finish on-going lazy sweeping. 707 // Finish on-going lazy sweeping.
669 // TODO(haraken): It might not make sense to force completeSweep() for all 708 // TODO(haraken): It might not make sense to force completeSweep() for all
(...skipping 21 matching lines...) Expand all
691 void ThreadState::schedulePageNavigationGC() 730 void ThreadState::schedulePageNavigationGC()
692 { 731 {
693 ASSERT(checkThread()); 732 ASSERT(checkThread());
694 ASSERT(!isSweepingInProgress()); 733 ASSERT(!isSweepingInProgress());
695 setGCState(PageNavigationGCScheduled); 734 setGCState(PageNavigationGCScheduled);
696 } 735 }
697 736
698 void ThreadState::scheduleGCIfNeeded() 737 void ThreadState::scheduleGCIfNeeded()
699 { 738 {
700 ASSERT(checkThread()); 739 ASSERT(checkThread());
701 Heap::reportMemoryUsageForTracing(); 740 reportMemoryUsageForTracing();
702 741
703 #if PRINT_HEAP_STATS 742 #if PRINT_HEAP_STATS
704 dataLogF("ThreadState::scheduleGCIfNeeded\n"); 743 dataLogF("ThreadState::scheduleGCIfNeeded\n");
705 #endif 744 #endif
706 745
707 // Allocation is allowed during sweeping, but those allocations should not 746 // Allocation is allowed during sweeping, but those allocations should not
708 // trigger nested GCs. 747 // trigger nested GCs.
709 if (isGCForbidden()) 748 if (isGCForbidden())
710 return; 749 return;
711 750
(...skipping 11 matching lines...) Expand all
723 return; 762 return;
724 } 763 }
725 } 764 }
726 765
727 if (shouldForceConservativeGC()) { 766 if (shouldForceConservativeGC()) {
728 completeSweep(); 767 completeSweep();
729 if (shouldForceConservativeGC()) { 768 if (shouldForceConservativeGC()) {
730 #if PRINT_HEAP_STATS 769 #if PRINT_HEAP_STATS
731 dataLogF("Scheduled ConservativeGC\n"); 770 dataLogF("Scheduled ConservativeGC\n");
732 #endif 771 #endif
733 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithou tSweep, BlinkGC::ConservativeGC); 772 if (isolated())
773 Heap::collectGarbageForIsolatedThread(this);
774 else
775 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWi thoutSweep, BlinkGC::ConservativeGC);
734 return; 776 return;
735 } 777 }
736 } 778 }
737 if (shouldScheduleIdleGC()) { 779 if (shouldScheduleIdleGC()) {
738 #if PRINT_HEAP_STATS 780 #if PRINT_HEAP_STATS
739 dataLogF("Scheduled IdleGC\n"); 781 dataLogF("Scheduled IdleGC\n");
740 #endif 782 #endif
741 scheduleIdleGC(); 783 scheduleIdleGC();
742 return; 784 return;
743 } 785 }
744 } 786 }
745 787
746 void ThreadState::performIdleGC(double deadlineSeconds) 788 void ThreadState::performIdleGC(double deadlineSeconds)
747 { 789 {
748 ASSERT(checkThread()); 790 ASSERT(checkThread());
749 ASSERT(isMainThread()); 791 ASSERT(isMainThread());
750 792
751 if (gcState() != IdleGCScheduled) 793 if (gcState() != IdleGCScheduled)
752 return; 794 return;
753 795
754 double idleDeltaInSeconds = deadlineSeconds - Platform::current()->monotonic allyIncreasingTimeSeconds(); 796 double idleDeltaInSeconds = deadlineSeconds - Platform::current()->monotonic allyIncreasingTimeSeconds();
755 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", Heap::estimatedMarkingTime()); 797 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", estimatedMarkingTime());
 755 if (idleDeltaInSeconds <= Heap::estimatedMarkingTime() && !Platform::current()->currentThread()->scheduler()->canExceedIdleDeadlineIfRequired()) { 798 if (idleDeltaInSeconds <= estimatedMarkingTime() && !Platform::current()->currentThread()->scheduler()->canExceedIdleDeadlineIfRequired()) {
757 // If marking is estimated to take longer than the deadline and we can't 799 // If marking is estimated to take longer than the deadline and we can't
758 // exceed the deadline, then reschedule for the next idle period. 800 // exceed the deadline, then reschedule for the next idle period.
759 scheduleIdleGC(); 801 scheduleIdleGC();
760 return; 802 return;
761 } 803 }
762 804
763 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep , BlinkGC::IdleGC); 805 if (isolated())
806 Heap::collectGarbageForIsolatedThread(this);
807 else
808 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutS weep, BlinkGC::IdleGC);
764 } 809 }
765 810
766 void ThreadState::performIdleLazySweep(double deadlineSeconds) 811 void ThreadState::performIdleLazySweep(double deadlineSeconds)
767 { 812 {
768 ASSERT(checkThread()); 813 ASSERT(checkThread());
769 ASSERT(isMainThread()); 814 ASSERT(isMainThread());
770 815
771 // If we are not in a sweeping phase, there is nothing to do here. 816 // If we are not in a sweeping phase, there is nothing to do here.
772 if (!isSweepingInProgress()) 817 if (!isSweepingInProgress())
773 return; 818 return;
(...skipping 153 matching lines...) Expand 10 before | Expand all | Expand 10 after
927 // Prevent that from happening by marking GCs as forbidden while 972 // Prevent that from happening by marking GCs as forbidden while
928 // one is initiated and later running. 973 // one is initiated and later running.
929 if (isGCForbidden()) 974 if (isGCForbidden())
930 return; 975 return;
931 976
932 switch (gcState()) { 977 switch (gcState()) {
933 case FullGCScheduled: 978 case FullGCScheduled:
934 Heap::collectAllGarbage(); 979 Heap::collectAllGarbage();
935 break; 980 break;
936 case PreciseGCScheduled: 981 case PreciseGCScheduled:
937 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutS weep, BlinkGC::PreciseGC); 982 if (isolated())
983 Heap::collectGarbageForIsolatedThread(this);
984 else
985 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWith outSweep, BlinkGC::PreciseGC);
938 break; 986 break;
939 case PageNavigationGCScheduled: 987 case PageNavigationGCScheduled:
940 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSwee p, BlinkGC::PageNavigationGC); 988 if (isolated())
989 Heap::collectGarbageForIsolatedThread(this);
990 else
991 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWith Sweep, BlinkGC::PageNavigationGC);
941 break; 992 break;
942 case IdleGCScheduled: 993 case IdleGCScheduled:
943 // Idle time GC will be scheduled by Blink Scheduler. 994 // Idle time GC will be scheduled by Blink Scheduler.
944 break; 995 break;
945 default: 996 default:
946 break; 997 break;
947 } 998 }
948 } 999 }
949 1000
950 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() 1001 void ThreadState::flushHeapDoesNotContainCacheIfNeeded()
951 { 1002 {
952 if (m_shouldFlushHeapDoesNotContainCache) { 1003 if (m_shouldFlushHeapDoesNotContainCache) {
953 Heap::flushHeapDoesNotContainCache(); 1004 flushHeapDoesNotContainCache();
954 m_shouldFlushHeapDoesNotContainCache = false; 1005 m_shouldFlushHeapDoesNotContainCache = false;
955 } 1006 }
956 } 1007 }
957 1008
958 void ThreadState::makeConsistentForGC() 1009 void ThreadState::makeConsistentForGC()
959 { 1010 {
960 ASSERT(isInGC()); 1011 ASSERT(isInGC());
961 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); 1012 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC");
962 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) 1013 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i)
963 m_heaps[i]->makeConsistentForGC(); 1014 m_heaps[i]->makeConsistentForGC();
(...skipping 157 matching lines...) Expand 10 before | Expand all | Expand 10 after
1121 Platform::current()->histogramCustomCounts("BlinkGC.CompleteSweep", timeForCompleteSweep, 1, 10 * 1000, 50); 1172 Platform::current()->histogramCustomCounts("BlinkGC.CompleteSweep", timeForCompleteSweep, 1, 10 * 1000, 50);
1122 } 1173 }
1123 } 1174 }
1124 1175
1125 postSweep(); 1176 postSweep();
1126 } 1177 }
1127 1178
1128 void ThreadState::postSweep() 1179 void ThreadState::postSweep()
1129 { 1180 {
1130 ASSERT(checkThread()); 1181 ASSERT(checkThread());
1131 Heap::reportMemoryUsageForTracing(); 1182 reportMemoryUsageForTracing();
1132 1183
1133 if (isMainThread()) { 1184 if (isMainThread()) {
1134 double collectionRate = 0; 1185 double collectionRate = 0;
1135 if (Heap::objectSizeAtLastGC() > 0) 1186 if (objectSizeAtLastGC() > 0)
1136 collectionRate = 1 - 1.0 * Heap::markedObjectSize() / Heap::objectSi zeAtLastGC(); 1187 collectionRate = 1 - 1.0 * markedObjectSize() / objectSizeAtLastGC() ;
1137 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::coll ectionRate", static_cast<int>(100 * collectionRate)); 1188 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::coll ectionRate", static_cast<int>(100 * collectionRate));
1138 1189
1139 #if PRINT_HEAP_STATS 1190 #if PRINT_HEAP_STATS
1140 dataLogF("ThreadState::postSweep (collectionRate=%d%%)\n", static_cast<i nt>(100 * collectionRate)); 1191 dataLogF("ThreadState::postSweep (collectionRate=%d%%)\n", static_cast<i nt>(100 * collectionRate));
1141 #endif 1192 #endif
1142 1193
1143 // Heap::markedObjectSize() may be underestimated here if any other 1194 // Heap::markedObjectSize() may be underestimated here if any other
1144 // thread has not yet finished lazy sweeping. 1195 // thread has not yet finished lazy sweeping.
1145 Heap::setMarkedObjectSizeAtLastCompleteSweep(Heap::markedObjectSize()); 1196 setMarkedObjectSizeAtLastCompleteSweep(markedObjectSize());
1146 1197
1147 Platform::current()->histogramCustomCounts("BlinkGC.ObjectSizeBeforeGC", Heap::objectSizeAtLastGC() / 1024, 1, 4 * 1024 * 1024, 50); 1198 Platform::current()->histogramCustomCounts("BlinkGC.ObjectSizeBeforeGC", objectSizeAtLastGC() / 1024, 1, 4 * 1024 * 1024, 50);
1148 Platform::current()->histogramCustomCounts("BlinkGC.ObjectSizeAfterGC", Heap::markedObjectSize() / 1024, 1, 4 * 1024 * 1024, 50); 1199 Platform::current()->histogramCustomCounts("BlinkGC.ObjectSizeAfterGC", markedObjectSize() / 1024, 1, 4 * 1024 * 1024, 50);
1149 Platform::current()->histogramCustomCounts("BlinkGC.CollectionRate", sta tic_cast<int>(100 * collectionRate), 1, 100, 20); 1200 Platform::current()->histogramCustomCounts("BlinkGC.CollectionRate", sta tic_cast<int>(100 * collectionRate), 1, 100, 20);
1150 Platform::current()->histogramCustomCounts("BlinkGC.TimeForSweepingAllOb jects", m_accumulatedSweepingTime, 1, 10 * 1000, 50); 1201 Platform::current()->histogramCustomCounts("BlinkGC.TimeForSweepingAllOb jects", m_accumulatedSweepingTime, 1, 10 * 1000, 50);
1151 } 1202 }
1152 1203
1153 switch (gcState()) { 1204 switch (gcState()) {
1154 case Sweeping: 1205 case Sweeping:
1155 setGCState(NoGCScheduled); 1206 setGCState(NoGCScheduled);
1156 break; 1207 break;
1157 case SweepingAndPreciseGCScheduled: 1208 case SweepingAndPreciseGCScheduled:
1158 setGCState(PreciseGCScheduled); 1209 setGCState(PreciseGCScheduled);
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after
1198 } 1249 }
1199 1250
1200 void ThreadState::resumeThreads() 1251 void ThreadState::resumeThreads()
1201 { 1252 {
1202 s_safePointBarrier->resumeOthers(); 1253 s_safePointBarrier->resumeOthers();
1203 } 1254 }
1204 1255
1205 void ThreadState::safePoint(BlinkGC::StackState stackState) 1256 void ThreadState::safePoint(BlinkGC::StackState stackState)
1206 { 1257 {
1207 ASSERT(checkThread()); 1258 ASSERT(checkThread());
1208 Heap::reportMemoryUsageForTracing(); 1259 reportMemoryUsageForTracing();
1209 1260
1210 runScheduledGC(stackState); 1261 runScheduledGC(stackState);
1211 ASSERT(!m_atSafePoint); 1262 ASSERT(!m_atSafePoint);
1212 m_stackState = stackState; 1263 m_stackState = stackState;
1213 m_atSafePoint = true; 1264 m_atSafePoint = true;
1214 s_safePointBarrier->checkAndPark(this); 1265 s_safePointBarrier->checkAndPark(this);
1215 m_atSafePoint = false; 1266 m_atSafePoint = false;
1216 m_stackState = BlinkGC::HeapPointersOnStack; 1267 m_stackState = BlinkGC::HeapPointersOnStack;
1217 preSweep(); 1268 preSweep();
1218 } 1269 }
(...skipping 263 matching lines...) Expand 10 before | Expand all | Expand 10 after
1482 threadDump->addScalar("live_count", "objects", totalLiveCount); 1533 threadDump->addScalar("live_count", "objects", totalLiveCount);
1483 threadDump->addScalar("dead_count", "objects", totalDeadCount); 1534 threadDump->addScalar("dead_count", "objects", totalDeadCount);
1484 threadDump->addScalar("live_size", "bytes", totalLiveSize); 1535 threadDump->addScalar("live_size", "bytes", totalLiveSize);
1485 threadDump->addScalar("dead_size", "bytes", totalDeadSize); 1536 threadDump->addScalar("dead_size", "bytes", totalDeadSize);
1486 1537
1487 WebMemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->c reateMemoryAllocatorDumpForCurrentGC(heapsDumpName); 1538 WebMemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->c reateMemoryAllocatorDumpForCurrentGC(heapsDumpName);
1488 WebMemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()- >createMemoryAllocatorDumpForCurrentGC(classesDumpName); 1539 WebMemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()- >createMemoryAllocatorDumpForCurrentGC(classesDumpName);
1489 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwners hipEdge(classesDump->guid(), heapsDump->guid()); 1540 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwners hipEdge(classesDump->guid(), heapsDump->guid());
1490 } 1541 }
1491 1542
1543 double ThreadState::estimatedMarkingTime()
1544 {
1545 // Use 8 ms as initial estimated marking time.
1546 // 8 ms is long enough for low-end mobile devices to mark common
1547 // real-world object graphs.
1548 if (m_estimatedMarkingTimePerByte == 0)
1549 return 0.008;
1550
1551 // Assuming that the collection rate of this GC will be mostly equal to
1552 // the collection rate of the last GC, estimate the marking time of this GC.
1553 return m_estimatedMarkingTimePerByte * (allocatedObjectSize() + markedObject Size());
1554 }
1555
1556 void ThreadState::reportMemoryUsageHistogram()
1557 {
1558 static size_t supportedMaxSizeInMB = 4 * 1024;
1559 static size_t observedMaxSizeInMB = 0;
1560
1561 // FIXME: Separate report per thread
1562
1563 // +1 is for rounding up the sizeInMB.
1564 size_t sizeInMB = allocatedSpace() / 1024 / 1024 + 1;
1565 if (sizeInMB >= supportedMaxSizeInMB)
1566 sizeInMB = supportedMaxSizeInMB - 1;
1567 if (sizeInMB > observedMaxSizeInMB) {
1568 // Send a UseCounter only when we see the highest memory usage
1569 // we've ever seen.
1570 Platform::current()->histogramEnumeration("BlinkGC.CommittedSize", sizeI nMB, supportedMaxSizeInMB);
1571 observedMaxSizeInMB = sizeInMB;
1572 }
1573 }
1574
1575 void ThreadState::reportMemoryUsageForTracing()
1576 {
1577 // FIXME: Separate report per thread
1578 #if PRINT_HEAP_STATS
1579 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSi ze=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\ n", Heap::allocatedSpace() / 1024 / 1024, Heap::allocatedObjectSize() / 1024 / 1 024, Heap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommitt edPages() / 1024 / 1024, Heap::wrapperCount(), Heap::collectedWrapperCount());
1580 #endif
1581
1582 bool gcTracingEnabled;
1583 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled);
1584 if (!gcTracingEnabled)
1585 return;
1586
1587 // These values are divided by 1024 to avoid overflow in practical cases (TR ACE_COUNTER values are 32-bit ints).
1588 // They are capped to INT_MAX just in case.
1589 TRACE_COUNTER1("blink_gc", "Heap::allocatedObjectSizeKB", std::min(allocated ObjectSize() / 1024, static_cast<size_t>(INT_MAX)));
1590 TRACE_COUNTER1("blink_gc", "Heap::markedObjectSizeKB", std::min(markedObject Size() / 1024, static_cast<size_t>(INT_MAX)));
1591 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSiz eAtLastCompleteSweepKB", std::min(markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX)));
1592 TRACE_COUNTER1("blink_gc", "Heap::allocatedSpaceKB", std::min(allocatedSpace () / 1024, static_cast<size_t>(INT_MAX)));
1593 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::objectSizeAtLas tGCKB", std::min(objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX)));
1594 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCount", std::min(wrapperCount(), static_cast<size_t>(INT_MAX)));
1595 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCountAtL astGC", std::min(wrapperCountAtLastGC(), static_cast<size_t>(INT_MAX)));
1596 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::collectedWrappe rCount", std::min(collectedWrapperCount(), static_cast<size_t>(INT_MAX)));
1597 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::partitionAllocS izeAtLastGCKB", std::min(partitionAllocSizeAtLastGC() / 1024, static_cast<size_t >(INT_MAX)));
1598 TRACE_COUNTER1("blink_gc", "Partitions::totalSizeOfCommittedPagesKB", std::m in(WTF::Partitions::totalSizeOfCommittedPages() / 1024, static_cast<size_t>(INT_ MAX)));
1599 }
1600
1601 void ThreadState::resetHeapCounters()
1602 {
1603 ASSERT(isInGC());
1604
1605 reportMemoryUsageForTracing();
1606
1607 m_objectSizeAtLastGC = m_allocatedObjectSize + m_markedObjectSize;
1608 m_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages();
1609 m_allocatedObjectSize = 0;
1610 m_markedObjectSize = 0;
1611 m_wrapperCountAtLastGC = m_wrapperCount;
1612 m_collectedWrapperCount = 0;
1613 }
1614
1615 void ThreadState::flushHeapDoesNotContainCache()
1616 {
1617 m_heapDoesNotContainCache->flush();
1618 }
1619
1620 PageMemoryRegion* ThreadState::RegionTree::lookup(Address address)
1621 {
1622 RegionTree* current = ThreadState::current()->regionTree();
1623 while (current) {
1624 Address base = current->m_region->base();
1625 if (address < base) {
1626 current = current->m_left;
1627 continue;
1628 }
1629 if (address >= base + current->m_region->size()) {
1630 current = current->m_right;
1631 continue;
1632 }
1633 ASSERT(current->m_region->contains(address));
1634 return current->m_region;
1635 }
1636 return nullptr;
1637 }
1638
1639 void ThreadState::RegionTree::add(RegionTree* newTree, RegionTree** context)
1640 {
1641 ASSERT(newTree);
1642 Address base = newTree->m_region->base();
1643 for (RegionTree* current = *context; current; current = *context) {
1644 ASSERT(!current->m_region->contains(base));
1645 context = (base < current->m_region->base()) ? &current->m_left : &curre nt->m_right;
1646 }
1647 *context = newTree;
1648 }
1649
1650 void ThreadState::RegionTree::remove(PageMemoryRegion* region, RegionTree** cont ext)
1651 {
1652 ASSERT(region);
1653 ASSERT(context);
1654 Address base = region->base();
1655 RegionTree* current = *context;
1656 for (; current; current = *context) {
1657 if (region == current->m_region)
1658 break;
1659 context = (base < current->m_region->base()) ? &current->m_left : &curre nt->m_right;
1660 }
1661
1662 // Shutdown via detachMainThread might not have populated the region tree.
1663 if (!current)
1664 return;
1665
1666 *context = nullptr;
1667 if (current->m_left) {
1668 add(current->m_left, context);
1669 current->m_left = nullptr;
1670 }
1671 if (current->m_right) {
1672 add(current->m_right, context);
1673 current->m_right = nullptr;
1674 }
1675 delete current;
1676 }
1677
1492 } // namespace blink 1678 } // namespace blink
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698