Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1)

Side by Side Diff: third_party/WebKit/Source/platform/heap/ThreadState.cpp

Issue 1892713003: Prepare for multiple ThreadHeaps (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after
65 #endif 65 #endif
66 66
67 #include <v8.h> 67 #include <v8.h>
68 68
69 namespace blink { 69 namespace blink {
70 70
71 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr; 71 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr;
72 uintptr_t ThreadState::s_mainThreadStackStart = 0; 72 uintptr_t ThreadState::s_mainThreadStackStart = 0;
73 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0; 73 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0;
74 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; 74 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
75 SafePointBarrier* ThreadState::s_safePointBarrier = nullptr;
76
77 RecursiveMutex& ThreadState::threadAttachMutex()
78 {
79 DEFINE_THREAD_SAFE_STATIC_LOCAL(RecursiveMutex, mutex, (new RecursiveMutex)) ;
80 return mutex;
81 }
82 75
83 ThreadState::ThreadState() 76 ThreadState::ThreadState()
84 : m_thread(currentThread()) 77 : m_thread(currentThread())
85 , m_persistentRegion(adoptPtr(new PersistentRegion())) 78 , m_persistentRegion(adoptPtr(new PersistentRegion()))
86 #if OS(WIN) && COMPILER(MSVC) 79 #if OS(WIN) && COMPILER(MSVC)
87 , m_threadStackSize(0) 80 , m_threadStackSize(0)
88 #endif 81 #endif
89 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart( ))) 82 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart( )))
90 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart()) ) 83 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart()) )
91 , m_safePointScopeMarker(nullptr) 84 , m_safePointScopeMarker(nullptr)
(...skipping 23 matching lines...) Expand all
115 { 108 {
116 ASSERT(checkThread()); 109 ASSERT(checkThread());
117 ASSERT(!**s_threadSpecific); 110 ASSERT(!**s_threadSpecific);
118 **s_threadSpecific = this; 111 **s_threadSpecific = this;
119 112
120 if (isMainThread()) { 113 if (isMainThread()) {
121 s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - s izeof(void*); 114 s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - s izeof(void*);
122 size_t underestimatedStackSize = StackFrameDepth::getUnderestimatedStack Size(); 115 size_t underestimatedStackSize = StackFrameDepth::getUnderestimatedStack Size();
123 if (underestimatedStackSize > sizeof(void*)) 116 if (underestimatedStackSize > sizeof(void*))
124 s_mainThreadUnderestimatedStackSize = underestimatedStackSize - size of(void*); 117 s_mainThreadUnderestimatedStackSize = underestimatedStackSize - size of(void*);
118 m_heap = new ThreadHeap();
119 } else {
120 m_heap = &ThreadState::mainThreadState()->heap();
125 } 121 }
122 ASSERT(m_heap);
123 m_heap->attach(this);
126 124
127 for (int arenaIndex = 0; arenaIndex < BlinkGC::LargeObjectArenaIndex; arenaI ndex++) 125 for (int arenaIndex = 0; arenaIndex < BlinkGC::LargeObjectArenaIndex; arenaI ndex++)
128 m_arenas[arenaIndex] = new NormalPageArena(this, arenaIndex); 126 m_arenas[arenaIndex] = new NormalPageArena(this, arenaIndex);
129 m_arenas[BlinkGC::LargeObjectArenaIndex] = new LargeObjectArena(this, BlinkG C::LargeObjectArenaIndex); 127 m_arenas[BlinkGC::LargeObjectArenaIndex] = new LargeObjectArena(this, BlinkG C::LargeObjectArenaIndex);
130 128
131 m_likelyToBePromptlyFreed = adoptArrayPtr(new int[likelyToBePromptlyFreedArr aySize]); 129 m_likelyToBePromptlyFreed = adoptArrayPtr(new int[likelyToBePromptlyFreedArr aySize]);
132 clearArenaAges(); 130 clearArenaAges();
133 131
134 // There is little use of weak references and collections off the main threa d; 132 // There is little use of weak references and collections off the main threa d;
135 // use a much lower initial block reservation. 133 // use a much lower initial block reservation.
136 size_t initialBlockSize = isMainThread() ? CallbackStack::kDefaultBlockSize : CallbackStack::kMinimalBlockSize; 134 size_t initialBlockSize = isMainThread() ? CallbackStack::kDefaultBlockSize : CallbackStack::kMinimalBlockSize;
137 m_threadLocalWeakCallbackStack = new CallbackStack(initialBlockSize); 135 m_threadLocalWeakCallbackStack = new CallbackStack(initialBlockSize);
138 } 136 }
139 137
140 ThreadState::~ThreadState() 138 ThreadState::~ThreadState()
141 { 139 {
142 ASSERT(checkThread()); 140 ASSERT(checkThread());
143 delete m_threadLocalWeakCallbackStack; 141 delete m_threadLocalWeakCallbackStack;
144 m_threadLocalWeakCallbackStack = nullptr; 142 m_threadLocalWeakCallbackStack = nullptr;
145 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) 143 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
146 delete m_arenas[i]; 144 delete m_arenas[i];
147 145
148 **s_threadSpecific = nullptr; 146 **s_threadSpecific = nullptr;
149 if (isMainThread()) { 147 if (isMainThread()) {
150 s_mainThreadStackStart = 0; 148 s_mainThreadStackStart = 0;
151 s_mainThreadUnderestimatedStackSize = 0; 149 s_mainThreadUnderestimatedStackSize = 0;
152 } 150 }
153 } 151 }
154 152
155 void ThreadState::init()
156 {
157 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>();
158 s_safePointBarrier = new SafePointBarrier;
159 }
160
161 void ThreadState::shutdown()
162 {
163 delete s_safePointBarrier;
164 s_safePointBarrier = nullptr;
165
166 // Thread-local storage shouldn't be disposed, so we don't call ~ThreadSpeci fic().
167 }
168
169 #if OS(WIN) && COMPILER(MSVC) 153 #if OS(WIN) && COMPILER(MSVC)
170 size_t ThreadState::threadStackSize() 154 size_t ThreadState::threadStackSize()
171 { 155 {
172 if (m_threadStackSize) 156 if (m_threadStackSize)
173 return m_threadStackSize; 157 return m_threadStackSize;
174 158
175 // Notice that we cannot use the TIB's StackLimit for the stack end, as it 159 // Notice that we cannot use the TIB's StackLimit for the stack end, as it
176 // tracks the end of the committed range. We're after the end of the reserve d 160 // tracks the end of the committed range. We're after the end of the reserve d
177 // stack area (most of which will be uncommitted, most times.) 161 // stack area (most of which will be uncommitted, most times.)
178 MEMORY_BASIC_INFORMATION stackInfo; 162 MEMORY_BASIC_INFORMATION stackInfo;
(...skipping 17 matching lines...) Expand all
196 // http://blogs.msdn.com/b/satyem/archive/2012/08/13/thread-s-stack-memory-m anagement.aspx 180 // http://blogs.msdn.com/b/satyem/archive/2012/08/13/thread-s-stack-memory-m anagement.aspx
197 // explains the details. 181 // explains the details.
198 RELEASE_ASSERT(m_threadStackSize > 4 * 0x1000); 182 RELEASE_ASSERT(m_threadStackSize > 4 * 0x1000);
199 m_threadStackSize -= 4 * 0x1000; 183 m_threadStackSize -= 4 * 0x1000;
200 return m_threadStackSize; 184 return m_threadStackSize;
201 } 185 }
202 #endif 186 #endif
203 187
204 void ThreadState::attachMainThread() 188 void ThreadState::attachMainThread()
205 { 189 {
206 MutexLocker locker(threadAttachMutex()); 190 RELEASE_ASSERT(!ProcessHeap::s_shutdownComplete);
207 ThreadState* state = new (s_mainThreadStateStorage) ThreadState(); 191 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>();
208 attachedThreads().add(state); 192 new (s_mainThreadStateStorage) ThreadState();
209 } 193 }
210 194
195 void ThreadState::attachCurrentThread()
196 {
197 RELEASE_ASSERT(!ProcessHeap::s_shutdownComplete);
198 new ThreadState();
199 }
200
201 void ThreadState::cleanupPages()
202 {
203 ASSERT(checkThread());
204 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
205 m_arenas[i]->cleanupPages();
206 }
207
// Runs thread-local GCs until the terminating thread holds no more
// persistents, then orphans its pages. On the main thread only the page
// cleanup is performed (the main thread dies with the process).
void ThreadState::runTerminationGC()
{
    if (isMainThread()) {
        cleanupPages();
        return;
    }
    ASSERT(checkThread());

    // Finish sweeping.
    completeSweep();

    // From here on ignore all conservatively discovered
    // pointers into the heap owned by this thread.
    m_isTerminating = true;

    releaseStaticPersistentNodes();

    // Set the terminate flag on all heap pages of this thread. This is used to
    // ensure we don't trace pages on other threads that are not part of the
    // thread local GC.
    prepareForThreadStateTermination();

    ProcessHeap::crossThreadPersistentRegion().prepareForThreadStateTermination(this);

    // Do thread local GC's as long as the count of thread local Persistents
    // changes and is above zero.
    int oldCount = -1;
    int currentCount = getPersistentRegion()->numberOfPersistents();
    ASSERT(currentCount >= 0);
    while (currentCount != oldCount) {
        ThreadHeap::collectGarbageForTerminatingThread(this);
        oldCount = currentCount;
        currentCount = getPersistentRegion()->numberOfPersistents();
    }
    // We should not have any persistents left when getting to this point,
    // if we have it is probably a bug so adding a debug ASSERT to catch this.
    ASSERT(!currentCount);
    // All of pre-finalizers should be consumed.
    ASSERT(m_orderedPreFinalizers.isEmpty());
    RELEASE_ASSERT(gcState() == NoGCScheduled);

    // Add pages to the orphaned page pool to ensure any global GCs from this point
    // on will not trace objects on this thread's arenas.
    cleanupPages();
}
211 253
212 void ThreadState::cleanupMainThread() 254 void ThreadState::cleanupMainThread()
213 { 255 {
214 ASSERT(isMainThread()); 256 ASSERT(isMainThread());
215 257
216 releaseStaticPersistentNodes(); 258 releaseStaticPersistentNodes();
217 259
218 #if defined(LEAK_SANITIZER) 260 #if defined(LEAK_SANITIZER)
219 // If LSan is about to perform leak detection, after having released all 261 // If LSan is about to perform leak detection, after having released all
220 // the registered static Persistent<> root references to global caches 262 // the registered static Persistent<> root references to global caches
(...skipping 16 matching lines...) Expand all
237 enterGCForbiddenScope(); 279 enterGCForbiddenScope();
238 } 280 }
239 281
240 void ThreadState::detachMainThread() 282 void ThreadState::detachMainThread()
241 { 283 {
242 // Enter a safe point before trying to acquire threadAttachMutex 284 // Enter a safe point before trying to acquire threadAttachMutex
243 // to avoid dead lock if another thread is preparing for GC, has acquired 285 // to avoid dead lock if another thread is preparing for GC, has acquired
244 // threadAttachMutex and waiting for other threads to pause or reach a 286 // threadAttachMutex and waiting for other threads to pause or reach a
245 // safepoint. 287 // safepoint.
246 ThreadState* state = mainThreadState(); 288 ThreadState* state = mainThreadState();
247 ASSERT(state == ThreadState::current());
248 ASSERT(state->checkThread());
249 ASSERT(!state->isSweepingInProgress()); 289 ASSERT(!state->isSweepingInProgress());
250 290
251 // The main thread must be the last thread that gets detached. 291 state->heap().detach(state);
252 RELEASE_ASSERT(ThreadState::attachedThreads().size() == 1);
253
254 // Add the main thread's heap pages to the orphaned pool.
255 state->cleanupPages();
256
257 // Detach the main thread. We don't need to grab a lock because
258 // the main thread should be the last thread that gets detached.
259 ASSERT(attachedThreads().contains(state));
260 attachedThreads().remove(state);
261 state->~ThreadState(); 292 state->~ThreadState();
262 } 293 }
263 294
264 void ThreadState::attach() 295 void ThreadState::detachCurrentThread()
265 {
266 MutexLocker locker(threadAttachMutex());
267 ThreadState* state = new ThreadState();
268 attachedThreads().add(state);
269 }
270
271 void ThreadState::cleanupPages()
272 {
273 ASSERT(checkThread());
274 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
275 m_arenas[i]->cleanupPages();
276 }
277
278 void ThreadState::cleanup()
279 {
280 ASSERT(checkThread());
281 {
282 // Grab the threadAttachMutex to ensure only one thread can shutdown at
283 // a time and that no other thread can do a global GC. It also allows
284 // safe iteration of the attachedThreads set which happens as part of
285 // thread local GC asserts. We enter a safepoint while waiting for the
286 // lock to avoid a dead-lock where another thread has already requested
287 // GC.
288 SafePointAwareMutexLocker locker(threadAttachMutex(), BlinkGC::NoHeapPoi ntersOnStack);
289
290 // Finish sweeping.
291 completeSweep();
292
293 // From here on ignore all conservatively discovered
294 // pointers into the heap owned by this thread.
295 m_isTerminating = true;
296
297 releaseStaticPersistentNodes();
298
299 // Set the terminate flag on all heap pages of this thread. This is used to
300 // ensure we don't trace pages on other threads that are not part of the
301 // thread local GC.
302 prepareForThreadStateTermination();
303
304 ProcessHeap::crossThreadPersistentRegion().prepareForThreadStateTerminat ion(this);
305
306 // Do thread local GC's as long as the count of thread local Persistents
307 // changes and is above zero.
308 int oldCount = -1;
309 int currentCount = getPersistentRegion()->numberOfPersistents();
310 ASSERT(currentCount >= 0);
311 while (currentCount != oldCount) {
312 ThreadHeap::collectGarbageForTerminatingThread(this);
313 oldCount = currentCount;
314 currentCount = getPersistentRegion()->numberOfPersistents();
315 }
316 // We should not have any persistents left when getting to this point,
317 // if we have it is probably a bug so adding a debug ASSERT to catch thi s.
318 ASSERT(!currentCount);
319 // All of pre-finalizers should be consumed.
320 ASSERT(m_orderedPreFinalizers.isEmpty());
321 RELEASE_ASSERT(gcState() == NoGCScheduled);
322
323 // Add pages to the orphaned page pool to ensure any global GCs from thi s point
324 // on will not trace objects on this thread's arenas.
325 cleanupPages();
326
327 ASSERT(attachedThreads().contains(this));
328 attachedThreads().remove(this);
329 }
330 }
331
332 void ThreadState::detach()
333 { 296 {
334 ThreadState* state = current(); 297 ThreadState* state = current();
335 state->cleanup(); 298 state->heap().detach(state);
336 RELEASE_ASSERT(state->gcState() == ThreadState::NoGCScheduled); 299 RELEASE_ASSERT(state->gcState() == ThreadState::NoGCScheduled);
337 delete state; 300 delete state;
338 } 301 }
339 302
340 void ThreadState::visitPersistentRoots(Visitor* visitor)
341 {
342 TRACE_EVENT0("blink_gc", "ThreadState::visitPersistentRoots");
343 ProcessHeap::crossThreadPersistentRegion().tracePersistentNodes(visitor);
344
345 for (ThreadState* state : attachedThreads())
346 state->visitPersistents(visitor);
347 }
348
349 void ThreadState::visitStackRoots(Visitor* visitor)
350 {
351 TRACE_EVENT0("blink_gc", "ThreadState::visitStackRoots");
352 for (ThreadState* state : attachedThreads())
353 state->visitStack(visitor);
354 }
355
NO_SANITIZE_ADDRESS
// If |ptr| points into an ASan "fake frame" (the heap-side copy ASan keeps
// of a function's locals), conservatively scan that fake frame for heap
// pointers too. No-op unless built with ADDRESS_SANITIZER.
void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr)
{
#if defined(ADDRESS_SANITIZER)
    Address* start = reinterpret_cast<Address*>(m_startOfStack);
    Address* end = reinterpret_cast<Address*>(m_endOfStack);
    Address* fakeFrameStart = nullptr;
    Address* fakeFrameEnd = nullptr;
    Address* maybeFakeFrame = reinterpret_cast<Address*>(ptr);
    Address* realFrameForFakeFrame =
        reinterpret_cast<Address*>(
            __asan_addr_is_in_fake_stack(
                m_asanFakeStack, maybeFakeFrame,
                reinterpret_cast<void**>(&fakeFrameStart),
                reinterpret_cast<void**>(&fakeFrameEnd)));
    if (realFrameForFakeFrame) {
        // This is a fake frame from the asan fake stack.
        // Stack grows down, so "within range" means end < frame < start.
        if (realFrameForFakeFrame > end && start > realFrameForFakeFrame) {
            // The real stack address for the asan fake frame is
            // within the stack range that we need to scan so we need
            // to visit the values in the fake frame.
            for (Address* p = fakeFrameStart; p < fakeFrameEnd; ++p)
                m_heap->checkAndMarkPointer(visitor, *p);
        }
    }
#endif
}
383 330
NO_SANITIZE_ADDRESS
// Stack scanning may overrun the bounds of local objects and/or race with
// other threads that use this stack.
NO_SANITIZE_THREAD
// Conservatively scans this thread's machine stack (and any safepoint stack
// copy) for values that look like pointers into the Oilpan heap, marking
// whatever they reference.
void ThreadState::visitStack(Visitor* visitor)
{
    if (m_stackState == BlinkGC::NoHeapPointersOnStack)
        return;

    Address* start = reinterpret_cast<Address*>(m_startOfStack);
    // If there is a safepoint scope marker we should stop the stack
    // scanning there to not touch active parts of the stack. Anything
    // interesting beyond that point is in the safepoint stack copy.
    // If there is no scope marker the thread is blocked and we should
    // scan all the way to the recorded end stack pointer.
    Address* end = reinterpret_cast<Address*>(m_endOfStack);
    Address* safePointScopeMarker = reinterpret_cast<Address*>(m_safePointScopeMarker);
    Address* current = safePointScopeMarker ? safePointScopeMarker : end;

    // Ensure that current is aligned by address size otherwise the loop below
    // will read past start address.
    current = reinterpret_cast<Address*>(reinterpret_cast<intptr_t>(current) & ~(sizeof(Address) - 1));

    // Stack grows down, so walk upward from |current| to |start|.
    for (; current < start; ++current) {
        Address ptr = *current;
#if defined(MEMORY_SANITIZER)
        // |ptr| may be uninitialized by design. Mark it as initialized to keep
        // MSan from complaining.
        // Note: it may be tempting to get rid of |ptr| and simply use |current|
        // here, but that would be incorrect. We intentionally use a local
        // variable because we don't want to unpoison the original stack.
        __msan_unpoison(&ptr, sizeof(ptr));
#endif
        m_heap->checkAndMarkPointer(visitor, ptr);
        visitAsanFakeStackForPointer(visitor, ptr);
    }

    for (Address ptr : m_safePointStackCopy) {
#if defined(MEMORY_SANITIZER)
        // See the comment above.
        __msan_unpoison(&ptr, sizeof(ptr));
#endif
        m_heap->checkAndMarkPointer(visitor, ptr);
        visitAsanFakeStackForPointer(visitor, ptr);
    }
}
427 377
428 void ThreadState::visitPersistents(Visitor* visitor) 378 void ThreadState::visitPersistents(Visitor* visitor)
429 { 379 {
430 m_persistentRegion->tracePersistentNodes(visitor); 380 m_persistentRegion->tracePersistentNodes(visitor);
431 if (m_traceDOMWrappers) { 381 if (m_traceDOMWrappers) {
432 TRACE_EVENT0("blink_gc", "V8GCController::traceDOMWrappers"); 382 TRACE_EVENT0("blink_gc", "V8GCController::traceDOMWrappers");
433 m_traceDOMWrappers(m_isolate, visitor); 383 m_traceDOMWrappers(m_isolate, visitor);
(...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after
514 464
515 if (isMainThread()) { 465 if (isMainThread()) {
516 double timeForThreadLocalWeakProcessing = WTF::currentTimeMS() - startTi me; 466 double timeForThreadLocalWeakProcessing = WTF::currentTimeMS() - startTi me;
517 DEFINE_STATIC_LOCAL(CustomCountHistogram, timeForWeakHistogram, ("BlinkG C.TimeForThreadLocalWeakProcessing", 1, 10 * 1000, 50)); 467 DEFINE_STATIC_LOCAL(CustomCountHistogram, timeForWeakHistogram, ("BlinkG C.TimeForThreadLocalWeakProcessing", 1, 10 * 1000, 50));
518 timeForWeakHistogram.count(timeForThreadLocalWeakProcessing); 468 timeForWeakHistogram.count(timeForThreadLocalWeakProcessing);
519 } 469 }
520 } 470 }
521 471
522 size_t ThreadState::totalMemorySize() 472 size_t ThreadState::totalMemorySize()
523 { 473 {
524 return ThreadHeap::heapStats().allocatedObjectSize() + ThreadHeap::heapStats ().markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages(); 474 return m_heap->heapStats().allocatedObjectSize() + m_heap->heapStats().marke dObjectSize() + WTF::Partitions::totalSizeOfCommittedPages();
525 } 475 }
526 476
527 size_t ThreadState::estimatedLiveSize(size_t estimationBaseSize, size_t sizeAtLa stGC) 477 size_t ThreadState::estimatedLiveSize(size_t estimationBaseSize, size_t sizeAtLa stGC)
528 { 478 {
529 if (ThreadHeap::heapStats().wrapperCountAtLastGC() == 0) { 479 if (m_heap->heapStats().wrapperCountAtLastGC() == 0) {
530 // We'll reach here only before hitting the first GC. 480 // We'll reach here only before hitting the first GC.
531 return 0; 481 return 0;
532 } 482 }
533 483
534 // (estimated size) = (estimation base size) - (heap size at the last GC) / (# of persistent handles at the last GC) * (# of persistent handles collected si nce the last GC); 484 // (estimated size) = (estimation base size) - (heap size at the last GC) / (# of persistent handles at the last GC) * (# of persistent handles collected si nce the last GC);
535 size_t sizeRetainedByCollectedPersistents = static_cast<size_t>(1.0 * sizeAt LastGC / ThreadHeap::heapStats().wrapperCountAtLastGC() * ThreadHeap::heapStats( ).collectedWrapperCount()); 485 size_t sizeRetainedByCollectedPersistents = static_cast<size_t>(1.0 * sizeAt LastGC / m_heap->heapStats().wrapperCountAtLastGC() * m_heap->heapStats().collec tedWrapperCount());
536 if (estimationBaseSize < sizeRetainedByCollectedPersistents) 486 if (estimationBaseSize < sizeRetainedByCollectedPersistents)
537 return 0; 487 return 0;
538 return estimationBaseSize - sizeRetainedByCollectedPersistents; 488 return estimationBaseSize - sizeRetainedByCollectedPersistents;
539 } 489 }
540 490
541 double ThreadState::heapGrowingRate() 491 double ThreadState::heapGrowingRate()
542 { 492 {
543 size_t currentSize = ThreadHeap::heapStats().allocatedObjectSize() + ThreadH eap::heapStats().markedObjectSize(); 493 size_t currentSize = m_heap->heapStats().allocatedObjectSize() + m_heap->hea pStats().markedObjectSize();
544 size_t estimatedSize = estimatedLiveSize(ThreadHeap::heapStats().markedObjec tSizeAtLastCompleteSweep(), ThreadHeap::heapStats().markedObjectSizeAtLastComple teSweep()); 494 size_t estimatedSize = estimatedLiveSize(m_heap->heapStats().markedObjectSiz eAtLastCompleteSweep(), m_heap->heapStats().markedObjectSizeAtLastCompleteSweep( ));
545 495
546 // If the estimatedSize is 0, we set a high growing rate to trigger a GC. 496 // If the estimatedSize is 0, we set a high growing rate to trigger a GC.
547 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100; 497 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100;
548 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapEsti matedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_MAX))); 498 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapEsti matedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_MAX)));
549 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapGrow ingRate", static_cast<int>(100 * growingRate)); 499 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapGrow ingRate", static_cast<int>(100 * growingRate));
550 return growingRate; 500 return growingRate;
551 } 501 }
552 502
553 double ThreadState::partitionAllocGrowingRate() 503 double ThreadState::partitionAllocGrowingRate()
554 { 504 {
555 size_t currentSize = WTF::Partitions::totalSizeOfCommittedPages(); 505 size_t currentSize = WTF::Partitions::totalSizeOfCommittedPages();
556 size_t estimatedSize = estimatedLiveSize(currentSize, ThreadHeap::heapStats( ).partitionAllocSizeAtLastGC()); 506 size_t estimatedSize = estimatedLiveSize(currentSize, m_heap->heapStats().pa rtitionAllocSizeAtLastGC());
557 507
558 // If the estimatedSize is 0, we set a high growing rate to trigger a GC. 508 // If the estimatedSize is 0, we set a high growing rate to trigger a GC.
559 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100; 509 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100;
560 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocEstimatedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_M AX))); 510 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocEstimatedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_M AX)));
561 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocGrowingRate", static_cast<int>(100 * growingRate)); 511 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocGrowingRate", static_cast<int>(100 * growingRate));
562 return growingRate; 512 return growingRate;
563 } 513 }
564 514
// TODO(haraken): We should improve the GC heuristics. The heuristics affect
// performance significantly.
// Returns true when a GC is worthwhile: the heap is non-trivially sized AND
// either Oilpan's heap or PartitionAlloc has grown past the given rate.
bool ThreadState::judgeGCThreshold(size_t totalMemorySizeThreshold, double heapGrowingRateThreshold)
{
    // If the allocated object size or the total memory size is small, don't trigger a GC.
    if (m_heap->heapStats().allocatedObjectSize() < 100 * 1024 || totalMemorySize() < totalMemorySizeThreshold)
        return false;
    // If the growing rate of Oilpan's heap or PartitionAlloc is high enough,
    // trigger a GC.
#if PRINT_HEAP_STATS
    dataLogF("heapGrowingRate=%.1lf, partitionAllocGrowingRate=%.1lf\n", heapGrowingRate(), partitionAllocGrowingRate());
#endif
    return heapGrowingRate() >= heapGrowingRateThreshold || partitionAllocGrowingRate() >= heapGrowingRateThreshold;
}
579 529
580 bool ThreadState::shouldScheduleIdleGC() 530 bool ThreadState::shouldScheduleIdleGC()
(...skipping 163 matching lines...) Expand 10 before | Expand all | Expand 10 after
744 } 694 }
745 if (shouldScheduleIdleGC()) { 695 if (shouldScheduleIdleGC()) {
746 #if PRINT_HEAP_STATS 696 #if PRINT_HEAP_STATS
747 dataLogF("Scheduled IdleGC\n"); 697 dataLogF("Scheduled IdleGC\n");
748 #endif 698 #endif
749 scheduleIdleGC(); 699 scheduleIdleGC();
750 return; 700 return;
751 } 701 }
752 } 702 }
753 703
704 ThreadState* ThreadState::fromObject(const void* object)
705 {
706 ASSERT(object);
707 BasePage* page = pageFromObject(object);
708 ASSERT(page);
709 ASSERT(page->arena());
710 return page->arena()->getThreadState();
711 }
712
// Runs a scheduled idle-time GC if the remaining idle budget allows it;
// otherwise reschedules for the next idle period. Main thread only.
void ThreadState::performIdleGC(double deadlineSeconds)
{
    ASSERT(checkThread());
    ASSERT(isMainThread());
    ASSERT(Platform::current()->currentThread()->scheduler());

    // The scheduled idle GC may have been superseded (or already run).
    if (gcState() != IdleGCScheduled)
        return;

    double idleDeltaInSeconds = deadlineSeconds - monotonicallyIncreasingTime();
    TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", m_heap->heapStats().estimatedMarkingTime());
    if (idleDeltaInSeconds <= m_heap->heapStats().estimatedMarkingTime() && !Platform::current()->currentThread()->scheduler()->canExceedIdleDeadlineIfRequired()) {
        // If marking is estimated to take longer than the deadline and we can't
        // exceed the deadline, then reschedule for the next idle period.
        scheduleIdleGC();
        return;
    }

    ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::IdleGC);
}
774 733
775 void ThreadState::performIdleLazySweep(double deadlineSeconds) 734 void ThreadState::performIdleLazySweep(double deadlineSeconds)
(...skipping 181 matching lines...) Expand 10 before | Expand all | Expand 10 after
957 // Idle time GC will be scheduled by Blink Scheduler. 916 // Idle time GC will be scheduled by Blink Scheduler.
958 break; 917 break;
959 default: 918 default:
960 break; 919 break;
961 } 920 }
962 } 921 }
963 922
964 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() 923 void ThreadState::flushHeapDoesNotContainCacheIfNeeded()
965 { 924 {
966 if (m_shouldFlushHeapDoesNotContainCache) { 925 if (m_shouldFlushHeapDoesNotContainCache) {
967 ThreadHeap::flushHeapDoesNotContainCache(); 926 m_heap->flushHeapDoesNotContainCache();
968 m_shouldFlushHeapDoesNotContainCache = false; 927 m_shouldFlushHeapDoesNotContainCache = false;
969 } 928 }
970 } 929 }
971 930
972 void ThreadState::makeConsistentForGC() 931 void ThreadState::makeConsistentForGC()
973 { 932 {
974 ASSERT(isInGC()); 933 ASSERT(isInGC());
975 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); 934 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC");
976 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) 935 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
977 m_arenas[i]->makeConsistentForGC(); 936 m_arenas[i]->makeConsistentForGC();
(...skipping 146 matching lines...) Expand 10 before | Expand all | Expand 10 after
1124 postSweep(); 1083 postSweep();
1125 } 1084 }
1126 1085
1127 void ThreadState::postSweep() 1086 void ThreadState::postSweep()
1128 { 1087 {
1129 ASSERT(checkThread()); 1088 ASSERT(checkThread());
1130 ThreadHeap::reportMemoryUsageForTracing(); 1089 ThreadHeap::reportMemoryUsageForTracing();
1131 1090
1132 if (isMainThread()) { 1091 if (isMainThread()) {
1133 double collectionRate = 0; 1092 double collectionRate = 0;
1134 if (ThreadHeap::heapStats().objectSizeAtLastGC() > 0) 1093 if (m_heap->heapStats().objectSizeAtLastGC() > 0)
1135 collectionRate = 1 - 1.0 * ThreadHeap::heapStats().markedObjectSize( ) / ThreadHeap::heapStats().objectSizeAtLastGC(); 1094 collectionRate = 1 - 1.0 * m_heap->heapStats().markedObjectSize() / m_heap->heapStats().objectSizeAtLastGC();
1136 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::coll ectionRate", static_cast<int>(100 * collectionRate)); 1095 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::coll ectionRate", static_cast<int>(100 * collectionRate));
1137 1096
1138 #if PRINT_HEAP_STATS 1097 #if PRINT_HEAP_STATS
1139 dataLogF("ThreadState::postSweep (collectionRate=%d%%)\n", static_cast<i nt>(100 * collectionRate)); 1098 dataLogF("ThreadState::postSweep (collectionRate=%d%%)\n", static_cast<i nt>(100 * collectionRate));
1140 #endif 1099 #endif
1141 1100
1142 // ThreadHeap::markedObjectSize() may be underestimated here if any othe r 1101 // ThreadHeap::markedObjectSize() may be underestimated here if any othe r
1143 // thread has not yet finished lazy sweeping. 1102 // thread has not yet finished lazy sweeping.
1144 ThreadHeap::heapStats().setMarkedObjectSizeAtLastCompleteSweep(ThreadHea p::heapStats().markedObjectSize()); 1103 m_heap->heapStats().setMarkedObjectSizeAtLastCompleteSweep(m_heap->heapS tats().markedObjectSize());
1145 1104
1146 DEFINE_STATIC_LOCAL(CustomCountHistogram, objectSizeBeforeGCHistogram, ( "BlinkGC.ObjectSizeBeforeGC", 1, 4 * 1024 * 1024, 50)); 1105 DEFINE_STATIC_LOCAL(CustomCountHistogram, objectSizeBeforeGCHistogram, ( "BlinkGC.ObjectSizeBeforeGC", 1, 4 * 1024 * 1024, 50));
1147 objectSizeBeforeGCHistogram.count(ThreadHeap::heapStats().objectSizeAtLa stGC() / 1024); 1106 objectSizeBeforeGCHistogram.count(m_heap->heapStats().objectSizeAtLastGC () / 1024);
1148 DEFINE_STATIC_LOCAL(CustomCountHistogram, objectSizeAfterGCHistogram, (" BlinkGC.ObjectSizeAfterGC", 1, 4 * 1024 * 1024, 50)); 1107 DEFINE_STATIC_LOCAL(CustomCountHistogram, objectSizeAfterGCHistogram, (" BlinkGC.ObjectSizeAfterGC", 1, 4 * 1024 * 1024, 50));
1149 objectSizeAfterGCHistogram.count(ThreadHeap::heapStats().markedObjectSiz e() / 1024); 1108 objectSizeAfterGCHistogram.count(m_heap->heapStats().markedObjectSize() / 1024);
1150 DEFINE_STATIC_LOCAL(CustomCountHistogram, collectionRateHistogram, ("Bli nkGC.CollectionRate", 1, 100, 20)); 1109 DEFINE_STATIC_LOCAL(CustomCountHistogram, collectionRateHistogram, ("Bli nkGC.CollectionRate", 1, 100, 20));
1151 collectionRateHistogram.count(static_cast<int>(100 * collectionRate)); 1110 collectionRateHistogram.count(static_cast<int>(100 * collectionRate));
1152 DEFINE_STATIC_LOCAL(CustomCountHistogram, timeForSweepHistogram, ("Blink GC.TimeForSweepingAllObjects", 1, 10 * 1000, 50)); 1111 DEFINE_STATIC_LOCAL(CustomCountHistogram, timeForSweepHistogram, ("Blink GC.TimeForSweepingAllObjects", 1, 10 * 1000, 50));
1153 timeForSweepHistogram.count(m_accumulatedSweepingTime); 1112 timeForSweepHistogram.count(m_accumulatedSweepingTime);
1154 1113
1155 1114
1156 #define COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(GCReason) \ 1115 #define COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(GCReason) \
1157 case BlinkGC::GCReason: { \ 1116 case BlinkGC::GCReason: { \
1158 DEFINE_STATIC_LOCAL(CustomCountHistogram, histogram, \ 1117 DEFINE_STATIC_LOCAL(CustomCountHistogram, histogram, \
1159 ("BlinkGC.CollectionRate_" #GCReason, 1, 100, 20)); \ 1118 ("BlinkGC.CollectionRate_" #GCReason, 1, 100, 20)); \
1160 histogram.count(static_cast<int>(100 * collectionRate)); \ 1119 histogram.count(static_cast<int>(100 * collectionRate)); \
1161 break; \ 1120 break; \
1162 } 1121 }
1163 1122
1164 switch (ThreadHeap::lastGCReason()) { 1123 switch (m_heap->lastGCReason()) {
1165 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(IdleGC) 1124 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(IdleGC)
1166 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(PreciseGC) 1125 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(PreciseGC)
1167 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(ConservativeGC) 1126 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(ConservativeGC)
1168 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(ForcedGC) 1127 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(ForcedGC)
1169 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(MemoryPressureGC) 1128 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(MemoryPressureGC)
1170 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(PageNavigationGC) 1129 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(PageNavigationGC)
1171 default: 1130 default:
1172 break; 1131 break;
1173 } 1132 }
1174 } 1133 }
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
1208 #endif 1167 #endif
1209 1168
1210 size_t ThreadState::objectPayloadSizeForTesting() 1169 size_t ThreadState::objectPayloadSizeForTesting()
1211 { 1170 {
1212 size_t objectPayloadSize = 0; 1171 size_t objectPayloadSize = 0;
1213 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) 1172 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
1214 objectPayloadSize += m_arenas[i]->objectPayloadSizeForTesting(); 1173 objectPayloadSize += m_arenas[i]->objectPayloadSizeForTesting();
1215 return objectPayloadSize; 1174 return objectPayloadSize;
1216 } 1175 }
1217 1176
1218 bool ThreadState::stopThreads()
1219 {
1220 return s_safePointBarrier->parkOthers();
1221 }
1222
1223 void ThreadState::resumeThreads()
1224 {
1225 s_safePointBarrier->resumeOthers();
1226 }
1227
1228 void ThreadState::safePoint(BlinkGC::StackState stackState) 1177 void ThreadState::safePoint(BlinkGC::StackState stackState)
1229 { 1178 {
1230 ASSERT(checkThread()); 1179 ASSERT(checkThread());
1231 ThreadHeap::reportMemoryUsageForTracing(); 1180 ThreadHeap::reportMemoryUsageForTracing();
1232 1181
1233 runScheduledGC(stackState); 1182 runScheduledGC(stackState);
1234 ASSERT(!m_atSafePoint); 1183 ASSERT(!m_atSafePoint);
1235 m_stackState = stackState; 1184 m_stackState = stackState;
1236 m_atSafePoint = true; 1185 m_atSafePoint = true;
1237 s_safePointBarrier->checkAndPark(this); 1186 m_heap->checkAndPark(this, nullptr);
1238 m_atSafePoint = false; 1187 m_atSafePoint = false;
1239 m_stackState = BlinkGC::HeapPointersOnStack; 1188 m_stackState = BlinkGC::HeapPointersOnStack;
1240 preSweep(); 1189 preSweep();
1241 } 1190 }
1242 1191
1243 #ifdef ADDRESS_SANITIZER 1192 #ifdef ADDRESS_SANITIZER
1244 // When we are running under AddressSanitizer with detect_stack_use_after_return =1 1193 // When we are running under AddressSanitizer with detect_stack_use_after_return =1
1245 // then stack marker obtained from SafePointScope will point into a fake stack. 1194 // then stack marker obtained from SafePointScope will point into a fake stack.
1246 // Detect this case by checking if it falls in between current stack frame 1195 // Detect this case by checking if it falls in between current stack frame
1247 // and stack start and use an arbitrary high enough value for it. 1196 // and stack start and use an arbitrary high enough value for it.
(...skipping 23 matching lines...) Expand all
1271 #ifdef ADDRESS_SANITIZER 1220 #ifdef ADDRESS_SANITIZER
1272 if (stackState == BlinkGC::HeapPointersOnStack) 1221 if (stackState == BlinkGC::HeapPointersOnStack)
1273 scopeMarker = adjustScopeMarkerForAdressSanitizer(scopeMarker); 1222 scopeMarker = adjustScopeMarkerForAdressSanitizer(scopeMarker);
1274 #endif 1223 #endif
1275 ASSERT(stackState == BlinkGC::NoHeapPointersOnStack || scopeMarker); 1224 ASSERT(stackState == BlinkGC::NoHeapPointersOnStack || scopeMarker);
1276 runScheduledGC(stackState); 1225 runScheduledGC(stackState);
1277 ASSERT(!m_atSafePoint); 1226 ASSERT(!m_atSafePoint);
1278 m_atSafePoint = true; 1227 m_atSafePoint = true;
1279 m_stackState = stackState; 1228 m_stackState = stackState;
1280 m_safePointScopeMarker = scopeMarker; 1229 m_safePointScopeMarker = scopeMarker;
1281 s_safePointBarrier->enterSafePoint(this); 1230 m_heap->enterSafePoint(this);
1282 } 1231 }
1283 1232
1284 void ThreadState::leaveSafePoint(SafePointAwareMutexLocker* locker) 1233 void ThreadState::leaveSafePoint(SafePointAwareMutexLocker* locker)
1285 { 1234 {
1286 ASSERT(checkThread()); 1235 ASSERT(checkThread());
1287 ASSERT(m_atSafePoint); 1236 ASSERT(m_atSafePoint);
1288 s_safePointBarrier->leaveSafePoint(this, locker); 1237 m_heap->leaveSafePoint(this, locker);
1289 m_atSafePoint = false; 1238 m_atSafePoint = false;
1290 m_stackState = BlinkGC::HeapPointersOnStack; 1239 m_stackState = BlinkGC::HeapPointersOnStack;
1291 clearSafePointScopeMarker(); 1240 clearSafePointScopeMarker();
1292 preSweep(); 1241 preSweep();
1293 } 1242 }
1294 1243
1295 void ThreadState::reportMemoryToV8() 1244 void ThreadState::reportMemoryToV8()
1296 { 1245 {
1297 if (!m_isolate) 1246 if (!m_isolate)
1298 return; 1247 return;
1299 1248
1300 size_t currentHeapSize = m_allocatedObjectSize + m_markedObjectSize; 1249 size_t currentHeapSize = m_allocatedObjectSize + m_markedObjectSize;
1301 int64_t diff = static_cast<int64_t>(currentHeapSize) - static_cast<int64_t>( m_reportedMemoryToV8); 1250 int64_t diff = static_cast<int64_t>(currentHeapSize) - static_cast<int64_t>( m_reportedMemoryToV8);
1302 m_isolate->AdjustAmountOfExternalAllocatedMemory(diff); 1251 m_isolate->AdjustAmountOfExternalAllocatedMemory(diff);
1303 m_reportedMemoryToV8 = currentHeapSize; 1252 m_reportedMemoryToV8 = currentHeapSize;
1304 } 1253 }
1305 1254
1306 void ThreadState::resetHeapCounters() 1255 void ThreadState::resetHeapCounters()
1307 { 1256 {
1308 m_allocatedObjectSize = 0; 1257 m_allocatedObjectSize = 0;
1309 m_markedObjectSize = 0; 1258 m_markedObjectSize = 0;
1310 } 1259 }
1311 1260
1312 void ThreadState::increaseAllocatedObjectSize(size_t delta) 1261 void ThreadState::increaseAllocatedObjectSize(size_t delta)
1313 { 1262 {
1314 m_allocatedObjectSize += delta; 1263 m_allocatedObjectSize += delta;
1315 ThreadHeap::heapStats().increaseAllocatedObjectSize(delta); 1264 m_heap->heapStats().increaseAllocatedObjectSize(delta);
1316 } 1265 }
1317 1266
1318 void ThreadState::decreaseAllocatedObjectSize(size_t delta) 1267 void ThreadState::decreaseAllocatedObjectSize(size_t delta)
1319 { 1268 {
1320 m_allocatedObjectSize -= delta; 1269 m_allocatedObjectSize -= delta;
1321 ThreadHeap::heapStats().decreaseAllocatedObjectSize(delta); 1270 m_heap->heapStats().decreaseAllocatedObjectSize(delta);
1322 } 1271 }
1323 1272
1324 void ThreadState::increaseMarkedObjectSize(size_t delta) 1273 void ThreadState::increaseMarkedObjectSize(size_t delta)
1325 { 1274 {
1326 m_markedObjectSize += delta; 1275 m_markedObjectSize += delta;
1327 ThreadHeap::heapStats().increaseMarkedObjectSize(delta); 1276 m_heap->heapStats().increaseMarkedObjectSize(delta);
1328 } 1277 }
1329 1278
1330 void ThreadState::copyStackUntilSafePointScope() 1279 void ThreadState::copyStackUntilSafePointScope()
1331 { 1280 {
1332 if (!m_safePointScopeMarker || m_stackState == BlinkGC::NoHeapPointersOnStac k) 1281 if (!m_safePointScopeMarker || m_stackState == BlinkGC::NoHeapPointersOnStac k)
1333 return; 1282 return;
1334 1283
1335 Address* to = reinterpret_cast<Address*>(m_safePointScopeMarker); 1284 Address* to = reinterpret_cast<Address*>(m_safePointScopeMarker);
1336 Address* from = reinterpret_cast<Address*>(m_endOfStack); 1285 Address* from = reinterpret_cast<Address*>(m_endOfStack);
1337 RELEASE_ASSERT(from < to); 1286 RELEASE_ASSERT(from < to);
(...skipping 13 matching lines...) Expand all
1351 for (size_t i = 0; i < slotCount; ++i) { 1300 for (size_t i = 0; i < slotCount; ++i) {
1352 m_safePointStackCopy[i] = from[i]; 1301 m_safePointStackCopy[i] = from[i];
1353 } 1302 }
1354 } 1303 }
1355 1304
1356 void ThreadState::addInterruptor(PassOwnPtr<BlinkGCInterruptor> interruptor) 1305 void ThreadState::addInterruptor(PassOwnPtr<BlinkGCInterruptor> interruptor)
1357 { 1306 {
1358 ASSERT(checkThread()); 1307 ASSERT(checkThread());
1359 SafePointScope scope(BlinkGC::HeapPointersOnStack); 1308 SafePointScope scope(BlinkGC::HeapPointersOnStack);
1360 { 1309 {
1361 MutexLocker locker(threadAttachMutex()); 1310 MutexLocker locker(m_heap->threadAttachMutex());
1362 m_interruptors.append(interruptor); 1311 m_interruptors.append(interruptor);
1363 } 1312 }
1364 } 1313 }
1365 1314
1366 void ThreadState::registerStaticPersistentNode(PersistentNode* node, PersistentC learCallback callback) 1315 void ThreadState::registerStaticPersistentNode(PersistentNode* node, PersistentC learCallback callback)
1367 { 1316 {
1368 #if defined(LEAK_SANITIZER) 1317 #if defined(LEAK_SANITIZER)
1369 if (m_disabledStaticPersistentsRegistration) 1318 if (m_disabledStaticPersistentsRegistration)
1370 return; 1319 return;
1371 #endif 1320 #endif
(...skipping 30 matching lines...) Expand all
1402 m_disabledStaticPersistentsRegistration++; 1351 m_disabledStaticPersistentsRegistration++;
1403 } 1352 }
1404 1353
1405 void ThreadState::leaveStaticReferenceRegistrationDisabledScope() 1354 void ThreadState::leaveStaticReferenceRegistrationDisabledScope()
1406 { 1355 {
1407 ASSERT(m_disabledStaticPersistentsRegistration); 1356 ASSERT(m_disabledStaticPersistentsRegistration);
1408 m_disabledStaticPersistentsRegistration--; 1357 m_disabledStaticPersistentsRegistration--;
1409 } 1358 }
1410 #endif 1359 #endif
1411 1360
1412 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads()
1413 {
1414 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ());
1415 return threads;
1416 }
1417
1418 void ThreadState::lockThreadAttachMutex() 1361 void ThreadState::lockThreadAttachMutex()
1419 { 1362 {
1420 threadAttachMutex().lock(); 1363 m_heap->threadAttachMutex().lock();
1421 } 1364 }
1422 1365
1423 void ThreadState::unlockThreadAttachMutex() 1366 void ThreadState::unlockThreadAttachMutex()
1424 { 1367 {
1425 threadAttachMutex().unlock(); 1368 m_heap->threadAttachMutex().unlock();
1426 } 1369 }
1427 1370
1428 void ThreadState::invokePreFinalizers() 1371 void ThreadState::invokePreFinalizers()
1429 { 1372 {
1430 ASSERT(checkThread()); 1373 ASSERT(checkThread());
1431 ASSERT(!sweepForbidden()); 1374 ASSERT(!sweepForbidden());
1432 TRACE_EVENT0("blink_gc", "ThreadState::invokePreFinalizers"); 1375 TRACE_EVENT0("blink_gc", "ThreadState::invokePreFinalizers");
1433 1376
1434 double startTime = WTF::currentTimeMS(); 1377 double startTime = WTF::currentTimeMS();
1435 if (!m_orderedPreFinalizers.isEmpty()) { 1378 if (!m_orderedPreFinalizers.isEmpty()) {
(...skipping 135 matching lines...) Expand 10 before | Expand all | Expand 10 after
1571 threadDump->addScalar("dead_count", "objects", totalDeadCount); 1514 threadDump->addScalar("dead_count", "objects", totalDeadCount);
1572 threadDump->addScalar("live_size", "bytes", totalLiveSize); 1515 threadDump->addScalar("live_size", "bytes", totalLiveSize);
1573 threadDump->addScalar("dead_size", "bytes", totalDeadSize); 1516 threadDump->addScalar("dead_size", "bytes", totalDeadSize);
1574 1517
1575 WebMemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->c reateMemoryAllocatorDumpForCurrentGC(heapsDumpName); 1518 WebMemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->c reateMemoryAllocatorDumpForCurrentGC(heapsDumpName);
1576 WebMemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()- >createMemoryAllocatorDumpForCurrentGC(classesDumpName); 1519 WebMemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()- >createMemoryAllocatorDumpForCurrentGC(classesDumpName);
1577 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwners hipEdge(classesDump->guid(), heapsDump->guid()); 1520 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwners hipEdge(classesDump->guid(), heapsDump->guid());
1578 } 1521 }
1579 1522
1580 } // namespace blink 1523 } // namespace blink
OLDNEW
« no previous file with comments | « third_party/WebKit/Source/platform/heap/ThreadState.h ('k') | third_party/WebKit/Source/platform/testing/RunAllTests.cpp » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698