Chromium Code Reviews

Unified Diff: third_party/WebKit/Source/platform/heap/Heap.cpp

Issue 2697703005: Remove ThreadHeap::m_threads (Closed)
Patch Set: Created 3 years, 10 months ago
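
At a glance, this CL collapses ThreadHeap's per-thread bookkeeping into a single owning thread. A minimal sketch of the shape of the change (classes abbreviated for illustration; member types assume the matching declarations in Heap.h):

// Before this CL: a ThreadHeap tracked every attached thread in a set,
// guarded by m_threadAttachMutex, and GC entry points iterated over it.
class ThreadHeap {
  // ...
  HashSet<ThreadState*> m_threads;
};

// After this CL: a ThreadHeap belongs to exactly one thread, so the set is
// replaced by a single pointer and the per-heap iteration loops go away.
class ThreadHeap {
  // ...
  ThreadState* m_threadState;
};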
 /*
  * Copyright (C) 2013 Google Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met:
  *
  *     * Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above
(...skipping 128 matching lines...)
   atomicAdd(&m_allocatedSpace, static_cast<long>(delta));
   ProcessHeap::increaseTotalAllocatedSpace(delta);
 }
 
 void ThreadHeapStats::decreaseAllocatedSpace(size_t delta) {
   atomicSubtract(&m_allocatedSpace, static_cast<long>(delta));
   ProcessHeap::decreaseTotalAllocatedSpace(delta);
 }
 
 ThreadHeap::ThreadHeap()
-    : m_regionTree(WTF::makeUnique<RegionTree>()),
+    : m_threadState(nullptr),
+      m_regionTree(WTF::makeUnique<RegionTree>()),
       m_heapDoesNotContainCache(WTF::wrapUnique(new HeapDoesNotContainCache)),
       m_safePointBarrier(WTF::makeUnique<SafePointBarrier>()),
       m_freePagePool(WTF::wrapUnique(new PagePool)),
       m_markingStack(CallbackStack::create()),
       m_postMarkingCallbackStack(CallbackStack::create()),
       m_globalWeakCallbackStack(CallbackStack::create()),
       m_ephemeronStack(CallbackStack::create()) {
   if (ThreadState::current()->isMainThread())
     s_mainThreadHeap = this;
 
   MutexLocker locker(ThreadHeap::allHeapsMutex());
   allHeaps().insert(this);
 }
 
 ThreadHeap::~ThreadHeap() {
   MutexLocker locker(ThreadHeap::allHeapsMutex());
   allHeaps().remove(this);
 }
 
 RecursiveMutex& ThreadHeap::allHeapsMutex() {
   DEFINE_THREAD_SAFE_STATIC_LOCAL(RecursiveMutex, mutex, (new RecursiveMutex));
   return mutex;
 }
 
 HashSet<ThreadHeap*>& ThreadHeap::allHeaps() {
   DEFINE_STATIC_LOCAL(HashSet<ThreadHeap*>, heaps, ());
   return heaps;
 }
 
-void ThreadHeap::attach(ThreadState* thread) {
-  MutexLocker locker(m_threadAttachMutex);
-  m_threads.insert(thread);
+void ThreadHeap::attach(ThreadState* threadState) {
+  DCHECK(!m_threadState);
+  m_threadState = threadState;
 }
 
-void ThreadHeap::detach(ThreadState* thread) {
-  ASSERT(ThreadState::current() == thread);
-  bool isLastThread = false;
-  {
-    // Grab the threadAttachMutex to ensure only one thread can shutdown at
-    // a time and that no other thread can do a global GC. It also allows
-    // safe iteration of the m_threads set which happens as part of
-    // thread local GC asserts. We enter a safepoint while waiting for the
-    // lock to avoid a dead-lock where another thread has already requested
-    // GC.
-    MutexLocker locker(m_threadAttachMutex);
-    thread->runTerminationGC();
-    ASSERT(m_threads.contains(thread));
-    m_threads.remove(thread);
-    isLastThread = m_threads.isEmpty();
-  }
-  if (thread->isMainThread())
+void ThreadHeap::detach(ThreadState* threadState) {
+  DCHECK(ThreadState::current() == threadState);
+  DCHECK(m_threadState == threadState);
+  threadState->runTerminationGC();
+  if (threadState->isMainThread())
     DCHECK_EQ(heapStats().allocatedSpace(), 0u);
-  if (isLastThread)
-    delete this;
+  delete this;
sof 2017/02/15 08:26:32 Would it work for |ThreadState::m_heap| to be a |s
 }
 
 #if DCHECK_IS_ON()
 BasePage* ThreadHeap::findPageFromAddress(Address address) {
-  MutexLocker locker(m_threadAttachMutex);
-  for (ThreadState* state : m_threads) {
-    if (BasePage* page = state->findPageFromAddress(address))
-      return page;
-  }
-  return nullptr;
+  return m_threadState->findPageFromAddress(address);
 }
 
 bool ThreadHeap::isAtSafePoint() {
-  MutexLocker locker(m_threadAttachMutex);
-  for (ThreadState* state : m_threads) {
-    if (!state->isAtSafePoint())
-      return false;
-  }
-  return true;
+  return m_threadState->isAtSafePoint();
 }
 #endif
 
 Address ThreadHeap::checkAndMarkPointer(Visitor* visitor, Address address) {
   ASSERT(ThreadState::current()->isInGC());
 
 #if !DCHECK_IS_ON()
   if (m_heapDoesNotContainCache->lookup(address))
     return nullptr;
 #endif
(...skipping 131 matching lines...)
 
 void ThreadHeap::decommitCallbackStacks() {
   m_markingStack->decommit();
   m_postMarkingCallbackStack->decommit();
   m_globalWeakCallbackStack->decommit();
   m_ephemeronStack->decommit();
 }
 
 void ThreadHeap::preGC() {
   ASSERT(!ThreadState::current()->isInGC());
-  for (ThreadState* state : m_threads)
-    state->preGC();
+  m_threadState->preGC();
 }
 
 void ThreadHeap::postGC(BlinkGC::GCType gcType) {
   ASSERT(ThreadState::current()->isInGC());
-  for (ThreadState* state : m_threads)
-    state->postGC(gcType);
+  m_threadState->postGC(gcType);
 }
 
 void ThreadHeap::preSweep(BlinkGC::GCType gcType) {
-  for (ThreadState* state : m_threads)
-    state->preSweep(gcType);
+  m_threadState->preSweep(gcType);
 }
 
 void ThreadHeap::processMarkingStack(Visitor* visitor) {
   // Ephemeron fixed point loop.
   do {
     {
       // Iteratively mark all objects that are reachable from the objects
       // currently pushed onto the marking stack.
       TRACE_EVENT0("blink_gc", "ThreadHeap::processMarkingStackSingleThreaded");
       while (popAndInvokeTraceCallback(visitor)) {
(...skipping 131 matching lines...)
528 "ThreadHeap::partitionAllocSizeAtLastGCKB", 503 "ThreadHeap::partitionAllocSizeAtLastGCKB",
529 std::min(heap.heapStats().partitionAllocSizeAtLastGC() / 1024, 504 std::min(heap.heapStats().partitionAllocSizeAtLastGC() / 1024,
530 static_cast<size_t>(INT_MAX))); 505 static_cast<size_t>(INT_MAX)));
531 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), 506 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
532 "Partitions::totalSizeOfCommittedPagesKB", 507 "Partitions::totalSizeOfCommittedPagesKB",
533 std::min(WTF::Partitions::totalSizeOfCommittedPages() / 1024, 508 std::min(WTF::Partitions::totalSizeOfCommittedPages() / 1024,
534 static_cast<size_t>(INT_MAX))); 509 static_cast<size_t>(INT_MAX)));
535 } 510 }
536 511
537 size_t ThreadHeap::objectPayloadSizeForTesting() { 512 size_t ThreadHeap::objectPayloadSizeForTesting() {
538 // MEMO: is threadAttachMutex locked?
539 size_t objectPayloadSize = 0; 513 size_t objectPayloadSize = 0;
540 for (ThreadState* state : m_threads) { 514 m_threadState->setGCState(ThreadState::GCRunning);
541 state->setGCState(ThreadState::GCRunning); 515 m_threadState->makeConsistentForGC();
542 state->makeConsistentForGC(); 516 objectPayloadSize += m_threadState->objectPayloadSizeForTesting();
543 objectPayloadSize += state->objectPayloadSizeForTesting(); 517 m_threadState->setGCState(ThreadState::Sweeping);
544 state->setGCState(ThreadState::Sweeping); 518 m_threadState->setGCState(ThreadState::NoGCScheduled);
545 state->setGCState(ThreadState::NoGCScheduled);
546 }
547 return objectPayloadSize; 519 return objectPayloadSize;
548 } 520 }
 
 void ThreadHeap::visitPersistentRoots(Visitor* visitor) {
   ASSERT(ThreadState::current()->isInGC());
   TRACE_EVENT0("blink_gc", "ThreadHeap::visitPersistentRoots");
   ProcessHeap::crossThreadPersistentRegion().tracePersistentNodes(visitor);
 
-  for (ThreadState* state : m_threads)
-    state->visitPersistents(visitor);
+  m_threadState->visitPersistents(visitor);
 }
 
 void ThreadHeap::visitStackRoots(Visitor* visitor) {
   ASSERT(ThreadState::current()->isInGC());
   TRACE_EVENT0("blink_gc", "ThreadHeap::visitStackRoots");
-  for (ThreadState* state : m_threads)
-    state->visitStack(visitor);
+  m_threadState->visitStack(visitor);
 }
 
 void ThreadHeap::enterSafePoint(ThreadState* threadState) {
   m_safePointBarrier->enterSafePoint(threadState);
 }
 
 void ThreadHeap::leaveSafePoint() {
   m_safePointBarrier->leaveSafePoint();
 }
 
 BasePage* ThreadHeap::lookupPageForAddress(Address address) {
   ASSERT(ThreadState::current()->isInGC());
   if (PageMemoryRegion* region = m_regionTree->lookup(address)) {
     return region->pageFromAddress(address);
   }
   return nullptr;
 }
 
 void ThreadHeap::resetHeapCounters() {
   ASSERT(ThreadState::current()->isInGC());
 
   ThreadHeap::reportMemoryUsageForTracing();
 
   ProcessHeap::decreaseTotalAllocatedObjectSize(m_stats.allocatedObjectSize());
   ProcessHeap::decreaseTotalMarkedObjectSize(m_stats.markedObjectSize());
 
   m_stats.reset();
-  for (ThreadState* state : m_threads)
-    state->resetHeapCounters();
+  m_threadState->resetHeapCounters();
 }
 
 ThreadHeap* ThreadHeap::s_mainThreadHeap = nullptr;
 
 }  // namespace blink
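
Behaviorally, the new attach()/detach() pair enforces a one-to-one pairing between a ThreadState and its ThreadHeap, and detach() now always destroys the heap after the termination GC (there is no longer a last-thread check). A hypothetical caller-side sketch of that contract; the function name and call site are invented for illustration and are not code from this CL:

// Illustrative only; in Blink these calls happen inside ThreadState setup
// and teardown rather than from an arbitrary caller.
void exampleThreadLifetime(ThreadHeap* heap, ThreadState* state) {
  heap->attach(state);  // DCHECKs that no ThreadState was attached yet.
  // ... the thread runs and allocates on its heap ...
  heap->detach(state);  // must run on the owning thread; performs the
                        // termination GC, then deletes the heap itself.
}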
