Chromium Code Reviews

Unified Diff: third_party/WebKit/Source/platform/heap/Heap.cpp

Issue 2697703005: Remove ThreadHeap::m_threads (Closed)
Patch Set: temp Created 3 years, 10 months ago
 /*
  * Copyright (C) 2013 Google Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met:
  *
  *     * Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above
(...skipping 127 matching lines...)
 void ThreadHeapStats::increaseAllocatedSpace(size_t delta) {
   atomicAdd(&m_allocatedSpace, static_cast<long>(delta));
   ProcessHeap::increaseTotalAllocatedSpace(delta);
 }
 
 void ThreadHeapStats::decreaseAllocatedSpace(size_t delta) {
   atomicSubtract(&m_allocatedSpace, static_cast<long>(delta));
   ProcessHeap::decreaseTotalAllocatedSpace(delta);
 }
 
-ThreadHeap::ThreadHeap()
-    : m_regionTree(WTF::makeUnique<RegionTree>()),
+ThreadHeap::ThreadHeap(ThreadState* threadState)
+    : m_threadState(threadState),
+      m_regionTree(WTF::makeUnique<RegionTree>()),
       m_heapDoesNotContainCache(WTF::wrapUnique(new HeapDoesNotContainCache)),
       m_safePointBarrier(WTF::makeUnique<SafePointBarrier>()),
       m_freePagePool(WTF::wrapUnique(new PagePool)),
       m_markingStack(CallbackStack::create()),
       m_postMarkingCallbackStack(CallbackStack::create()),
       m_globalWeakCallbackStack(CallbackStack::create()),
       m_ephemeronStack(CallbackStack::create()) {
   if (ThreadState::current()->isMainThread())
     s_mainThreadHeap = this;
 
   MutexLocker locker(ThreadHeap::allHeapsMutex());
   allHeaps().insert(this);
 }
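
Note on the new constructor: ThreadHeap is now tied to exactly one ThreadState, so the owning thread passes its state in at construction time and the attach()/detach() bookkeeping removed below becomes unnecessary. A minimal sketch of how a ThreadState might wire this up (illustrative only; the m_heap member and the use of WTF::makeUnique here are assumptions, not part of this patch):

    // Illustrative sketch, not from this CL: assumes ThreadState owns its heap
    // in a std::unique_ptr<ThreadHeap> m_heap member.
    ThreadState::ThreadState()
        : m_heap(WTF::makeUnique<ThreadHeap>(this)) {
      // Every ThreadState now constructs a dedicated ThreadHeap; there is no
      // shared heap to attach to or detach from.
    }
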
 
 ThreadHeap::~ThreadHeap() {
   MutexLocker locker(ThreadHeap::allHeapsMutex());
   allHeaps().remove(this);
 }
 
 RecursiveMutex& ThreadHeap::allHeapsMutex() {
   DEFINE_THREAD_SAFE_STATIC_LOCAL(RecursiveMutex, mutex, (new RecursiveMutex));
   return mutex;
 }
 
 HashSet<ThreadHeap*>& ThreadHeap::allHeaps() {
   DEFINE_STATIC_LOCAL(HashSet<ThreadHeap*>, heaps, ());
   return heaps;
 }
 
-void ThreadHeap::attach(ThreadState* thread) {
-  MutexLocker locker(m_threadAttachMutex);
-  m_threads.insert(thread);
-}
-
-void ThreadHeap::detach(ThreadState* thread) {
-  ASSERT(ThreadState::current() == thread);
-  bool isLastThread = false;
-  {
-    // Grab the threadAttachMutex to ensure only one thread can shutdown at
-    // a time and that no other thread can do a global GC. It also allows
-    // safe iteration of the m_threads set which happens as part of
-    // thread local GC asserts. We enter a safepoint while waiting for the
-    // lock to avoid a dead-lock where another thread has already requested
-    // GC.
-    MutexLocker locker(m_threadAttachMutex);
-    thread->runTerminationGC();
-    ASSERT(m_threads.contains(thread));
-    m_threads.remove(thread);
-    isLastThread = m_threads.isEmpty();
-  }
-  if (thread->isMainThread())
     sof 2017/02/15 09:18:21: we could move this to the dtor, up to you.
-    DCHECK_EQ(heapStats().allocatedSpace(), 0u);
-  if (isLastThread)
-    delete this;
-}
-
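Regarding sof's comment above: with detach() going away, the main-thread allocated-space check could instead live in ~ThreadHeap(). A hedged sketch of that alternative (not part of this patch set; it assumes m_threadState is still valid when the heap is destroyed):

    ThreadHeap::~ThreadHeap() {
      // Sketch only: the check formerly done in detach() for the main thread,
      // moved into the destructor as suggested in the review.
      if (m_threadState->isMainThread())
        DCHECK_EQ(heapStats().allocatedSpace(), 0u);
      MutexLocker locker(ThreadHeap::allHeapsMutex());
      allHeaps().remove(this);
    }
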
 #if DCHECK_IS_ON()
 BasePage* ThreadHeap::findPageFromAddress(Address address) {
-  MutexLocker locker(m_threadAttachMutex);
-  for (ThreadState* state : m_threads) {
-    if (BasePage* page = state->findPageFromAddress(address))
-      return page;
-  }
-  return nullptr;
+  return m_threadState->findPageFromAddress(address);
 }
 
 bool ThreadHeap::isAtSafePoint() {
-  MutexLocker locker(m_threadAttachMutex);
-  for (ThreadState* state : m_threads) {
-    if (!state->isAtSafePoint())
-      return false;
-  }
-  return true;
+  return m_threadState->isAtSafePoint();
 }
 #endif
 
 Address ThreadHeap::checkAndMarkPointer(Visitor* visitor, Address address) {
   ASSERT(ThreadState::current()->isInGC());
 
 #if !DCHECK_IS_ON()
   if (m_heapDoesNotContainCache->lookup(address))
     return nullptr;
 #endif
(...skipping 131 matching lines...)
 
 void ThreadHeap::decommitCallbackStacks() {
   m_markingStack->decommit();
   m_postMarkingCallbackStack->decommit();
   m_globalWeakCallbackStack->decommit();
   m_ephemeronStack->decommit();
 }
 
 void ThreadHeap::preGC() {
   ASSERT(!ThreadState::current()->isInGC());
-  for (ThreadState* state : m_threads)
-    state->preGC();
+  m_threadState->preGC();
 }
 
 void ThreadHeap::postGC(BlinkGC::GCType gcType) {
   ASSERT(ThreadState::current()->isInGC());
-  for (ThreadState* state : m_threads)
-    state->postGC(gcType);
+  m_threadState->postGC(gcType);
 }
 
 void ThreadHeap::preSweep(BlinkGC::GCType gcType) {
-  for (ThreadState* state : m_threads)
-    state->preSweep(gcType);
+  m_threadState->preSweep(gcType);
 }
 
 void ThreadHeap::processMarkingStack(Visitor* visitor) {
   // Ephemeron fixed point loop.
   do {
     {
       // Iteratively mark all objects that are reachable from the objects
       // currently pushed onto the marking stack.
       TRACE_EVENT0("blink_gc", "ThreadHeap::processMarkingStackSingleThreaded");
       while (popAndInvokeTraceCallback(visitor)) {
(...skipping 131 matching lines...)
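
The "Ephemeron fixed point loop" comment in processMarkingStack() above refers to the usual iterate-until-stable pattern: drain the marking stack, then run ephemeron callbacks (which may mark new objects and push new work), and repeat until neither step produces anything new. A generic sketch of that shape, with hypothetical helper names that do not mirror the elided Blink code:

    // Generic illustration of an ephemeron fixed point; invokeEphemeronCallbacks
    // and its bool return are assumptions for the sketch.
    void markToFixedPoint(Visitor* visitor) {
      bool foundNewWork = true;
      while (foundNewWork) {
        // 1. Trace everything reachable from the current marking stack.
        while (popAndInvokeTraceCallback(visitor)) {
        }
        // 2. Re-check ephemerons: a value is marked only once its key is marked,
        //    which can push fresh entries onto the marking stack.
        foundNewWork = invokeEphemeronCallbacks(visitor);
      }
    }
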
                  "ThreadHeap::partitionAllocSizeAtLastGCKB",
                  std::min(heap.heapStats().partitionAllocSizeAtLastGC() / 1024,
                           static_cast<size_t>(INT_MAX)));
   TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
                  "Partitions::totalSizeOfCommittedPagesKB",
                  std::min(WTF::Partitions::totalSizeOfCommittedPages() / 1024,
                           static_cast<size_t>(INT_MAX)));
 }
 
 size_t ThreadHeap::objectPayloadSizeForTesting() {
-  // MEMO: is threadAttachMutex locked?
   size_t objectPayloadSize = 0;
-  for (ThreadState* state : m_threads) {
-    state->setGCState(ThreadState::GCRunning);
-    state->makeConsistentForGC();
-    objectPayloadSize += state->objectPayloadSizeForTesting();
-    state->setGCState(ThreadState::Sweeping);
-    state->setGCState(ThreadState::NoGCScheduled);
-  }
+  m_threadState->setGCState(ThreadState::GCRunning);
+  m_threadState->makeConsistentForGC();
+  objectPayloadSize += m_threadState->objectPayloadSizeForTesting();
+  m_threadState->setGCState(ThreadState::Sweeping);
+  m_threadState->setGCState(ThreadState::NoGCScheduled);
   return objectPayloadSize;
 }
 
 void ThreadHeap::visitPersistentRoots(Visitor* visitor) {
   ASSERT(ThreadState::current()->isInGC());
   TRACE_EVENT0("blink_gc", "ThreadHeap::visitPersistentRoots");
   ProcessHeap::crossThreadPersistentRegion().tracePersistentNodes(visitor);
 
-  for (ThreadState* state : m_threads)
-    state->visitPersistents(visitor);
+  m_threadState->visitPersistents(visitor);
 }
 
 void ThreadHeap::visitStackRoots(Visitor* visitor) {
   ASSERT(ThreadState::current()->isInGC());
   TRACE_EVENT0("blink_gc", "ThreadHeap::visitStackRoots");
-  for (ThreadState* state : m_threads)
-    state->visitStack(visitor);
+  m_threadState->visitStack(visitor);
 }
 
 void ThreadHeap::enterSafePoint(ThreadState* threadState) {
   m_safePointBarrier->enterSafePoint(threadState);
 }
 
 void ThreadHeap::leaveSafePoint() {
   m_safePointBarrier->leaveSafePoint();
 }
 
 BasePage* ThreadHeap::lookupPageForAddress(Address address) {
   ASSERT(ThreadState::current()->isInGC());
   if (PageMemoryRegion* region = m_regionTree->lookup(address)) {
     return region->pageFromAddress(address);
   }
   return nullptr;
 }
 
 void ThreadHeap::resetHeapCounters() {
   ASSERT(ThreadState::current()->isInGC());
 
   ThreadHeap::reportMemoryUsageForTracing();
 
   ProcessHeap::decreaseTotalAllocatedObjectSize(m_stats.allocatedObjectSize());
   ProcessHeap::decreaseTotalMarkedObjectSize(m_stats.markedObjectSize());
 
   m_stats.reset();
-  for (ThreadState* state : m_threads)
-    state->resetHeapCounters();
+  m_threadState->resetHeapCounters();
 }
 
 ThreadHeap* ThreadHeap::s_mainThreadHeap = nullptr;
 
 }  // namespace blink