Chromium Code Reviews

Unified Diff: third_party/WebKit/Source/platform/heap/Heap.cpp

Issue 2697703005: Remove ThreadHeap::m_threads (Closed)
Patch Set: temp Created 3 years, 10 months ago
 /*
  * Copyright (C) 2013 Google Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met:
  *
  *     * Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above
(...skipping 127 matching lines...)
 void ThreadHeapStats::increaseAllocatedSpace(size_t delta) {
   atomicAdd(&m_allocatedSpace, static_cast<long>(delta));
   ProcessHeap::increaseTotalAllocatedSpace(delta);
 }
 
 void ThreadHeapStats::decreaseAllocatedSpace(size_t delta) {
   atomicSubtract(&m_allocatedSpace, static_cast<long>(delta));
   ProcessHeap::decreaseTotalAllocatedSpace(delta);
 }
 
-ThreadHeap::ThreadHeap()
-    : m_regionTree(WTF::makeUnique<RegionTree>()),
+ThreadHeap::ThreadHeap(ThreadState* threadState)
+    : m_threadState(threadState),
+      m_regionTree(WTF::makeUnique<RegionTree>()),
       m_heapDoesNotContainCache(WTF::wrapUnique(new HeapDoesNotContainCache)),
       m_freePagePool(WTF::wrapUnique(new PagePool)),
       m_markingStack(CallbackStack::create()),
       m_postMarkingCallbackStack(CallbackStack::create()),
       m_globalWeakCallbackStack(CallbackStack::create()),
       m_ephemeronStack(CallbackStack::create()) {
   if (ThreadState::current()->isMainThread())
     s_mainThreadHeap = this;
 }
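
With this change a ThreadHeap is constructed for exactly one ThreadState, which it keeps as m_threadState. A minimal sketch of the wiring this implies at the call site, assuming the owning ThreadState creates the heap itself (the m_heap member and the construction site are illustrative assumptions, not code from this patch):

// Hypothetical call site, for illustration only: the thread that sets up its
// ThreadState also creates the ThreadHeap, passing itself as the one and only
// owning thread. The m_heap member name is assumed, not part of this CL.
ThreadState::ThreadState()
    : m_heap(WTF::wrapUnique(new ThreadHeap(this))) {
  // ... remaining ThreadState initialization ...
}
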
 
 ThreadHeap::~ThreadHeap() {
 }
 
-void ThreadHeap::attach(ThreadState* thread) {
-  MutexLocker locker(m_threadAttachMutex);
-  m_threads.insert(thread);
-}
-
-void ThreadHeap::detach(ThreadState* thread) {
-  ASSERT(ThreadState::current() == thread);
-  bool isLastThread = false;
-  {
-    // Grab the threadAttachMutex to ensure only one thread can shutdown at
-    // a time and that no other thread can do a global GC. It also allows
-    // safe iteration of the m_threads set which happens as part of
-    // thread local GC asserts. We enter a safepoint while waiting for the
-    // lock to avoid a dead-lock where another thread has already requested
-    // GC.
-    MutexLocker locker(m_threadAttachMutex);
-    thread->runTerminationGC();
-    ASSERT(m_threads.contains(thread));
-    m_threads.erase(thread);
-    isLastThread = m_threads.isEmpty();
-  }
-  if (thread->isMainThread())
-    DCHECK_EQ(heapStats().allocatedSpace(), 0u);
-  if (isLastThread)
-    delete this;
-}
-
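
With attach() and detach() gone, the threadAttachMutex bookkeeping described in the removed comment no longer applies: there is no m_threads set to guard or iterate, and no "last thread deletes the heap" hand-off. A rough sketch of what per-thread shutdown reduces to under the one-heap-per-thread model (the function name and its placement in ThreadState are assumptions for illustration, not code from this patch):

// Hypothetical shutdown path: no mutex and no registration set, just the
// termination GC before the heap goes away together with its ThreadState.
void ThreadState::detachCurrentThread() {
  DCHECK(ThreadState::current() == this);
  runTerminationGC();
  if (isMainThread())
    DCHECK_EQ(heap().heapStats().allocatedSpace(), 0u);
  // The ThreadHeap is destroyed with this ThreadState; its lifetime no longer
  // depends on the last detaching thread.
}
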
 #if DCHECK_IS_ON()
 BasePage* ThreadHeap::findPageFromAddress(Address address) {
-  MutexLocker locker(m_threadAttachMutex);
-  for (ThreadState* state : m_threads) {
-    if (BasePage* page = state->findPageFromAddress(address))
-      return page;
-  }
-  return nullptr;
+  return m_threadState->findPageFromAddress(address);
 }
 #endif
 
 Address ThreadHeap::checkAndMarkPointer(Visitor* visitor, Address address) {
   ASSERT(ThreadState::current()->isInGC());
 
 #if !DCHECK_IS_ON()
   if (m_heapDoesNotContainCache->lookup(address))
     return nullptr;
 #endif
(...skipping 131 matching lines...)
 
 void ThreadHeap::decommitCallbackStacks() {
   m_markingStack->decommit();
   m_postMarkingCallbackStack->decommit();
   m_globalWeakCallbackStack->decommit();
   m_ephemeronStack->decommit();
 }
 
 void ThreadHeap::preGC() {
   ASSERT(!ThreadState::current()->isInGC());
-  for (ThreadState* state : m_threads)
-    state->preGC();
+  m_threadState->preGC();
 }
 
 void ThreadHeap::postGC(BlinkGC::GCType gcType) {
   ASSERT(ThreadState::current()->isInGC());
-  for (ThreadState* state : m_threads)
-    state->postGC(gcType);
+  m_threadState->postGC(gcType);
 }
 
 void ThreadHeap::preSweep(BlinkGC::GCType gcType) {
-  for (ThreadState* state : m_threads)
-    state->preSweep(gcType);
+  m_threadState->preSweep(gcType);
 }
 
 void ThreadHeap::processMarkingStack(Visitor* visitor) {
   // Ephemeron fixed point loop.
   do {
     {
       // Iteratively mark all objects that are reachable from the objects
       // currently pushed onto the marking stack.
       TRACE_EVENT0("blink_gc", "ThreadHeap::processMarkingStackSingleThreaded");
       while (popAndInvokeTraceCallback(visitor)) {
(...skipping 131 matching lines...)
                  "ThreadHeap::partitionAllocSizeAtLastGCKB",
                  std::min(heap.heapStats().partitionAllocSizeAtLastGC() / 1024,
                           static_cast<size_t>(INT_MAX)));
   TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
                  "Partitions::totalSizeOfCommittedPagesKB",
                  std::min(WTF::Partitions::totalSizeOfCommittedPages() / 1024,
                           static_cast<size_t>(INT_MAX)));
 }
 
 size_t ThreadHeap::objectPayloadSizeForTesting() {
-  // MEMO: is threadAttachMutex locked?
   size_t objectPayloadSize = 0;
-  for (ThreadState* state : m_threads) {
-    state->setGCState(ThreadState::GCRunning);
-    state->makeConsistentForGC();
-    objectPayloadSize += state->objectPayloadSizeForTesting();
-    state->setGCState(ThreadState::Sweeping);
-    state->setGCState(ThreadState::NoGCScheduled);
-  }
+  m_threadState->setGCState(ThreadState::GCRunning);
+  m_threadState->makeConsistentForGC();
+  objectPayloadSize += m_threadState->objectPayloadSizeForTesting();
+  m_threadState->setGCState(ThreadState::Sweeping);
+  m_threadState->setGCState(ThreadState::NoGCScheduled);
   return objectPayloadSize;
 }
 
 void ThreadHeap::visitPersistentRoots(Visitor* visitor) {
   ASSERT(ThreadState::current()->isInGC());
   TRACE_EVENT0("blink_gc", "ThreadHeap::visitPersistentRoots");
   ProcessHeap::crossThreadPersistentRegion().tracePersistentNodes(visitor);
 
-  for (ThreadState* state : m_threads)
-    state->visitPersistents(visitor);
+  m_threadState->visitPersistents(visitor);
 }
 
 void ThreadHeap::visitStackRoots(Visitor* visitor) {
   ASSERT(ThreadState::current()->isInGC());
   TRACE_EVENT0("blink_gc", "ThreadHeap::visitStackRoots");
-  for (ThreadState* state : m_threads)
-    state->visitStack(visitor);
+  m_threadState->visitStack(visitor);
 }
 
 BasePage* ThreadHeap::lookupPageForAddress(Address address) {
   ASSERT(ThreadState::current()->isInGC());
   if (PageMemoryRegion* region = m_regionTree->lookup(address)) {
     return region->pageFromAddress(address);
   }
   return nullptr;
 }
 
 void ThreadHeap::resetHeapCounters() {
   ASSERT(ThreadState::current()->isInGC());
 
   ThreadHeap::reportMemoryUsageForTracing();
 
   ProcessHeap::decreaseTotalAllocatedObjectSize(m_stats.allocatedObjectSize());
   ProcessHeap::decreaseTotalMarkedObjectSize(m_stats.markedObjectSize());
 
   m_stats.reset();
-  for (ThreadState* state : m_threads)
-    state->resetHeapCounters();
+  m_threadState->resetHeapCounters();
 }
 
 ThreadHeap* ThreadHeap::s_mainThreadHeap = nullptr;
 
 }  // namespace blink