Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1696)

Side by Side Diff: Source/platform/heap/ThreadState.cpp

Issue 618353004: Revert "Oilpan: Replace the positive heap-contains cache with a binary search tree of memory region… (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « Source/platform/heap/ThreadState.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 251 matching lines...) Expand 10 before | Expand all | Expand 10 after
262 barrier->doEnterSafePoint(state, stackEnd); 262 barrier->doEnterSafePoint(state, stackEnd);
263 } 263 }
264 264
265 volatile int m_canResume; 265 volatile int m_canResume;
266 volatile int m_unparkedThreadCount; 266 volatile int m_unparkedThreadCount;
267 Mutex m_mutex; 267 Mutex m_mutex;
268 ThreadCondition m_parked; 268 ThreadCondition m_parked;
269 ThreadCondition m_resume; 269 ThreadCondition m_resume;
270 }; 270 };
271 271
272 BaseHeapPage::BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadStat e* state)
273 : m_storage(storage)
274 , m_gcInfo(gcInfo)
275 , m_threadState(state)
276 , m_terminating(false)
277 , m_tracedAfterOrphaned(false)
278 {
279 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
280 }
281
272 // Statically unfold the heap initialization loop so the compiler statically 282 // Statically unfold the heap initialization loop so the compiler statically
273 // knows the heap index when using HeapIndexTrait. 283 // knows the heap index when using HeapIndexTrait.
274 template<int num> struct InitializeHeaps { 284 template<int num> struct InitializeHeaps {
275 static const int index = num - 1; 285 static const int index = num - 1;
276 static void init(BaseHeap** heaps, ThreadState* state) 286 static void init(BaseHeap** heaps, ThreadState* state)
277 { 287 {
278 InitializeHeaps<index>::init(heaps, state); 288 InitializeHeaps<index>::init(heaps, state);
279 heaps[index] = new typename HeapIndexTrait<index>::HeapType(state, index ); 289 heaps[index] = new typename HeapIndexTrait<index>::HeapType(state, index );
280 } 290 }
281 }; 291 };
(...skipping 11 matching lines...) Expand all
293 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) 303 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
294 , m_safePointScopeMarker(0) 304 , m_safePointScopeMarker(0)
295 , m_atSafePoint(false) 305 , m_atSafePoint(false)
296 , m_interruptors() 306 , m_interruptors()
297 , m_gcRequested(false) 307 , m_gcRequested(false)
298 , m_forcePreciseGCForTesting(false) 308 , m_forcePreciseGCForTesting(false)
299 , m_sweepRequested(0) 309 , m_sweepRequested(0)
300 , m_sweepInProgress(false) 310 , m_sweepInProgress(false)
301 , m_noAllocationCount(0) 311 , m_noAllocationCount(0)
302 , m_inGC(false) 312 , m_inGC(false)
313 , m_heapContainsCache(adoptPtr(new HeapContainsCache()))
303 , m_isTerminating(false) 314 , m_isTerminating(false)
304 , m_lowCollectionRate(false) 315 , m_lowCollectionRate(false)
305 , m_numberOfSweeperTasks(0) 316 , m_numberOfSweeperTasks(0)
306 #if defined(ADDRESS_SANITIZER) 317 #if defined(ADDRESS_SANITIZER)
307 , m_asanFakeStack(__asan_get_current_fake_stack()) 318 , m_asanFakeStack(__asan_get_current_fake_stack())
308 #endif 319 #endif
309 { 320 {
310 ASSERT(!**s_threadSpecific); 321 ASSERT(!**s_threadSpecific);
311 **s_threadSpecific = this; 322 **s_threadSpecific = this;
312 323
(...skipping 250 matching lines...) Expand 10 before | Expand all | Expand 10 after
563 visitAsanFakeStackForPointer(visitor, ptr); 574 visitAsanFakeStackForPointer(visitor, ptr);
564 } 575 }
565 } 576 }
566 577
// Traces this thread's persistent roots: the persistent node region and the
// live wrapper persistents list.
void ThreadState::visitPersistents(Visitor* visitor)
{
    m_persistents->trace(visitor);
    WrapperPersistentRegion::trace(m_liveWrapperPersistents, visitor);
}
572 583
// Conservative stack-scanning support: if |address| falls within a heap page
// owned by this thread, delegates to the page's checkAndMarkPointer and
// returns true. Returns true whenever the address is inside one of this
// thread's pages, regardless of whether it pointed at a live object.
bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address)
{
    // If thread is terminating ignore conservative pointers.
    if (m_isTerminating)
        return false;

    // This checks for normal pages and for large objects which span the extent
    // of several normal pages.
    BaseHeapPage* page = heapPageFromAddress(address);
    if (page) {
        page->checkAndMarkPointer(visitor, address);
        // Whether or not the pointer was within an object it was certainly
        // within a page that is part of the heap, so we don't want to ask the
        // other heaps or put this address in the
        // HeapDoesNotContainCache.
        return true;
    }

    return false;
}
604
573 #if ENABLE(GC_PROFILE_MARKING) 605 #if ENABLE(GC_PROFILE_MARKING)
574 const GCInfo* ThreadState::findGCInfo(Address address) 606 const GCInfo* ThreadState::findGCInfo(Address address)
575 { 607 {
576 BaseHeapPage* page = heapPageFromAddress(address); 608 BaseHeapPage* page = heapPageFromAddress(address);
577 if (page) { 609 if (page) {
578 return page->findGCInfo(address); 610 return page->findGCInfo(address);
579 } 611 }
580 return 0; 612 return 0;
581 } 613 }
582 #endif 614 #endif
(...skipping 262 matching lines...) Expand 10 before | Expand all | Expand 10 after
845 heap->makeConsistentForSweeping(); 877 heap->makeConsistentForSweeping();
846 // If a new GC is requested before this thread got around to sweep, ie. due to the 878 // If a new GC is requested before this thread got around to sweep, ie. due to the
847 // thread doing a long running operation, we clear the mark bits and mar k any of 879 // thread doing a long running operation, we clear the mark bits and mar k any of
848 // the dead objects as dead. The latter is used to ensure the next GC ma rking does 880 // the dead objects as dead. The latter is used to ensure the next GC ma rking does
849 // not trace already dead objects. If we trace a dead object we could en d up tracing 881 // not trace already dead objects. If we trace a dead object we could en d up tracing
850 // into garbage or the middle of another object via the newly conservati vely found 882 // into garbage or the middle of another object via the newly conservati vely found
851 // object. 883 // object.
852 if (sweepRequested()) 884 if (sweepRequested())
853 heap->clearLiveAndMarkDead(); 885 heap->clearLiveAndMarkDead();
854 } 886 }
855 // Add the regions allocated by this thread to the region search tree.
856 for (size_t i = 0; i < m_allocatedRegionsSinceLastGC.size(); ++i)
857 Heap::addPageMemoryRegion(m_allocatedRegionsSinceLastGC[i]);
858 m_allocatedRegionsSinceLastGC.clear();
859 setSweepRequested(); 887 setSweepRequested();
860 } 888 }
861 889
862 void ThreadState::setupHeapsForTermination() 890 void ThreadState::setupHeapsForTermination()
863 { 891 {
864 for (int i = 0; i < NumberOfHeaps; i++) 892 for (int i = 0; i < NumberOfHeaps; i++)
865 m_heaps[i]->prepareHeapForTermination(); 893 m_heaps[i]->prepareHeapForTermination();
866 } 894 }
867 895
// Maps |address| to the heap page containing it, or 0 when no heap owned by
// this thread contains the address. Results are memoized in the
// heap-contains cache; in ASSERT builds a cache hit is re-verified against a
// full heap walk instead of being returned early.
BaseHeapPage* ThreadState::heapPageFromAddress(Address address)
{
    BaseHeapPage* cachedPage = heapContainsCache()->lookup(address);
#if !ENABLE(ASSERT)
    // Fast path: trust the positive cache in release builds.
    if (cachedPage)
        return cachedPage;
#endif

    for (int i = 0; i < NumberOfHeaps; i++) {
        BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address);
        if (page) {
            // Asserts that make sure heapPageFromAddress takes addresses from
            // the whole aligned blinkPageSize memory area. This is necessary
            // for the negative cache to work.
            ASSERT(page->isLargeObject() || page == m_heaps[i]->heapPageFromAddress(roundToBlinkPageStart(address)));
            if (roundToBlinkPageStart(address) != roundToBlinkPageEnd(address))
                ASSERT(page->isLargeObject() || page == m_heaps[i]->heapPageFromAddress(roundToBlinkPageEnd(address) - 1));
            // A stale cache entry would return a different page than the heap
            // walk; assert agreement, and populate the cache on a miss.
            ASSERT(!cachedPage || page == cachedPage);
            if (!cachedPage)
                heapContainsCache()->addEntry(address, page);
            return page;
        }
    }
    // In ASSERT builds we fell through even on a cache hit; a hit with no
    // matching page would mean the cache is stale.
    ASSERT(!cachedPage);
    return 0;
}
876 922
877 void ThreadState::getStats(HeapStats& stats) 923 void ThreadState::getStats(HeapStats& stats)
878 { 924 {
879 stats = m_stats; 925 stats = m_stats;
880 #if ENABLE(ASSERT) 926 #if ENABLE(ASSERT)
881 if (isConsistentForSweeping()) { 927 if (isConsistentForSweeping()) {
882 HeapStats scannedStats; 928 HeapStats scannedStats;
883 for (int i = 0; i < NumberOfHeaps; i++) 929 for (int i = 0; i < NumberOfHeaps; i++)
(...skipping 332 matching lines...) Expand 10 before | Expand all | Expand 10 after
1216 return gcInfo; 1262 return gcInfo;
1217 } 1263 }
1218 } 1264 }
1219 if (needLockForIteration) 1265 if (needLockForIteration)
1220 threadAttachMutex().unlock(); 1266 threadAttachMutex().unlock();
1221 return 0; 1267 return 0;
1222 } 1268 }
1223 #endif 1269 #endif
1224 1270
1225 } 1271 }
OLDNEW
« no previous file with comments | « Source/platform/heap/ThreadState.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698