Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(44)

Side by Side Diff: Source/platform/heap/ThreadState.cpp

Issue 616483002: Oilpan: Replace the positive heap-contains cache with a binary search tree of memory regions. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: RC2 Created 6 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 292 matching lines...) Expand 10 before | Expand all | Expand 10 after
303 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) 303 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
304 , m_safePointScopeMarker(0) 304 , m_safePointScopeMarker(0)
305 , m_atSafePoint(false) 305 , m_atSafePoint(false)
306 , m_interruptors() 306 , m_interruptors()
307 , m_gcRequested(false) 307 , m_gcRequested(false)
308 , m_forcePreciseGCForTesting(false) 308 , m_forcePreciseGCForTesting(false)
309 , m_sweepRequested(0) 309 , m_sweepRequested(0)
310 , m_sweepInProgress(false) 310 , m_sweepInProgress(false)
311 , m_noAllocationCount(0) 311 , m_noAllocationCount(0)
312 , m_inGC(false) 312 , m_inGC(false)
313 , m_heapContainsCache(adoptPtr(new HeapContainsCache()))
314 , m_isTerminating(false) 313 , m_isTerminating(false)
315 , m_lowCollectionRate(false) 314 , m_lowCollectionRate(false)
316 , m_numberOfSweeperTasks(0) 315 , m_numberOfSweeperTasks(0)
317 #if defined(ADDRESS_SANITIZER) 316 #if defined(ADDRESS_SANITIZER)
318 , m_asanFakeStack(__asan_get_current_fake_stack()) 317 , m_asanFakeStack(__asan_get_current_fake_stack())
319 #endif 318 #endif
320 { 319 {
321 ASSERT(!**s_threadSpecific); 320 ASSERT(!**s_threadSpecific);
322 **s_threadSpecific = this; 321 **s_threadSpecific = this;
323 322
(...skipping 250 matching lines...) Expand 10 before | Expand all | Expand 10 after
574 visitAsanFakeStackForPointer(visitor, ptr); 573 visitAsanFakeStackForPointer(visitor, ptr);
575 } 574 }
576 } 575 }
577 576
578 void ThreadState::visitPersistents(Visitor* visitor) 577 void ThreadState::visitPersistents(Visitor* visitor)
579 { 578 {
580 m_persistents->trace(visitor); 579 m_persistents->trace(visitor);
581 WrapperPersistentRegion::trace(m_liveWrapperPersistents, visitor); 580 WrapperPersistentRegion::trace(m_liveWrapperPersistents, visitor);
582 } 581 }
583 582
584 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address)
585 {
586 // If thread is terminating ignore conservative pointers.
587 if (m_isTerminating)
588 return false;
589
590 // This checks for normal pages and for large objects which span the extent
591 // of several normal pages.
592 BaseHeapPage* page = heapPageFromAddress(address);
593 if (page) {
594 page->checkAndMarkPointer(visitor, address);
595 // Whether or not the pointer was within an object it was certainly
596 // within a page that is part of the heap, so we don't want to ask the
 597 // other heaps or put this address in the
598 // HeapDoesNotContainCache.
599 return true;
600 }
601
602 return false;
603 }
604
605 #if ENABLE(GC_PROFILE_MARKING) 583 #if ENABLE(GC_PROFILE_MARKING)
606 const GCInfo* ThreadState::findGCInfo(Address address) 584 const GCInfo* ThreadState::findGCInfo(Address address)
607 { 585 {
608 BaseHeapPage* page = heapPageFromAddress(address); 586 BaseHeapPage* page = heapPageFromAddress(address);
609 if (page) { 587 if (page) {
610 return page->findGCInfo(address); 588 return page->findGCInfo(address);
611 } 589 }
612 return 0; 590 return 0;
613 } 591 }
614 #endif 592 #endif
(...skipping 262 matching lines...) Expand 10 before | Expand all | Expand 10 after
877 heap->makeConsistentForSweeping(); 855 heap->makeConsistentForSweeping();
 878 // If a new GC is requested before this thread got around to sweep, i.e. due to the 856 // If a new GC is requested before this thread got around to sweep, i.e. due to the
 879 // thread doing a long running operation, we clear the mark bits and mark any of 857 // thread doing a long running operation, we clear the mark bits and mark any of
 880 // the dead objects as dead. The latter is used to ensure the next GC marking does 858 // the dead objects as dead. The latter is used to ensure the next GC marking does
 881 // not trace already dead objects. If we trace a dead object we could end up tracing 859 // not trace already dead objects. If we trace a dead object we could end up tracing
 882 // into garbage or the middle of another object via the newly conservatively found 860 // into garbage or the middle of another object via the newly conservatively found
883 // object. 861 // object.
884 if (sweepRequested()) 862 if (sweepRequested())
885 heap->clearLiveAndMarkDead(); 863 heap->clearLiveAndMarkDead();
886 } 864 }
865 // Add the regions allocated by this thread to the region search tree.
866 for (size_t i = 0; i < m_allocatedRegionsSinceLastGC.size(); ++i)
867 Heap::addPageMemoryRegion(m_allocatedRegionsSinceLastGC[i]);
868 m_allocatedRegionsSinceLastGC.clear();
887 setSweepRequested(); 869 setSweepRequested();
888 } 870 }
889 871
890 void ThreadState::setupHeapsForTermination() 872 void ThreadState::setupHeapsForTermination()
891 { 873 {
892 for (int i = 0; i < NumberOfHeaps; i++) 874 for (int i = 0; i < NumberOfHeaps; i++)
893 m_heaps[i]->prepareHeapForTermination(); 875 m_heaps[i]->prepareHeapForTermination();
894 } 876 }
895 877
896 BaseHeapPage* ThreadState::heapPageFromAddress(Address address) 878 BaseHeapPage* ThreadState::heapPageFromAddress(Address address)
897 { 879 {
898 BaseHeapPage* cachedPage = heapContainsCache()->lookup(address);
899 #if !ENABLE(ASSERT)
900 if (cachedPage)
901 return cachedPage;
902 #endif
903
904 for (int i = 0; i < NumberOfHeaps; i++) { 880 for (int i = 0; i < NumberOfHeaps; i++) {
905 BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address); 881 if (BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address))
906 if (page) {
907 // Asserts that make sure heapPageFromAddress takes addresses from
908 // the whole aligned blinkPageSize memory area. This is necessary
909 // for the negative cache to work.
 910 ASSERT(page->isLargeObject() || page == m_heaps[i]->heapPageFromAddress(roundToBlinkPageStart(address)));
911 if (roundToBlinkPageStart(address) != roundToBlinkPageEnd(address))
 912 ASSERT(page->isLargeObject() || page == m_heaps[i]->heapPageFromAddress(roundToBlinkPageEnd(address) - 1));
913 ASSERT(!cachedPage || page == cachedPage);
914 if (!cachedPage)
915 heapContainsCache()->addEntry(address, page);
916 return page; 882 return page;
917 }
918 } 883 }
919 ASSERT(!cachedPage);
920 return 0; 884 return 0;
921 } 885 }
922 886
923 void ThreadState::getStats(HeapStats& stats) 887 void ThreadState::getStats(HeapStats& stats)
924 { 888 {
925 stats = m_stats; 889 stats = m_stats;
926 #if ENABLE(ASSERT) 890 #if ENABLE(ASSERT)
927 if (isConsistentForSweeping()) { 891 if (isConsistentForSweeping()) {
928 HeapStats scannedStats; 892 HeapStats scannedStats;
929 for (int i = 0; i < NumberOfHeaps; i++) 893 for (int i = 0; i < NumberOfHeaps; i++)
(...skipping 332 matching lines...) Expand 10 before | Expand all | Expand 10 after
1262 return gcInfo; 1226 return gcInfo;
1263 } 1227 }
1264 } 1228 }
1265 if (needLockForIteration) 1229 if (needLockForIteration)
1266 threadAttachMutex().unlock(); 1230 threadAttachMutex().unlock();
1267 return 0; 1231 return 0;
1268 } 1232 }
1269 #endif 1233 #endif
1270 1234
1271 } 1235 }
OLDNEW
« Source/platform/heap/ThreadState.h ('K') | « Source/platform/heap/ThreadState.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698