Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(6)

Side by Side Diff: Source/platform/heap/Heap.cpp

Issue 616483002: Oilpan: Replace the positive heap-contains cache with a binary search tree of memory regions. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: remove ThreadState::checkAndMarkPointer Created 6 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « Source/platform/heap/Heap.h ('k') | Source/platform/heap/ThreadState.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after
118 , m_size(size) 118 , m_size(size)
119 { 119 {
120 ASSERT(size > 0); 120 ASSERT(size > 0);
121 } 121 }
122 122
123 bool contains(Address addr) const 123 bool contains(Address addr) const
124 { 124 {
125 return m_base <= addr && addr < (m_base + m_size); 125 return m_base <= addr && addr < (m_base + m_size);
126 } 126 }
127 127
128
129 bool contains(const MemoryRegion& other) const 128 bool contains(const MemoryRegion& other) const
130 { 129 {
131 return contains(other.m_base) && contains(other.m_base + other.m_size - 1); 130 return contains(other.m_base) && contains(other.m_base + other.m_size - 1);
132 } 131 }
133 132
134 void release() 133 void release()
135 { 134 {
136 #if OS(POSIX) 135 #if OS(POSIX)
137 int err = munmap(m_base, m_size); 136 int err = munmap(m_base, m_size);
138 RELEASE_ASSERT(!err); 137 RELEASE_ASSERT(!err);
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
180 // whole. The PageMemoryRegion allows us to do that by keeping track 179 // whole. The PageMemoryRegion allows us to do that by keeping track
181 // of the number of pages using it in order to be able to release all 180 // of the number of pages using it in order to be able to release all
182 // of the virtual address space when there are no more pages using it. 181 // of the virtual address space when there are no more pages using it.
183 class PageMemoryRegion : public MemoryRegion { 182 class PageMemoryRegion : public MemoryRegion {
184 public: 183 public:
185 ~PageMemoryRegion() 184 ~PageMemoryRegion()
186 { 185 {
187 release(); 186 release();
188 } 187 }
189 188
190 void pageRemoved() 189 void pageDeleted(Address page)
191 { 190 {
192 if (!--m_numPages) 191 decommitPage(page);
Mads Ager (chromium) 2014/10/01 11:29:01 Can we use another name than decommit/commit here?
zerny-chromium 2014/10/01 12:05:34 Done.
192 if (!--m_numPages) {
193 Heap::removePageMemoryRegion(this);
193 delete this; 194 delete this;
195 }
196 }
197
198 void commitPage(Address page)
199 {
200 ASSERT(!m_committed[index(page)]);
201 m_committed[index(page)] = true;
202 }
203
204 void decommitPage(Address page)
205 {
206 m_committed[index(page)] = false;
194 } 207 }
195 208
196 static PageMemoryRegion* allocate(size_t size, unsigned numPages) 209 static PageMemoryRegion* allocate(size_t size, unsigned numPages)
197 { 210 {
198 ASSERT(Heap::heapDoesNotContainCacheIsEmpty()); 211 ASSERT(Heap::heapDoesNotContainCacheIsEmpty());
199 212
200 // Compute a random blink page aligned address for the page memory 213 // Compute a random blink page aligned address for the page memory
201 // region and attempt to get the memory there. 214 // region and attempt to get the memory there.
 202 Address randomAddress = reinterpret_cast<Address>(WTF::getRandomPageBase()); 215 Address randomAddress = reinterpret_cast<Address>(WTF::getRandomPageBase());
203 Address alignedRandomAddress = roundToBlinkPageBoundary(randomAddress); 216 Address alignedRandomAddress = roundToBlinkPageBoundary(randomAddress);
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after
267 280
268 // FIXME: If base is by accident blink page size aligned 281 // FIXME: If base is by accident blink page size aligned
269 // here then we can create two pages out of reserved 282 // here then we can create two pages out of reserved
270 // space. Do this. 283 // space. Do this.
271 Address alignedBase = roundToBlinkPageBoundary(base); 284 Address alignedBase = roundToBlinkPageBoundary(base);
272 285
273 return new PageMemoryRegion(alignedBase, size, numPages); 286 return new PageMemoryRegion(alignedBase, size, numPages);
274 #endif 287 #endif
275 } 288 }
276 289
290 BaseHeapPage* pageFromAddress(Address address)
291 {
292 ASSERT(contains(address));
293 if (!m_committed[index(address)])
294 return 0;
295 if (m_isLargePage)
296 return pageHeaderFromObject(base());
297 return pageHeaderFromObject(address);
298 }
299
277 private: 300 private:
278 PageMemoryRegion(Address base, size_t size, unsigned numPages) 301 PageMemoryRegion(Address base, size_t size, unsigned numPages)
279 : MemoryRegion(base, size) 302 : MemoryRegion(base, size)
303 , m_isLargePage(numPages == 1)
280 , m_numPages(numPages) 304 , m_numPages(numPages)
281 { 305 {
306 for (size_t i = 0; i < blinkPagesPerRegion; ++i)
307 m_committed[i] = false;
282 } 308 }
283 309
310 unsigned index(Address address)
311 {
312 ASSERT(contains(address));
313 if (m_isLargePage)
314 return 0;
315 return (address - base()) / blinkPageSize;
zerny-chromium 2014/10/01 10:34:59 This was incorrect. Forgot to add the guard page t
zerny-chromium 2014/10/01 11:06:33 No, it is correct. The first guard page is part of
316 }
317
318 bool m_isLargePage;
319 bool m_committed[blinkPagesPerRegion];
zerny-chromium 2014/10/01 09:28:54 Regarding PageMemory state in the region, it seems
Mads Ager (chromium) 2014/10/01 11:29:01 Should we use slightly different terminology here?
zerny-chromium 2014/10/01 12:05:34 Done.
284 unsigned m_numPages; 320 unsigned m_numPages;
285 }; 321 };
286 322
287 // Representation of the memory used for a Blink heap page. 323 // Representation of the memory used for a Blink heap page.
288 // 324 //
289 // The representation keeps track of two memory regions: 325 // The representation keeps track of two memory regions:
290 // 326 //
291 // 1. The virtual memory reserved from the system in order to be able 327 // 1. The virtual memory reserved from the system in order to be able
292 // to free all the virtual memory reserved. Multiple PageMemory 328 // to free all the virtual memory reserved. Multiple PageMemory
293 // instances can share the same reserved memory region and 329 // instances can share the same reserved memory region and
294 // therefore notify the reserved memory region on destruction so 330 // therefore notify the reserved memory region on destruction so
295 // that the system memory can be given back when all PageMemory 331 // that the system memory can be given back when all PageMemory
296 // instances for that memory are gone. 332 // instances for that memory are gone.
297 // 333 //
298 // 2. The writable memory (a sub-region of the reserved virtual 334 // 2. The writable memory (a sub-region of the reserved virtual
299 // memory region) that is used for the actual heap page payload. 335 // memory region) that is used for the actual heap page payload.
300 // 336 //
301 // Guard pages are created before and after the writable memory. 337 // Guard pages are created before and after the writable memory.
302 class PageMemory { 338 class PageMemory {
303 public: 339 public:
304 ~PageMemory() 340 ~PageMemory()
305 { 341 {
306 __lsan_unregister_root_region(m_writable.base(), m_writable.size()); 342 __lsan_unregister_root_region(m_writable.base(), m_writable.size());
307 m_reserved->pageRemoved(); 343 m_reserved->pageDeleted(writableStart());
308 } 344 }
309 345
310 bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); } 346 bool commit() WARN_UNUSED_RETURN
311 void decommit() { m_writable.decommit(); } 347 {
348 m_reserved->commitPage(writableStart());
349 return m_writable.commit();
350 }
351
352 void decommit()
353 {
354 m_reserved->decommitPage(writableStart());
355 m_writable.decommit();
356 }
357
358 PageMemoryRegion* region() { return m_reserved; }
312 359
313 Address writableStart() { return m_writable.base(); } 360 Address writableStart() { return m_writable.base(); }
314 361
315 static PageMemory* setupPageMemoryInRegion(PageMemoryRegion* region, size_t pageOffset, size_t payloadSize) 362 static PageMemory* setupPageMemoryInRegion(PageMemoryRegion* region, size_t pageOffset, size_t payloadSize)
316 { 363 {
317 // Setup the payload one OS page into the page memory. The 364 // Setup the payload one OS page into the page memory. The
318 // first os page is the guard page. 365 // first os page is the guard page.
319 Address payloadAddress = region->base() + pageOffset + osPageSize(); 366 Address payloadAddress = region->base() + pageOffset + osPageSize();
 320 return new PageMemory(region, MemoryRegion(payloadAddress, payloadSize)); 367 return new PageMemory(region, MemoryRegion(payloadAddress, payloadSize));
321 } 368 }
(...skipping 280 matching lines...) Expand 10 before | Expand all | Expand 10 after
602 ThreadHeap<Header>::~ThreadHeap() 649 ThreadHeap<Header>::~ThreadHeap()
603 { 650 {
604 ASSERT(!m_firstPage); 651 ASSERT(!m_firstPage);
605 ASSERT(!m_firstLargeHeapObject); 652 ASSERT(!m_firstLargeHeapObject);
606 } 653 }
607 654
608 template<typename Header> 655 template<typename Header>
609 void ThreadHeap<Header>::cleanupPages() 656 void ThreadHeap<Header>::cleanupPages()
610 { 657 {
611 clearFreeLists(); 658 clearFreeLists();
612 flushHeapContainsCache();
613 659
614 // Add the ThreadHeap's pages to the orphanedPagePool. 660 // Add the ThreadHeap's pages to the orphanedPagePool.
615 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) 661 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next)
616 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); 662 Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
617 m_firstPage = 0; 663 m_firstPage = 0;
618 664
 619 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) 665 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next)
620 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); 666 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject);
621 m_firstLargeHeapObject = 0; 667 m_firstLargeHeapObject = 0;
622 } 668 }
(...skipping 295 matching lines...) Expand 10 before | Expand all | Expand 10 after
918 964
919 // If ASan is supported we add allocationGranularity bytes to the allocated space and 965 // If ASan is supported we add allocationGranularity bytes to the allocated space and
920 // poison that to detect overflows 966 // poison that to detect overflows
921 #if defined(ADDRESS_SANITIZER) 967 #if defined(ADDRESS_SANITIZER)
922 allocationSize += allocationGranularity; 968 allocationSize += allocationGranularity;
923 #endif 969 #endif
924 if (threadState()->shouldGC()) 970 if (threadState()->shouldGC())
925 threadState()->setGCRequested(); 971 threadState()->setGCRequested();
926 Heap::flushHeapDoesNotContainCache(); 972 Heap::flushHeapDoesNotContainCache();
927 PageMemory* pageMemory = PageMemory::allocate(allocationSize); 973 PageMemory* pageMemory = PageMemory::allocate(allocationSize);
974 m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region());
928 Address largeObjectAddress = pageMemory->writableStart(); 975 Address largeObjectAddress = pageMemory->writableStart();
929 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); 976 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
930 memset(headerAddress, 0, size); 977 memset(headerAddress, 0, size);
931 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); 978 Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
932 Address result = headerAddress + sizeof(*header); 979 Address result = headerAddress + sizeof(*header);
933 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); 980 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
 934 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); 981 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState());
935 982
936 // Poison the object header and allocationGranularity bytes after the object 983 // Poison the object header and allocationGranularity bytes after the object
937 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); 984 ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
 938 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); 985 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
939 largeObject->link(&m_firstLargeHeapObject); 986 largeObject->link(&m_firstLargeHeapObject);
940 stats().increaseAllocatedSpace(largeObject->size()); 987 stats().increaseAllocatedSpace(largeObject->size());
941 stats().increaseObjectSpace(largeObject->payloadSize()); 988 stats().increaseObjectSpace(largeObject->payloadSize());
942 return result; 989 return result;
943 } 990 }
944 991
945 template<typename Header> 992 template<typename Header>
946 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeH eapObject<Header>** previousNext) 993 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeH eapObject<Header>** previousNext)
947 { 994 {
948 flushHeapContainsCache();
949 object->unlink(previousNext); 995 object->unlink(previousNext);
950 object->finalize(); 996 object->finalize();
951 997
952 // Unpoison the object header and allocationGranularity bytes after the 998 // Unpoison the object header and allocationGranularity bytes after the
953 // object before freeing. 999 // object before freeing.
954 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); 1000 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
 955 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); 1001 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
956 1002
957 if (object->terminating()) { 1003 if (object->terminating()) {
958 ASSERT(ThreadState::current()->isTerminating()); 1004 ASSERT(ThreadState::current()->isTerminating());
(...skipping 183 matching lines...) Expand 10 before | Expand all | Expand 10 after
1142 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GC Info on the heap 1188 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GC Info on the heap
1143 // since it is the same for all objects 1189 // since it is the same for all objects
1144 ASSERT(gcInfo); 1190 ASSERT(gcInfo);
1145 allocatePage(gcInfo); 1191 allocatePage(gcInfo);
1146 } 1192 }
1147 1193
1148 template <typename Header> 1194 template <typename Header>
1149 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page) 1195 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page)
1150 { 1196 {
1151 MutexLocker locker(m_threadState->sweepMutex()); 1197 MutexLocker locker(m_threadState->sweepMutex());
1152 flushHeapContainsCache();
1153 if (page->terminating()) { 1198 if (page->terminating()) {
1154 // The thread is shutting down so this page is being removed as part 1199 // The thread is shutting down so this page is being removed as part
1155 // of a thread local GC. In that case the page could be accessed in the 1200 // of a thread local GC. In that case the page could be accessed in the
1156 // next global GC either due to a dead object being traced via a 1201 // next global GC either due to a dead object being traced via a
1157 // conservative pointer or due to a programming error where an object 1202 // conservative pointer or due to a programming error where an object
1158 // in another thread heap keeps a dangling pointer to this object. 1203 // in another thread heap keeps a dangling pointer to this object.
1159 // To guard against this we put the page in the orphanedPagePool to 1204 // To guard against this we put the page in the orphanedPagePool to
1160 // ensure it is still reachable. After the next global GC it can be 1205 // ensure it is still reachable. After the next global GC it can be
1161 // decommitted and moved to the page pool assuming no rogue/dangling 1206 // decommitted and moved to the page pool assuming no rogue/dangling
1162 // pointers refer to it. 1207 // pointers refer to it.
(...skipping 13 matching lines...) Expand all
1176 // We continue allocating page memory until we succeed in getting one. 1221 // We continue allocating page memory until we succeed in getting one.
1177 // Since the FreePagePool is global other threads could use all the 1222 // Since the FreePagePool is global other threads could use all the
1178 // newly allocated page memory before this thread calls takeFreePage. 1223 // newly allocated page memory before this thread calls takeFreePage.
1179 while (!pageMemory) { 1224 while (!pageMemory) {
1180 // Allocate a memory region for blinkPagesPerRegion pages that 1225 // Allocate a memory region for blinkPagesPerRegion pages that
1181 // will each have the following layout. 1226 // will each have the following layout.
1182 // 1227 //
1183 // [ guard os page | ... payload ... | guard os page ] 1228 // [ guard os page | ... payload ... | guard os page ]
1184 // ^---{ aligned to blink page size } 1229 // ^---{ aligned to blink page size }
 1185 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); 1230 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion);
1231 m_threadState->allocatedRegionsSinceLastGC().append(region);
1186 // Setup the PageMemory object for each of the pages in the 1232 // Setup the PageMemory object for each of the pages in the
1187 // region. 1233 // region.
1188 size_t offset = 0; 1234 size_t offset = 0;
1189 for (size_t i = 0; i < blinkPagesPerRegion; i++) { 1235 for (size_t i = 0; i < blinkPagesPerRegion; i++) {
 1190 Heap::freePagePool()->addFreePage(m_index, PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize())); 1236 Heap::freePagePool()->addFreePage(m_index, PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize()));
1191 offset += blinkPageSize; 1237 offset += blinkPageSize;
1192 } 1238 }
1193 pageMemory = Heap::freePagePool()->takeFreePage(m_index); 1239 pageMemory = Heap::freePagePool()->takeFreePage(m_index);
1194 } 1240 }
 1195 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); 1241 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
(...skipping 546 matching lines...) Expand 10 before | Expand all | Expand 10 after
1742 } 1788 }
1743 1789
1744 if (json) { 1790 if (json) {
1745 json->setInteger("class", tag); 1791 json->setInteger("class", tag);
1746 json->setInteger("size", header->size()); 1792 json->setInteger("size", header->size());
1747 json->setInteger("isMarked", isMarked()); 1793 json->setInteger("isMarked", isMarked());
1748 } 1794 }
1749 } 1795 }
1750 #endif 1796 #endif
1751 1797
1752 template<typename Entry> 1798 void HeapDoesNotContainCache::flush()
1753 void HeapExtentCache<Entry>::flush()
1754 { 1799 {
1755 if (m_hasEntries) { 1800 if (m_hasEntries) {
1756 for (int i = 0; i < numberOfEntries; i++) 1801 for (int i = 0; i < numberOfEntries; i++)
1757 m_entries[i] = Entry(); 1802 m_entries[i] = 0;
1758 m_hasEntries = false; 1803 m_hasEntries = false;
1759 } 1804 }
1760 } 1805 }
1761 1806
1762 template<typename Entry> 1807 size_t HeapDoesNotContainCache::hash(Address address)
1763 size_t HeapExtentCache<Entry>::hash(Address address)
1764 { 1808 {
1765 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); 1809 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2);
1766 value ^= value >> numberOfEntriesLog2; 1810 value ^= value >> numberOfEntriesLog2;
1767 value ^= value >> (numberOfEntriesLog2 * 2); 1811 value ^= value >> (numberOfEntriesLog2 * 2);
1768 value &= numberOfEntries - 1; 1812 value &= numberOfEntries - 1;
1769 return value & ~1; // Returns only even number. 1813 return value & ~1; // Returns only even number.
1770 } 1814 }
1771 1815
1772 template<typename Entry> 1816 bool HeapDoesNotContainCache::lookup(Address address)
1773 typename Entry::LookupResult HeapExtentCache<Entry>::lookup(Address address)
1774 { 1817 {
1775 size_t index = hash(address); 1818 size_t index = hash(address);
1776 ASSERT(!(index & 1)); 1819 ASSERT(!(index & 1));
1777 Address cachePage = roundToBlinkPageStart(address); 1820 Address cachePage = roundToBlinkPageStart(address);
1778 if (m_entries[index].address() == cachePage) 1821 if (m_entries[index] == cachePage)
1779 return m_entries[index].result(); 1822 return m_entries[index];
1780 if (m_entries[index + 1].address() == cachePage) 1823 if (m_entries[index + 1] == cachePage)
1781 return m_entries[index + 1].result(); 1824 return m_entries[index + 1];
1782 return 0; 1825 return 0;
1783 } 1826 }
1784 1827
1785 template<typename Entry> 1828 void HeapDoesNotContainCache::addEntry(Address address)
 1786 void HeapExtentCache<Entry>::addEntry(Address address, typename Entry::LookupResult entry)
1787 { 1829 {
1788 m_hasEntries = true; 1830 m_hasEntries = true;
1789 size_t index = hash(address); 1831 size_t index = hash(address);
1790 ASSERT(!(index & 1)); 1832 ASSERT(!(index & 1));
1791 Address cachePage = roundToBlinkPageStart(address); 1833 Address cachePage = roundToBlinkPageStart(address);
1792 m_entries[index + 1] = m_entries[index]; 1834 m_entries[index + 1] = m_entries[index];
1793 m_entries[index] = Entry(cachePage, entry); 1835 m_entries[index] = cachePage;
1794 }
1795
1796 // These should not be needed, but it seems impossible to persuade clang to
1797 // instantiate the template functions and export them from a shared library, so
1798 // we add these in the non-templated subclass, which does not have that issue.
1799 void HeapContainsCache::addEntry(Address address, BaseHeapPage* page)
1800 {
1801 HeapExtentCache<PositiveEntry>::addEntry(address, page);
1802 }
1803
1804 BaseHeapPage* HeapContainsCache::lookup(Address address)
1805 {
1806 return HeapExtentCache<PositiveEntry>::lookup(address);
1807 } 1836 }
1808 1837
1809 void Heap::flushHeapDoesNotContainCache() 1838 void Heap::flushHeapDoesNotContainCache()
1810 { 1839 {
1811 s_heapDoesNotContainCache->flush(); 1840 s_heapDoesNotContainCache->flush();
1812 } 1841 }
1813 1842
1814 // The marking mutex is used to ensure sequential access to data 1843 // The marking mutex is used to ensure sequential access to data
1815 // structures during marking. The marking mutex needs to be acquired 1844 // structures during marking. The marking mutex needs to be acquired
1816 // during marking when elements are taken from the global marking 1845 // during marking when elements are taken from the global marking
(...skipping 282 matching lines...) Expand 10 before | Expand all | Expand 10 after
2099 delete s_orphanedPagePool; 2128 delete s_orphanedPagePool;
2100 s_orphanedPagePool = 0; 2129 s_orphanedPagePool = 0;
2101 delete s_weakCallbackStack; 2130 delete s_weakCallbackStack;
2102 s_weakCallbackStack = 0; 2131 s_weakCallbackStack = 0;
2103 delete s_postMarkingCallbackStack; 2132 delete s_postMarkingCallbackStack;
2104 s_postMarkingCallbackStack = 0; 2133 s_postMarkingCallbackStack = 0;
2105 delete s_markingStack; 2134 delete s_markingStack;
2106 s_markingStack = 0; 2135 s_markingStack = 0;
2107 delete s_ephemeronStack; 2136 delete s_ephemeronStack;
2108 s_ephemeronStack = 0; 2137 s_ephemeronStack = 0;
2138 delete s_regionTree;
2139 s_regionTree = 0;
2109 ThreadState::shutdown(); 2140 ThreadState::shutdown();
2110 } 2141 }
2111 2142
2112 BaseHeapPage* Heap::contains(Address address) 2143 BaseHeapPage* Heap::contains(Address address)
2113 { 2144 {
2114 ASSERT(ThreadState::isAnyThreadInGC()); 2145 ASSERT(ThreadState::isAnyThreadInGC());
 2115 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); 2146 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2116 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { 2147 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2117 BaseHeapPage* page = (*it)->contains(address); 2148 BaseHeapPage* page = (*it)->contains(address);
2118 if (page) 2149 if (page)
(...skipping 11 matching lines...) Expand all
2130 2161
2131 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) 2162 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
2132 { 2163 {
2133 ASSERT(ThreadState::isAnyThreadInGC()); 2164 ASSERT(ThreadState::isAnyThreadInGC());
2134 2165
2135 #if !ENABLE(ASSERT) 2166 #if !ENABLE(ASSERT)
2136 if (s_heapDoesNotContainCache->lookup(address)) 2167 if (s_heapDoesNotContainCache->lookup(address))
2137 return 0; 2168 return 0;
2138 #endif 2169 #endif
2139 2170
 2140 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); 2171 if (BaseHeapPage* page = lookup(address)) {
2141 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { 2172 ASSERT(page->contains(address));
2142 if ((*it)->checkAndMarkPointer(visitor, address)) { 2173 ASSERT(!s_heapDoesNotContainCache->lookup(address));
2143 // Pointer was in a page of that thread. If it actually pointed 2174 // TODO: What if the thread owning this page is terminating?
zerny-chromium 2014/10/01 09:28:54 This issue changes the objects which might be mark
Mads Ager (chromium) 2014/10/01 11:29:01 My gut reaction is that it would be best to filter
zerny-chromium 2014/10/01 12:05:34 As discussed offline, we will mark the pointer on
2144 // into an object then that object was found and marked. 2175 page->checkAndMarkPointer(visitor, address);
 2145 ASSERT(!s_heapDoesNotContainCache->lookup(address)); 2176 // TODO: We only need to set the conservative flag if checkAndMarkPointer actually marked the pointer.
2146 s_lastGCWasConservative = true; 2177 s_lastGCWasConservative = true;
2147 return address; 2178 return address;
2148 }
2149 } 2179 }
2150 2180
2151 #if !ENABLE(ASSERT) 2181 #if !ENABLE(ASSERT)
2152 s_heapDoesNotContainCache->addEntry(address, true); 2182 s_heapDoesNotContainCache->addEntry(address);
2153 #else 2183 #else
2154 if (!s_heapDoesNotContainCache->lookup(address)) 2184 if (!s_heapDoesNotContainCache->lookup(address))
2155 s_heapDoesNotContainCache->addEntry(address, true); 2185 s_heapDoesNotContainCache->addEntry(address);
2156 #endif 2186 #endif
2157 return 0; 2187 return 0;
2158 } 2188 }
2159 2189
2160 #if ENABLE(GC_PROFILE_MARKING) 2190 #if ENABLE(GC_PROFILE_MARKING)
2161 const GCInfo* Heap::findGCInfo(Address address) 2191 const GCInfo* Heap::findGCInfo(Address address)
2162 { 2192 {
2163 return ThreadState::findGCInfoFromAllThreads(address); 2193 return ThreadState::findGCInfoFromAllThreads(address);
2164 } 2194 }
2165 #endif 2195 #endif
(...skipping 537 matching lines...) Expand 10 before | Expand all | Expand 10 after
2703 2733
2704 HeaderType* header = HeaderType::fromPayload(address); 2734 HeaderType* header = HeaderType::fromPayload(address);
2705 header->checkHeader(); 2735 header->checkHeader();
2706 2736
2707 const GCInfo* gcInfo = header->gcInfo(); 2737 const GCInfo* gcInfo = header->gcInfo();
2708 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer()); 2738 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer());
2709 HeapType* heap = static_cast<HeapType*>(state->heap(heapIndex)); 2739 HeapType* heap = static_cast<HeapType*>(state->heap(heapIndex));
2710 heap->promptlyFreeObject(header); 2740 heap->promptlyFreeObject(header);
2711 } 2741 }
2712 2742
2743 BaseHeapPage* Heap::lookup(Address address)
2744 {
2745 if (!s_regionTree)
2746 return 0;
2747 if (PageMemoryRegion* region = s_regionTree->lookup(address))
2748 return region->pageFromAddress(address);
2749 return 0;
2750 }
2751
2752 void Heap::removePageMemoryRegion(PageMemoryRegion* region)
2753 {
2754 // Deletion of large objects (and thus their region) can happen concurrently
2755 // on sweeper threads. Removal can also happen during thread shutdown, but
2756 // that case is safe. Regardless, we make removal mutually exclusive and
2757 // reuse the marking mutex which is not in use during any of the two cases.
2758 MutexLocker locker(markingMutex());
Mads Ager (chromium) 2014/10/01 11:29:00 Nit: Maybe just introduced a regionTreeMutex that
zerny-chromium 2014/10/01 12:05:34 Done.
2759 RegionTree::remove(region, &s_regionTree);
2760 }
2761
2762 void Heap::addPageMemoryRegion(PageMemoryRegion* region)
2763 {
 2764 // No locking because regions are only added in ThreadState::prepareForGC which is in a GCScope.
Mads Ager (chromium) 2014/10/01 11:29:01 Can we add an assert here to trigger if we ever at
zerny-chromium 2014/10/01 12:05:34 Done.
2765 RegionTree::add(new RegionTree(region), &s_regionTree);
2766 }
2767
2768 PageMemoryRegion* Heap::RegionTree::lookup(Address address)
2769 {
2770 RegionTree* current = s_regionTree;
2771 while (current) {
2772 Address base = current->m_region->base();
2773 if (address < base) {
2774 current = current->m_left;
2775 continue;
2776 }
2777 if (address >= base + current->m_region->size()) {
2778 current = current->m_right;
2779 continue;
2780 }
2781 ASSERT(current->m_region->contains(address));
2782 return current->m_region;
2783 }
2784 return 0;
2785 }
2786
2787 void Heap::RegionTree::add(RegionTree* newTree, RegionTree** context)
2788 {
2789 if (!newTree)
Mads Ager (chromium) 2014/10/01 11:29:01 ASSERT(newTree) instead? Do we ever actually want
zerny-chromium 2014/10/01 12:05:34 I use the possibility of adding a null in RegionTr
2790 return;
2791 Address base = newTree->m_region->base();
2792 for (RegionTree* current = *context; current; current = *context) {
 2793 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
2794 }
2795 *context = newTree;
2796 }
2797
2798 void Heap::RegionTree::remove(PageMemoryRegion* region, RegionTree** context)
2799 {
2800 ASSERT(region);
2801 ASSERT(context);
2802 Address base = region->base();
2803 RegionTree* current = *context;
2804 ASSERT(current);
2805 while (region != current->m_region) {
 2806 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
2807 current = *context;
2808 ASSERT(current);
2809 }
2810 *context = 0;
2811 add(current->m_left, context);
2812 add(current->m_right, context);
2813 current->m_left = 0;
2814 current->m_right = 0;
2815 delete current;
2816 }
2817
2713 // Force template instantiations for the types that we need. 2818 // Force template instantiations for the types that we need.
2714 template class HeapPage<FinalizedHeapObjectHeader>; 2819 template class HeapPage<FinalizedHeapObjectHeader>;
2715 template class HeapPage<HeapObjectHeader>; 2820 template class HeapPage<HeapObjectHeader>;
2716 template class ThreadHeap<FinalizedHeapObjectHeader>; 2821 template class ThreadHeap<FinalizedHeapObjectHeader>;
2717 template class ThreadHeap<HeapObjectHeader>; 2822 template class ThreadHeap<HeapObjectHeader>;
2718 2823
2719 Visitor* Heap::s_markingVisitor; 2824 Visitor* Heap::s_markingVisitor;
2720 Vector<OwnPtr<blink::WebThread> >* Heap::s_markingThreads; 2825 Vector<OwnPtr<blink::WebThread> >* Heap::s_markingThreads;
2721 CallbackStack* Heap::s_markingStack; 2826 CallbackStack* Heap::s_markingStack;
2722 CallbackStack* Heap::s_postMarkingCallbackStack; 2827 CallbackStack* Heap::s_postMarkingCallbackStack;
2723 CallbackStack* Heap::s_weakCallbackStack; 2828 CallbackStack* Heap::s_weakCallbackStack;
2724 CallbackStack* Heap::s_ephemeronStack; 2829 CallbackStack* Heap::s_ephemeronStack;
2725 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; 2830 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
2726 bool Heap::s_shutdownCalled = false; 2831 bool Heap::s_shutdownCalled = false;
2727 bool Heap::s_lastGCWasConservative = false; 2832 bool Heap::s_lastGCWasConservative = false;
2728 FreePagePool* Heap::s_freePagePool; 2833 FreePagePool* Heap::s_freePagePool;
2729 OrphanedPagePool* Heap::s_orphanedPagePool; 2834 OrphanedPagePool* Heap::s_orphanedPagePool;
2835 Heap::RegionTree* Heap::s_regionTree = 0;
2836
2730 } 2837 }
OLDNEW
« no previous file with comments | « Source/platform/heap/Heap.h ('k') | Source/platform/heap/ThreadState.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698