Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(377)

Side by Side Diff: Source/platform/heap/Heap.cpp

Issue 616483002: Oilpan: Replace the positive heap-contains cache with a binary search tree of memory regions. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: RC2 Created 6 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after
118 , m_size(size) 118 , m_size(size)
119 { 119 {
120 ASSERT(size > 0); 120 ASSERT(size > 0);
121 } 121 }
122 122
123 bool contains(Address addr) const 123 bool contains(Address addr) const
124 { 124 {
125 return m_base <= addr && addr < (m_base + m_size); 125 return m_base <= addr && addr < (m_base + m_size);
126 } 126 }
127 127
128
129 bool contains(const MemoryRegion& other) const 128 bool contains(const MemoryRegion& other) const
130 { 129 {
131 return contains(other.m_base) && contains(other.m_base + other.m_size - 1); 130 return contains(other.m_base) && contains(other.m_base + other.m_size - 1);
132 } 131 }
133 132
134 void release() 133 void release()
135 { 134 {
136 #if OS(POSIX) 135 #if OS(POSIX)
137 int err = munmap(m_base, m_size); 136 int err = munmap(m_base, m_size);
138 RELEASE_ASSERT(!err); 137 RELEASE_ASSERT(!err);
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
180 // whole. The PageMemoryRegion allows us to do that by keeping track 179 // whole. The PageMemoryRegion allows us to do that by keeping track
181 // of the number of pages using it in order to be able to release all 180 // of the number of pages using it in order to be able to release all
182 // of the virtual address space when there are no more pages using it. 181 // of the virtual address space when there are no more pages using it.
183 class PageMemoryRegion : public MemoryRegion { 182 class PageMemoryRegion : public MemoryRegion {
184 public: 183 public:
185 ~PageMemoryRegion() 184 ~PageMemoryRegion()
186 { 185 {
187 release(); 186 release();
188 } 187 }
189 188
190 void pageRemoved() 189 void pageDeleted(Address page)
191 { 190 {
192 if (!--m_numPages) 191 markPageUnused(page);
192 if (!--m_numPages) {
193 Heap::removePageMemoryRegion(this);
193 delete this; 194 delete this;
195 }
196 }
197
198 void markPageUsed(Address page)
199 {
200 ASSERT(!m_inUse[index(page)]);
201 m_inUse[index(page)] = true;
202 }
203
204 void markPageUnused(Address page)
205 {
206 m_inUse[index(page)] = false;
194 } 207 }
195 208
196 static PageMemoryRegion* allocate(size_t size, unsigned numPages) 209 static PageMemoryRegion* allocate(size_t size, unsigned numPages)
197 { 210 {
198 ASSERT(Heap::heapDoesNotContainCacheIsEmpty()); 211 ASSERT(Heap::heapDoesNotContainCacheIsEmpty());
212 ASSERT(numPages == 1 || numPages == blinkPagesPerRegion);
213 ASSERT(numPages == 1 || size % blinkPageSize == numPages);
haraken 2014/10/02 02:31:35 Can we add a comment and mention that 'numPages ==
zerny-chromium 2014/10/02 07:48:42 Done.
199 214
200 // Compute a random blink page aligned address for the page memory 215 // Compute a random blink page aligned address for the page memory
201 // region and attempt to get the memory there. 216 // region and attempt to get the memory there.
202 Address randomAddress = reinterpret_cast<Address>(WTF::getRandomPageBase()); 217 Address randomAddress = reinterpret_cast<Address>(WTF::getRandomPageBase());
203 Address alignedRandomAddress = roundToBlinkPageBoundary(randomAddress); 218 Address alignedRandomAddress = roundToBlinkPageBoundary(randomAddress);
204 219
205 #if OS(POSIX) 220 #if OS(POSIX)
206 Address base = static_cast<Address>(mmap(alignedRandomAddress, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0)); 221 Address base = static_cast<Address>(mmap(alignedRandomAddress, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0));
207 RELEASE_ASSERT(base != MAP_FAILED); 222 RELEASE_ASSERT(base != MAP_FAILED);
208 if (base == roundToBlinkPageBoundary(base)) 223 if (base == roundToBlinkPageBoundary(base))
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
267 282
268 // FIXME: If base is by accident blink page size aligned 283 // FIXME: If base is by accident blink page size aligned
269 // here then we can create two pages out of reserved 284 // here then we can create two pages out of reserved
270 // space. Do this. 285 // space. Do this.
271 Address alignedBase = roundToBlinkPageBoundary(base); 286 Address alignedBase = roundToBlinkPageBoundary(base);
272 287
273 return new PageMemoryRegion(alignedBase, size, numPages); 288 return new PageMemoryRegion(alignedBase, size, numPages);
274 #endif 289 #endif
275 } 290 }
276 291
292 BaseHeapPage* pageFromAddress(Address address)
293 {
294 ASSERT(contains(address));
295 if (!m_inUse[index(address)])
296 return 0;
297 if (m_isLargePage)
298 return pageHeaderFromObject(base());
299 return pageHeaderFromObject(address);
300 }
301
277 private: 302 private:
278 PageMemoryRegion(Address base, size_t size, unsigned numPages) 303 PageMemoryRegion(Address base, size_t size, unsigned numPages)
279 : MemoryRegion(base, size) 304 : MemoryRegion(base, size)
305 , m_isLargePage(numPages == 1)
280 , m_numPages(numPages) 306 , m_numPages(numPages)
281 { 307 {
308 for (size_t i = 0; i < blinkPagesPerRegion; ++i)
309 m_inUse[i] = false;
282 } 310 }
283 311
312 unsigned index(Address address)
313 {
314 ASSERT(contains(address));
315 if (m_isLargePage)
316 return 0;
317 size_t offset = blinkPageAddress(address) - base();
318 ASSERT(offset % blinkPageSize == 0);
319 return offset / blinkPageSize;
320 }
321
322 bool m_isLargePage;
323 bool m_inUse[blinkPagesPerRegion];
284 unsigned m_numPages; 324 unsigned m_numPages;
285 }; 325 };
286 326
287 // Representation of the memory used for a Blink heap page. 327 // Representation of the memory used for a Blink heap page.
288 // 328 //
289 // The representation keeps track of two memory regions: 329 // The representation keeps track of two memory regions:
290 // 330 //
291 // 1. The virtual memory reserved from the system in order to be able 331 // 1. The virtual memory reserved from the system in order to be able
292 // to free all the virtual memory reserved. Multiple PageMemory 332 // to free all the virtual memory reserved. Multiple PageMemory
293 // instances can share the same reserved memory region and 333 // instances can share the same reserved memory region and
294 // therefore notify the reserved memory region on destruction so 334 // therefore notify the reserved memory region on destruction so
295 // that the system memory can be given back when all PageMemory 335 // that the system memory can be given back when all PageMemory
296 // instances for that memory are gone. 336 // instances for that memory are gone.
297 // 337 //
298 // 2. The writable memory (a sub-region of the reserved virtual 338 // 2. The writable memory (a sub-region of the reserved virtual
299 // memory region) that is used for the actual heap page payload. 339 // memory region) that is used for the actual heap page payload.
300 // 340 //
301 // Guard pages are created before and after the writable memory. 341 // Guard pages are created before and after the writable memory.
302 class PageMemory { 342 class PageMemory {
303 public: 343 public:
304 ~PageMemory() 344 ~PageMemory()
305 { 345 {
306 __lsan_unregister_root_region(m_writable.base(), m_writable.size()); 346 __lsan_unregister_root_region(m_writable.base(), m_writable.size());
307 m_reserved->pageRemoved(); 347 m_reserved->pageDeleted(writableStart());
308 } 348 }
309 349
310 bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); } 350 WARN_UNUSED_RETURN bool commit()
311 void decommit() { m_writable.decommit(); } 351 {
352 m_reserved->markPageUsed(writableStart());
353 return m_writable.commit();
354 }
355
356 void decommit()
357 {
358 m_reserved->markPageUnused(writableStart());
359 m_writable.decommit();
360 }
361
362 PageMemoryRegion* region() { return m_reserved; }
312 363
313 Address writableStart() { return m_writable.base(); } 364 Address writableStart() { return m_writable.base(); }
314 365
315 static PageMemory* setupPageMemoryInRegion(PageMemoryRegion* region, size_t pageOffset, size_t payloadSize) 366 static PageMemory* setupPageMemoryInRegion(PageMemoryRegion* region, size_t pageOffset, size_t payloadSize)
316 { 367 {
317 // Setup the payload one OS page into the page memory. The 368 // Setup the payload one OS page into the page memory. The
318 // first os page is the guard page. 369 // first os page is the guard page.
319 Address payloadAddress = region->base() + pageOffset + osPageSize(); 370 Address payloadAddress = region->base() + pageOffset + osPageSize();
320 return new PageMemory(region, MemoryRegion(payloadAddress, payloadSize)); 371 return new PageMemory(region, MemoryRegion(payloadAddress, payloadSize));
321 } 372 }
(...skipping 280 matching lines...) Expand 10 before | Expand all | Expand 10 after
602 ThreadHeap<Header>::~ThreadHeap() 653 ThreadHeap<Header>::~ThreadHeap()
603 { 654 {
604 ASSERT(!m_firstPage); 655 ASSERT(!m_firstPage);
605 ASSERT(!m_firstLargeHeapObject); 656 ASSERT(!m_firstLargeHeapObject);
606 } 657 }
607 658
608 template<typename Header> 659 template<typename Header>
609 void ThreadHeap<Header>::cleanupPages() 660 void ThreadHeap<Header>::cleanupPages()
610 { 661 {
611 clearFreeLists(); 662 clearFreeLists();
612 flushHeapContainsCache();
613 663
614 // Add the ThreadHeap's pages to the orphanedPagePool. 664 // Add the ThreadHeap's pages to the orphanedPagePool.
615 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) 665 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next)
616 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); 666 Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
617 m_firstPage = 0; 667 m_firstPage = 0;
618 668
619 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObj ect; largeObject = largeObject->m_next) 669 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObj ect; largeObject = largeObject->m_next)
620 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); 670 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject);
621 m_firstLargeHeapObject = 0; 671 m_firstLargeHeapObject = 0;
622 } 672 }
(...skipping 295 matching lines...) Expand 10 before | Expand all | Expand 10 after
918 968
919 // If ASan is supported we add allocationGranularity bytes to the allocated space and 969 // If ASan is supported we add allocationGranularity bytes to the allocated space and
920 // poison that to detect overflows 970 // poison that to detect overflows
921 #if defined(ADDRESS_SANITIZER) 971 #if defined(ADDRESS_SANITIZER)
922 allocationSize += allocationGranularity; 972 allocationSize += allocationGranularity;
923 #endif 973 #endif
924 if (threadState()->shouldGC()) 974 if (threadState()->shouldGC())
925 threadState()->setGCRequested(); 975 threadState()->setGCRequested();
926 Heap::flushHeapDoesNotContainCache(); 976 Heap::flushHeapDoesNotContainCache();
927 PageMemory* pageMemory = PageMemory::allocate(allocationSize); 977 PageMemory* pageMemory = PageMemory::allocate(allocationSize);
978 m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region());
928 Address largeObjectAddress = pageMemory->writableStart(); 979 Address largeObjectAddress = pageMemory->writableStart();
929 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); 980 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
930 memset(headerAddress, 0, size); 981 memset(headerAddress, 0, size);
931 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); 982 Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
932 Address result = headerAddress + sizeof(*header); 983 Address result = headerAddress + sizeof(*header);
933 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); 984 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
934 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); 985 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState());
935 986
936 // Poison the object header and allocationGranularity bytes after the object 987 // Poison the object header and allocationGranularity bytes after the object
937 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); 988 ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
938 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); 989 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
939 largeObject->link(&m_firstLargeHeapObject); 990 largeObject->link(&m_firstLargeHeapObject);
940 stats().increaseAllocatedSpace(largeObject->size()); 991 stats().increaseAllocatedSpace(largeObject->size());
941 stats().increaseObjectSpace(largeObject->payloadSize()); 992 stats().increaseObjectSpace(largeObject->payloadSize());
942 return result; 993 return result;
943 } 994 }
944 995
945 template<typename Header> 996 template<typename Header>
946 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) 997 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
947 { 998 {
948 flushHeapContainsCache();
949 object->unlink(previousNext); 999 object->unlink(previousNext);
950 object->finalize(); 1000 object->finalize();
951 1001
952 // Unpoison the object header and allocationGranularity bytes after the 1002 // Unpoison the object header and allocationGranularity bytes after the
953 // object before freeing. 1003 // object before freeing.
954 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); 1004 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
955 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); 1005 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
956 1006
957 if (object->terminating()) { 1007 if (object->terminating()) {
958 ASSERT(ThreadState::current()->isTerminating()); 1008 ASSERT(ThreadState::current()->isTerminating());
(...skipping 183 matching lines...) Expand 10 before | Expand all | Expand 10 after
1142 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GC Info on the heap 1192 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GC Info on the heap
1143 // since it is the same for all objects 1193 // since it is the same for all objects
1144 ASSERT(gcInfo); 1194 ASSERT(gcInfo);
1145 allocatePage(gcInfo); 1195 allocatePage(gcInfo);
1146 } 1196 }
1147 1197
1148 template <typename Header> 1198 template <typename Header>
1149 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page) 1199 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page)
1150 { 1200 {
1151 MutexLocker locker(m_threadState->sweepMutex()); 1201 MutexLocker locker(m_threadState->sweepMutex());
1152 flushHeapContainsCache();
1153 if (page->terminating()) { 1202 if (page->terminating()) {
1154 // The thread is shutting down so this page is being removed as part 1203 // The thread is shutting down so this page is being removed as part
1155 // of a thread local GC. In that case the page could be accessed in the 1204 // of a thread local GC. In that case the page could be accessed in the
1156 // next global GC either due to a dead object being traced via a 1205 // next global GC either due to a dead object being traced via a
1157 // conservative pointer or due to a programming error where an object 1206 // conservative pointer or due to a programming error where an object
1158 // in another thread heap keeps a dangling pointer to this object. 1207 // in another thread heap keeps a dangling pointer to this object.
1159 // To guard against this we put the page in the orphanedPagePool to 1208 // To guard against this we put the page in the orphanedPagePool to
1160 // ensure it is still reachable. After the next global GC it can be 1209 // ensure it is still reachable. After the next global GC it can be
1161 // decommitted and moved to the page pool assuming no rogue/dangling 1210 // decommitted and moved to the page pool assuming no rogue/dangling
1162 // pointers refer to it. 1211 // pointers refer to it.
1163 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); 1212 Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
1164 } else { 1213 } else {
1165 PageMemory* memory = page->storage(); 1214 PageMemory* memory = page->storage();
1166 page->~HeapPage<Header>(); 1215 page->~HeapPage<Header>();
1167 Heap::freePagePool()->addFreePage(m_index, memory); 1216 Heap::freePagePool()->addFreePage(m_index, memory);
1168 } 1217 }
1169 } 1218 }
1170 1219
1171 template<typename Header> 1220 template<typename Header>
1172 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) 1221 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
1173 { 1222 {
1174 Heap::flushHeapDoesNotContainCache(); 1223 Heap::flushHeapDoesNotContainCache();
1175 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index); 1224 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index);
1176 // We continue allocating page memory until we succeed in getting one. 1225 // We continue allocating page memory until we succeed in committing one.
1177 // Since the FreePagePool is global other threads could use all the
1178 // newly allocated page memory before this thread calls takeFreePage.
1179 while (!pageMemory) { 1226 while (!pageMemory) {
1180 // Allocate a memory region for blinkPagesPerRegion pages that 1227 // Allocate a memory region for blinkPagesPerRegion pages that
1181 // will each have the following layout. 1228 // will each have the following layout.
1182 // 1229 //
1183 // [ guard os page | ... payload ... | guard os page ] 1230 // [ guard os page | ... payload ... | guard os page ]
1184 // ^---{ aligned to blink page size } 1231 // ^---{ aligned to blink page size }
1185 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); 1232 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion);
1233 m_threadState->allocatedRegionsSinceLastGC().append(region);
1234
1186 // Setup the PageMemory object for each of the pages in the 1235 // Setup the PageMemory object for each of the pages in the
1187 // region. 1236 // region.
1188 size_t offset = 0; 1237 size_t offset = 0;
1189 for (size_t i = 0; i < blinkPagesPerRegion; i++) { 1238 for (size_t i = 0; i < blinkPagesPerRegion; i++) {
1190 Heap::freePagePool()->addFreePage(m_index, PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize())); 1239 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize());
1240 // Take the first possible page ensuring that this thread actually
1241 // gets a page and add the rest to the page pool.
1242 if (!pageMemory) {
1243 if (memory->commit())
1244 pageMemory = memory;
1245 else
1246 delete memory;
1247 } else {
1248 Heap::freePagePool()->addFreePage(m_index, memory);
1249 }
haraken 2014/10/02 02:31:35 Probably a simpler way to do this would be just to
zerny-chromium 2014/10/02 07:48:42 That won't eliminate the chance that the ThreadSta
haraken 2014/10/02 07:51:03 Makes sense!
1191 offset += blinkPageSize; 1250 offset += blinkPageSize;
1192 } 1251 }
1193 pageMemory = Heap::freePagePool()->takeFreePage(m_index);
1194 } 1252 }
1195 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>( pageMemory, this, gcInfo); 1253 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>( pageMemory, this, gcInfo);
1196 // Use a separate list for pages allocated during sweeping to make 1254 // Use a separate list for pages allocated during sweeping to make
1197 // sure that we do not accidentally sweep objects that have been 1255 // sure that we do not accidentally sweep objects that have been
1198 // allocated during sweeping. 1256 // allocated during sweeping.
1199 if (m_threadState->isSweepInProgress()) { 1257 if (m_threadState->isSweepInProgress()) {
1200 if (!m_lastPageAllocatedDuringSweeping) 1258 if (!m_lastPageAllocatedDuringSweeping)
1201 m_lastPageAllocatedDuringSweeping = page; 1259 m_lastPageAllocatedDuringSweeping = page;
1202 page->link(&m_firstPageAllocatedDuringSweeping); 1260 page->link(&m_firstPageAllocatedDuringSweeping);
1203 } else { 1261 } else {
(...skipping 538 matching lines...) Expand 10 before | Expand all | Expand 10 after
1742 } 1800 }
1743 1801
1744 if (json) { 1802 if (json) {
1745 json->setInteger("class", tag); 1803 json->setInteger("class", tag);
1746 json->setInteger("size", header->size()); 1804 json->setInteger("size", header->size());
1747 json->setInteger("isMarked", isMarked()); 1805 json->setInteger("isMarked", isMarked());
1748 } 1806 }
1749 } 1807 }
1750 #endif 1808 #endif
1751 1809
1752 template<typename Entry> 1810 void HeapDoesNotContainCache::flush()
1753 void HeapExtentCache<Entry>::flush()
1754 { 1811 {
1755 if (m_hasEntries) { 1812 if (m_hasEntries) {
1756 for (int i = 0; i < numberOfEntries; i++) 1813 for (int i = 0; i < numberOfEntries; i++)
1757 m_entries[i] = Entry(); 1814 m_entries[i] = 0;
1758 m_hasEntries = false; 1815 m_hasEntries = false;
1759 } 1816 }
1760 } 1817 }
1761 1818
1762 template<typename Entry> 1819 size_t HeapDoesNotContainCache::hash(Address address)
1763 size_t HeapExtentCache<Entry>::hash(Address address)
1764 { 1820 {
1765 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); 1821 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2);
1766 value ^= value >> numberOfEntriesLog2; 1822 value ^= value >> numberOfEntriesLog2;
1767 value ^= value >> (numberOfEntriesLog2 * 2); 1823 value ^= value >> (numberOfEntriesLog2 * 2);
1768 value &= numberOfEntries - 1; 1824 value &= numberOfEntries - 1;
1769 return value & ~1; // Returns only even number. 1825 return value & ~1; // Returns only even number.
1770 } 1826 }
1771 1827
1772 template<typename Entry> 1828 bool HeapDoesNotContainCache::lookup(Address address)
1773 typename Entry::LookupResult HeapExtentCache<Entry>::lookup(Address address)
1774 { 1829 {
1775 size_t index = hash(address); 1830 size_t index = hash(address);
1776 ASSERT(!(index & 1)); 1831 ASSERT(!(index & 1));
1777 Address cachePage = roundToBlinkPageStart(address); 1832 Address cachePage = roundToBlinkPageStart(address);
1778 if (m_entries[index].address() == cachePage) 1833 if (m_entries[index] == cachePage)
1779 return m_entries[index].result(); 1834 return m_entries[index];
1780 if (m_entries[index + 1].address() == cachePage) 1835 if (m_entries[index + 1] == cachePage)
1781 return m_entries[index + 1].result(); 1836 return m_entries[index + 1];
1782 return 0; 1837 return 0;
1783 } 1838 }
1784 1839
1785 template<typename Entry> 1840 void HeapDoesNotContainCache::addEntry(Address address)
1786 void HeapExtentCache<Entry>::addEntry(Address address, typename Entry::LookupRes ult entry)
1787 { 1841 {
1788 m_hasEntries = true; 1842 m_hasEntries = true;
1789 size_t index = hash(address); 1843 size_t index = hash(address);
1790 ASSERT(!(index & 1)); 1844 ASSERT(!(index & 1));
1791 Address cachePage = roundToBlinkPageStart(address); 1845 Address cachePage = roundToBlinkPageStart(address);
1792 m_entries[index + 1] = m_entries[index]; 1846 m_entries[index + 1] = m_entries[index];
1793 m_entries[index] = Entry(cachePage, entry); 1847 m_entries[index] = cachePage;
1794 }
1795
1796 // These should not be needed, but it seems impossible to persuade clang to
1797 // instantiate the template functions and export them from a shared library, so
1798 // we add these in the non-templated subclass, which does not have that issue.
1799 void HeapContainsCache::addEntry(Address address, BaseHeapPage* page)
1800 {
1801 HeapExtentCache<PositiveEntry>::addEntry(address, page);
1802 }
1803
1804 BaseHeapPage* HeapContainsCache::lookup(Address address)
1805 {
1806 return HeapExtentCache<PositiveEntry>::lookup(address);
1807 } 1848 }
1808 1849
1809 void Heap::flushHeapDoesNotContainCache() 1850 void Heap::flushHeapDoesNotContainCache()
1810 { 1851 {
1811 s_heapDoesNotContainCache->flush(); 1852 s_heapDoesNotContainCache->flush();
1812 } 1853 }
1813 1854
1814 // The marking mutex is used to ensure sequential access to data 1855 // The marking mutex is used to ensure sequential access to data
1815 // structures during marking. The marking mutex needs to be acquired 1856 // structures during marking. The marking mutex needs to be acquired
1816 // during marking when elements are taken from the global marking 1857 // during marking when elements are taken from the global marking
(...skipping 282 matching lines...) Expand 10 before | Expand all | Expand 10 after
2099 delete s_orphanedPagePool; 2140 delete s_orphanedPagePool;
2100 s_orphanedPagePool = 0; 2141 s_orphanedPagePool = 0;
2101 delete s_weakCallbackStack; 2142 delete s_weakCallbackStack;
2102 s_weakCallbackStack = 0; 2143 s_weakCallbackStack = 0;
2103 delete s_postMarkingCallbackStack; 2144 delete s_postMarkingCallbackStack;
2104 s_postMarkingCallbackStack = 0; 2145 s_postMarkingCallbackStack = 0;
2105 delete s_markingStack; 2146 delete s_markingStack;
2106 s_markingStack = 0; 2147 s_markingStack = 0;
2107 delete s_ephemeronStack; 2148 delete s_ephemeronStack;
2108 s_ephemeronStack = 0; 2149 s_ephemeronStack = 0;
2150 delete s_regionTree;
2151 s_regionTree = 0;
2109 ThreadState::shutdown(); 2152 ThreadState::shutdown();
2110 } 2153 }
2111 2154
2112 BaseHeapPage* Heap::contains(Address address) 2155 BaseHeapPage* Heap::contains(Address address)
2113 { 2156 {
2114 ASSERT(ThreadState::isAnyThreadInGC()); 2157 ASSERT(ThreadState::isAnyThreadInGC());
2115 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); 2158 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2116 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { 2159 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2117 BaseHeapPage* page = (*it)->contains(address); 2160 BaseHeapPage* page = (*it)->contains(address);
2118 if (page) 2161 if (page)
(...skipping 11 matching lines...) Expand all
2130 2173
2131 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) 2174 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
2132 { 2175 {
2133 ASSERT(ThreadState::isAnyThreadInGC()); 2176 ASSERT(ThreadState::isAnyThreadInGC());
2134 2177
2135 #if !ENABLE(ASSERT) 2178 #if !ENABLE(ASSERT)
2136 if (s_heapDoesNotContainCache->lookup(address)) 2179 if (s_heapDoesNotContainCache->lookup(address))
2137 return 0; 2180 return 0;
2138 #endif 2181 #endif
2139 2182
2140 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); 2183 if (BaseHeapPage* page = lookup(address)) {
2141 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { 2184 ASSERT(page->contains(address));
2142 if ((*it)->checkAndMarkPointer(visitor, address)) { 2185 ASSERT(!s_heapDoesNotContainCache->lookup(address));
2143 // Pointer was in a page of that thread. If it actually pointed 2186 // TODO: What if the thread owning this page is terminating?
2144 // into an object then that object was found and marked. 2187 page->checkAndMarkPointer(visitor, address);
2145 ASSERT(!s_heapDoesNotContainCache->lookup(address)); 2188 // TODO: We only need to set the conservative flag if checkAndMarkPointer actually marked the pointer.
haraken 2014/10/02 02:31:35 TODO => FIXME
zerny-chromium 2014/10/02 07:48:42 Done. And removed the first TODO because it is saf
2146 s_lastGCWasConservative = true; 2189 s_lastGCWasConservative = true;
2147 return address; 2190 return address;
2148 }
2149 } 2191 }
2150 2192
2151 #if !ENABLE(ASSERT) 2193 #if !ENABLE(ASSERT)
2152 s_heapDoesNotContainCache->addEntry(address, true); 2194 s_heapDoesNotContainCache->addEntry(address);
2153 #else 2195 #else
2154 if (!s_heapDoesNotContainCache->lookup(address)) 2196 if (!s_heapDoesNotContainCache->lookup(address))
2155 s_heapDoesNotContainCache->addEntry(address, true); 2197 s_heapDoesNotContainCache->addEntry(address);
2156 #endif 2198 #endif
2157 return 0; 2199 return 0;
2158 } 2200 }
2159 2201
2160 #if ENABLE(GC_PROFILE_MARKING) 2202 #if ENABLE(GC_PROFILE_MARKING)
2161 const GCInfo* Heap::findGCInfo(Address address) 2203 const GCInfo* Heap::findGCInfo(Address address)
2162 { 2204 {
2163 return ThreadState::findGCInfoFromAllThreads(address); 2205 return ThreadState::findGCInfoFromAllThreads(address);
2164 } 2206 }
2165 #endif 2207 #endif
(...skipping 537 matching lines...) Expand 10 before | Expand all | Expand 10 after
2703 2745
2704 HeaderType* header = HeaderType::fromPayload(address); 2746 HeaderType* header = HeaderType::fromPayload(address);
2705 header->checkHeader(); 2747 header->checkHeader();
2706 2748
2707 const GCInfo* gcInfo = header->gcInfo(); 2749 const GCInfo* gcInfo = header->gcInfo();
2708 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer()); 2750 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer());
2709 HeapType* heap = static_cast<HeapType*>(state->heap(heapIndex)); 2751 HeapType* heap = static_cast<HeapType*>(state->heap(heapIndex));
2710 heap->promptlyFreeObject(header); 2752 heap->promptlyFreeObject(header);
2711 } 2753 }
2712 2754
2755 BaseHeapPage* Heap::lookup(Address address)
2756 {
2757 if (!s_regionTree)
2758 return 0;
2759 if (PageMemoryRegion* region = s_regionTree->lookup(address))
2760 return region->pageFromAddress(address);
2761 return 0;
2762 }
2763
2764 static Mutex& regionTreeMutex()
2765 {
2766 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
2767 return mutex;
2768 }
2769
2770 void Heap::removePageMemoryRegion(PageMemoryRegion* region)
2771 {
2772 // Deletion of large objects (and thus their region) can happen concurrently
haraken 2014/10/02 02:31:35 their region => their regions
zerny-chromium 2014/10/02 07:48:42 Done.
2773 // on sweeper threads. Removal can also happen during thread shutdown, but
2774 // that case is safe. Regardless, we make all removals mutually exclusive.
2775 MutexLocker locker(regionTreeMutex());
2776 RegionTree::remove(region, &s_regionTree);
2777 }
2778
2779 void Heap::addPageMemoryRegion(PageMemoryRegion* region)
2780 {
2781 ASSERT(ThreadState::isAnyThreadInGC());
2782 RegionTree::add(new RegionTree(region), &s_regionTree);
2783 }
2784
2785 PageMemoryRegion* Heap::RegionTree::lookup(Address address)
2786 {
2787 RegionTree* current = s_regionTree;
2788 while (current) {
2789 Address base = current->m_region->base();
2790 if (address < base) {
2791 current = current->m_left;
2792 continue;
2793 }
2794 if (address >= base + current->m_region->size()) {
2795 current = current->m_right;
2796 continue;
2797 }
2798 ASSERT(current->m_region->contains(address));
2799 return current->m_region;
2800 }
2801 return 0;
2802 }
2803
2804 void Heap::RegionTree::add(RegionTree* newTree, RegionTree** context)
2805 {
2806 ASSERT(newTree);
2807 Address base = newTree->m_region->base();
2808 for (RegionTree* current = *context; current; current = *context) {
2809 ASSERT(!current->m_region->contains(base));
2810 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
2811 }
2812 *context = newTree;
2813 }
2814
2815 void Heap::RegionTree::remove(PageMemoryRegion* region, RegionTree** context)
2816 {
2817 ASSERT(region);
2818 ASSERT(context);
2819 ASSERT(*context);
2820 Address base = region->base();
2821 RegionTree* current = *context;
2822 for ( ; region != current->m_region; current = *context) {
2823 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
2824 ASSERT(*context);
2825 }
2826 *context = 0;
2827 if (current->m_left) {
2828 add(current->m_left, context);
2829 current->m_left = 0;
2830 }
2831 if (current->m_right) {
2832 add(current->m_right, context);
2833 current->m_right = 0;
2834 }
2835 delete current;
2836 }
2837
2713 // Force template instantiations for the types that we need. 2838 // Force template instantiations for the types that we need.
2714 template class HeapPage<FinalizedHeapObjectHeader>; 2839 template class HeapPage<FinalizedHeapObjectHeader>;
2715 template class HeapPage<HeapObjectHeader>; 2840 template class HeapPage<HeapObjectHeader>;
2716 template class ThreadHeap<FinalizedHeapObjectHeader>; 2841 template class ThreadHeap<FinalizedHeapObjectHeader>;
2717 template class ThreadHeap<HeapObjectHeader>; 2842 template class ThreadHeap<HeapObjectHeader>;
2718 2843
2719 Visitor* Heap::s_markingVisitor; 2844 Visitor* Heap::s_markingVisitor;
2720 Vector<OwnPtr<blink::WebThread> >* Heap::s_markingThreads; 2845 Vector<OwnPtr<blink::WebThread> >* Heap::s_markingThreads;
2721 CallbackStack* Heap::s_markingStack; 2846 CallbackStack* Heap::s_markingStack;
2722 CallbackStack* Heap::s_postMarkingCallbackStack; 2847 CallbackStack* Heap::s_postMarkingCallbackStack;
2723 CallbackStack* Heap::s_weakCallbackStack; 2848 CallbackStack* Heap::s_weakCallbackStack;
2724 CallbackStack* Heap::s_ephemeronStack; 2849 CallbackStack* Heap::s_ephemeronStack;
2725 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; 2850 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
2726 bool Heap::s_shutdownCalled = false; 2851 bool Heap::s_shutdownCalled = false;
2727 bool Heap::s_lastGCWasConservative = false; 2852 bool Heap::s_lastGCWasConservative = false;
2728 FreePagePool* Heap::s_freePagePool; 2853 FreePagePool* Heap::s_freePagePool;
2729 OrphanedPagePool* Heap::s_orphanedPagePool; 2854 OrphanedPagePool* Heap::s_orphanedPagePool;
2855 Heap::RegionTree* Heap::s_regionTree = 0;
2856
2730 } 2857 }
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698