Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 107 matching lines...) | |
| 118 , m_size(size) | 118 , m_size(size) |
| 119 { | 119 { |
| 120 ASSERT(size > 0); | 120 ASSERT(size > 0); |
| 121 } | 121 } |
| 122 | 122 |
| 123 bool contains(Address addr) const | 123 bool contains(Address addr) const |
| 124 { | 124 { |
| 125 return m_base <= addr && addr < (m_base + m_size); | 125 return m_base <= addr && addr < (m_base + m_size); |
| 126 } | 126 } |
| 127 | 127 |
| 128 | |
| 129 bool contains(const MemoryRegion& other) const | 128 bool contains(const MemoryRegion& other) const |
| 130 { | 129 { |
| 131 return contains(other.m_base) && contains(other.m_base + other.m_size - 1); | 130 return contains(other.m_base) && contains(other.m_base + other.m_size - 1); |
| 132 } | 131 } |
| 133 | 132 |
| 134 void release() | 133 void release() |
| 135 { | 134 { |
| 136 #if OS(POSIX) | 135 #if OS(POSIX) |
| 137 int err = munmap(m_base, m_size); | 136 int err = munmap(m_base, m_size); |
| 138 RELEASE_ASSERT(!err); | 137 RELEASE_ASSERT(!err); |
| (...skipping 41 matching lines...) | |
| 180 // whole. The PageMemoryRegion allows us to do that by keeping track | 179 // whole. The PageMemoryRegion allows us to do that by keeping track |
| 181 // of the number of pages using it in order to be able to release all | 180 // of the number of pages using it in order to be able to release all |
| 182 // of the virtual address space when there are no more pages using it. | 181 // of the virtual address space when there are no more pages using it. |
| 183 class PageMemoryRegion : public MemoryRegion { | 182 class PageMemoryRegion : public MemoryRegion { |
| 184 public: | 183 public: |
| 185 ~PageMemoryRegion() | 184 ~PageMemoryRegion() |
| 186 { | 185 { |
| 187 release(); | 186 release(); |
| 188 } | 187 } |
| 189 | 188 |
| 190 void pageRemoved() | 189 void pageDeleted(Address page) |
| 191 { | 190 { |
| 192 if (!--m_numPages) | 191 decommitPage(page); |
| 192 if (!--m_numPages) { | |
| 193 Heap::removePageMemoryRegion(this); | |
| 193 delete this; | 194 delete this; |
| 195 } | |
| 196 } | |
| 197 | |
| 198 void commitPage(Address page) | |
| 199 { | |
| 200 ASSERT(!m_committed[index(page)]); | |
| 201 m_committed[index(page)] = true; | |
| 202 } | |
| 203 | |
| 204 void decommitPage(Address page) | |
| 205 { | |
| 206 m_committed[index(page)] = false; | |
| 194 } | 207 } |
| 195 | 208 |
| 196 static PageMemoryRegion* allocate(size_t size, unsigned numPages) | 209 static PageMemoryRegion* allocate(size_t size, unsigned numPages) |
| 197 { | 210 { |
| 198 ASSERT(Heap::heapDoesNotContainCacheIsEmpty()); | 211 ASSERT(Heap::heapDoesNotContainCacheIsEmpty()); |
wibling-chromium 2014/10/01 13:12:16
NIT: It would be nice with an ASSERT here checking …
zerny-chromium 2014/10/01 14:19:55
Sure. Added the slightly tighter: ASSERT(n…
| 199 | 212 |
| 200 // Compute a random blink page aligned address for the page memory | 213 // Compute a random blink page aligned address for the page memory |
| 201 // region and attempt to get the memory there. | 214 // region and attempt to get the memory there. |
| 202 Address randomAddress = reinterpret_cast<Address>(WTF::getRandomPageBase()); | 215 Address randomAddress = reinterpret_cast<Address>(WTF::getRandomPageBase()); |
| 203 Address alignedRandomAddress = roundToBlinkPageBoundary(randomAddress); | 216 Address alignedRandomAddress = roundToBlinkPageBoundary(randomAddress); |
| 204 | 217 |
| 205 #if OS(POSIX) | 218 #if OS(POSIX) |
| 206 Address base = static_cast<Address>(mmap(alignedRandomAddress, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0)); | 219 Address base = static_cast<Address>(mmap(alignedRandomAddress, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0)); |
| 207 RELEASE_ASSERT(base != MAP_FAILED); | 220 RELEASE_ASSERT(base != MAP_FAILED); |
| 208 if (base == roundToBlinkPageBoundary(base)) | 221 if (base == roundToBlinkPageBoundary(base)) |
| (...skipping 58 matching lines...) | |
| 267 | 280 |
| 268 // FIXME: If base is by accident blink page size aligned | 281 // FIXME: If base is by accident blink page size aligned |
| 269 // here then we can create two pages out of reserved | 282 // here then we can create two pages out of reserved |
| 270 // space. Do this. | 283 // space. Do this. |
| 271 Address alignedBase = roundToBlinkPageBoundary(base); | 284 Address alignedBase = roundToBlinkPageBoundary(base); |
| 272 | 285 |
| 273 return new PageMemoryRegion(alignedBase, size, numPages); | 286 return new PageMemoryRegion(alignedBase, size, numPages); |
| 274 #endif | 287 #endif |
| 275 } | 288 } |
| 276 | 289 |
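
For illustration, a minimal POSIX-only sketch of the reservation strategy allocate() uses above: ask mmap for PROT_NONE memory at a randomized, blink-page-aligned hint, then check whether the kernel honored the alignment. kBlinkPageSize and reserveAtRandomHint are illustrative stand-ins, not the Blink names:

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Assumed stand-in for blinkPageSize; Address is char* as in the patch.
    static const uintptr_t kBlinkPageSize = uintptr_t(1) << 17;
    typedef char* Address;

    static Address reserveAtRandomHint(void* alignedRandomHint, size_t size)
    {
        // PROT_NONE: reserve address space only; pages are committed later.
        void* base = mmap(alignedRandomHint, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
        assert(base != MAP_FAILED);
        // The hint is advisory, so the result may be unaligned; the real
        // code then frees the block and retries with extra slack so it
        // can round up to an aligned base.
        if (reinterpret_cast<uintptr_t>(base) % kBlinkPageSize == 0)
            return static_cast<Address>(base);
        munmap(base, size);
        return 0; // caller retries with size + kBlinkPageSize of slack
    }
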
| 290 BaseHeapPage* pageFromAddress(Address address) | |
| 291 { | |
| 292 ASSERT(contains(address)); | |
| 293 if (!m_committed[index(address)]) | |
| 294 return 0; | |
| 295 if (m_isLargePage) | |
| 296 return pageHeaderFromObject(base()); | |
| 297 return pageHeaderFromObject(address); | |
| 298 } | |
| 299 | |
| 277 private: | 300 private: |
| 278 PageMemoryRegion(Address base, size_t size, unsigned numPages) | 301 PageMemoryRegion(Address base, size_t size, unsigned numPages) |
| 279 : MemoryRegion(base, size) | 302 : MemoryRegion(base, size) |
| 303 , m_isLargePage(numPages == 1) | |
| 280 , m_numPages(numPages) | 304 , m_numPages(numPages) |
| 281 { | 305 { |
| 306 for (size_t i = 0; i < blinkPagesPerRegion; ++i) | |
| 307 m_committed[i] = false; | |
| 282 } | 308 } |
| 283 | 309 |
| 310 unsigned index(Address address) | |
| 311 { | |
| 312 ASSERT(contains(address)); | |
| 313 if (m_isLargePage) | |
| 314 return 0; | |
| 315 size_t offset = blinkPageAddress(address) - base(); | |
| 316 ASSERT(offset % blinkPageSize == 0); | |
| 317 return offset / blinkPageSize; | |
| 318 } | |
| 319 | |
| 320 bool m_isLargePage; | |
| 321 bool m_committed[blinkPagesPerRegion]; | |
| 284 unsigned m_numPages; | 322 unsigned m_numPages; |
| 285 }; | 323 }; |
| 286 | 324 |
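
To make the new commit-tracking logic easier to follow, here is a self-contained sketch of the bookkeeping PageMemoryRegion gains in this patch: a per-page committed bitmap plus the existing page refcount, with pageDeleted() decommitting before possibly freeing the region. The constant and the omitted Heap hooks are simplified assumptions:

    #include <cassert>
    #include <cstddef>

    static const size_t kPagesPerRegion = 10; // stands in for blinkPagesPerRegion

    class RegionBookkeepingSketch {
    public:
        explicit RegionBookkeepingSketch(unsigned numPages)
            : m_numPages(numPages)
        {
            for (size_t i = 0; i < kPagesPerRegion; ++i)
                m_committed[i] = false;
        }

        void commitPage(size_t index)
        {
            assert(!m_committed[index]); // a page is committed at most once
            m_committed[index] = true;
        }

        void decommitPage(size_t index) { m_committed[index] = false; }

        // Mirrors pageDeleted(): the deleted page is decommitted, and the
        // region frees itself once no page uses it (the real code also
        // unregisters itself from the Heap's region tree first).
        void pageDeleted(size_t index)
        {
            decommitPage(index);
            if (!--m_numPages)
                delete this;
        }

    private:
        bool m_committed[kPagesPerRegion];
        unsigned m_numPages;
    };
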
| 287 // Representation of the memory used for a Blink heap page. | 325 // Representation of the memory used for a Blink heap page. |
| 288 // | 326 // |
| 289 // The representation keeps track of two memory regions: | 327 // The representation keeps track of two memory regions: |
| 290 // | 328 // |
| 291 // 1. The virtual memory reserved from the system in order to be able | 329 // 1. The virtual memory reserved from the system in order to be able |
| 292 // to free all the virtual memory reserved. Multiple PageMemory | 330 // to free all the virtual memory reserved. Multiple PageMemory |
| 293 // instances can share the same reserved memory region and | 331 // instances can share the same reserved memory region and |
| 294 // therefore notify the reserved memory region on destruction so | 332 // therefore notify the reserved memory region on destruction so |
| 295 // that the system memory can be given back when all PageMemory | 333 // that the system memory can be given back when all PageMemory |
| 296 // instances for that memory are gone. | 334 // instances for that memory are gone. |
| 297 // | 335 // |
| 298 // 2. The writable memory (a sub-region of the reserved virtual | 336 // 2. The writable memory (a sub-region of the reserved virtual |
| 299 // memory region) that is used for the actual heap page payload. | 337 // memory region) that is used for the actual heap page payload. |
| 300 // | 338 // |
| 301 // Guard pages are created before and after the writable memory. | 339 // Guard pages are created before and after the writable memory. |
| 302 class PageMemory { | 340 class PageMemory { |
| 303 public: | 341 public: |
| 304 ~PageMemory() | 342 ~PageMemory() |
| 305 { | 343 { |
| 306 __lsan_unregister_root_region(m_writable.base(), m_writable.size()); | 344 __lsan_unregister_root_region(m_writable.base(), m_writable.size()); |
| 307 m_reserved->pageRemoved(); | 345 m_reserved->pageDeleted(writableStart()); |
| 308 } | 346 } |
| 309 | 347 |
| 310 bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); } | 348 bool commit() WARN_UNUSED_RETURN |
| 311 void decommit() { m_writable.decommit(); } | 349 { |
| 350 m_reserved->commitPage(writableStart()); | |
| 351 return m_writable.commit(); | |
| 352 } | |
| 353 | |
| 354 void decommit() | |
| 355 { | |
| 356 m_reserved->decommitPage(writableStart()); | |
| 357 m_writable.decommit(); | |
| 358 } | |
| 359 | |
| 360 PageMemoryRegion* region() { return m_reserved; } | |
| 312 | 361 |
| 313 Address writableStart() { return m_writable.base(); } | 362 Address writableStart() { return m_writable.base(); } |
| 314 | 363 |
| 315 static PageMemory* setupPageMemoryInRegion(PageMemoryRegion* region, size_t pageOffset, size_t payloadSize) | 364 static PageMemory* setupPageMemoryInRegion(PageMemoryRegion* region, size_t pageOffset, size_t payloadSize) |
| 316 { | 365 { |
| 317 // Set up the payload one OS page into the page memory. The | 366 // Set up the payload one OS page into the page memory. The |
| 318 // first OS page is the guard page. | 367 // first OS page is the guard page. |
| 319 Address payloadAddress = region->base() + pageOffset + osPageSize(); | 368 Address payloadAddress = region->base() + pageOffset + osPageSize(); |
| 320 return new PageMemory(region, MemoryRegion(payloadAddress, payloadSize)); | 369 return new PageMemory(region, MemoryRegion(payloadAddress, payloadSize)); |
| 321 } | 370 } |
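
As a rough illustration of the layout setupPageMemoryInRegion() relies on, each blink-page slot in a region is [ guard OS page | payload | guard OS page ], so the writable payload starts one OS page past the slot's base. sysconf is used here as an assumed stand-in for Blink's osPageSize():

    #include <unistd.h>
    #include <cstddef>

    // Computes where the writable payload of one page slot begins, mirroring:
    // payloadAddress = region->base() + pageOffset + osPageSize().
    static char* payloadStart(char* regionBase, size_t pageOffset)
    {
        size_t osPage = static_cast<size_t>(sysconf(_SC_PAGESIZE)); // front guard page
        return regionBase + pageOffset + osPage; // back guard follows the payload
    }
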
| (...skipping 280 matching lines...) | |
| 602 ThreadHeap<Header>::~ThreadHeap() | 651 ThreadHeap<Header>::~ThreadHeap() |
| 603 { | 652 { |
| 604 ASSERT(!m_firstPage); | 653 ASSERT(!m_firstPage); |
| 605 ASSERT(!m_firstLargeHeapObject); | 654 ASSERT(!m_firstLargeHeapObject); |
| 606 } | 655 } |
| 607 | 656 |
| 608 template<typename Header> | 657 template<typename Header> |
| 609 void ThreadHeap<Header>::cleanupPages() | 658 void ThreadHeap<Header>::cleanupPages() |
| 610 { | 659 { |
| 611 clearFreeLists(); | 660 clearFreeLists(); |
| 612 flushHeapContainsCache(); | |
| 613 | 661 |
| 614 // Add the ThreadHeap's pages to the orphanedPagePool. | 662 // Add the ThreadHeap's pages to the orphanedPagePool. |
| 615 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) | 663 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) |
| 616 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); | 664 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); |
| 617 m_firstPage = 0; | 665 m_firstPage = 0; |
| 618 | 666 |
| 619 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) | 667 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) |
| 620 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); | 668 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); |
| 621 m_firstLargeHeapObject = 0; | 669 m_firstLargeHeapObject = 0; |
| 622 } | 670 } |
| (...skipping 295 matching lines...) | |
| 918 | 966 |
| 919 // If ASan is supported, we add allocationGranularity bytes to the allocated space and | 967 // If ASan is supported, we add allocationGranularity bytes to the allocated space and |
| 920 // poison that to detect overflows. | 968 // poison that to detect overflows. |
| 921 #if defined(ADDRESS_SANITIZER) | 969 #if defined(ADDRESS_SANITIZER) |
| 922 allocationSize += allocationGranularity; | 970 allocationSize += allocationGranularity; |
| 923 #endif | 971 #endif |
| 924 if (threadState()->shouldGC()) | 972 if (threadState()->shouldGC()) |
| 925 threadState()->setGCRequested(); | 973 threadState()->setGCRequested(); |
| 926 Heap::flushHeapDoesNotContainCache(); | 974 Heap::flushHeapDoesNotContainCache(); |
| 927 PageMemory* pageMemory = PageMemory::allocate(allocationSize); | 975 PageMemory* pageMemory = PageMemory::allocate(allocationSize); |
| 976 m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region()); | |
| 928 Address largeObjectAddress = pageMemory->writableStart(); | 977 Address largeObjectAddress = pageMemory->writableStart(); |
| 929 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 978 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); |
| 930 memset(headerAddress, 0, size); | 979 memset(headerAddress, 0, size); |
| 931 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); | 980 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); |
| 932 Address result = headerAddress + sizeof(*header); | 981 Address result = headerAddress + sizeof(*header); |
| 933 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 982 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| 934 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); | 983 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); |
| 935 | 984 |
| 936 // Poison the object header and allocationGranularity bytes after the object | 985 // Poison the object header and allocationGranularity bytes after the object |
| 937 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 986 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
| 938 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); | 987 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); |
| 939 largeObject->link(&m_firstLargeHeapObject); | 988 largeObject->link(&m_firstLargeHeapObject); |
| 940 stats().increaseAllocatedSpace(largeObject->size()); | 989 stats().increaseAllocatedSpace(largeObject->size()); |
| 941 stats().increaseObjectSpace(largeObject->payloadSize()); | 990 stats().increaseObjectSpace(largeObject->payloadSize()); |
| 942 return result; | 991 return result; |
| 943 } | 992 } |
| 944 | 993 |
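
The address arithmetic in allocateLargeObject() above can be summarized in a small sketch: the writable start holds the LargeHeapObject bookkeeping, followed (after any header padding) by the object header, followed by the payload the caller receives. The sizes are plain parameters here, since headerPadding<Header>() and the header types live elsewhere:

    #include <cstddef>
    #include <cstdint>

    struct LargeObjectLayout {
        uintptr_t largeObjectAddress; // LargeHeapObject<Header> bookkeeping
        uintptr_t headerAddress;      // per-object Header
        uintptr_t payload;            // what allocateLargeObject returns
    };

    static LargeObjectLayout computeLayout(uintptr_t writableStart,
                                           size_t largeObjectSize,
                                           size_t headerPadding,
                                           size_t headerSize)
    {
        LargeObjectLayout l;
        l.largeObjectAddress = writableStart;
        l.headerAddress = writableStart + largeObjectSize + headerPadding;
        l.payload = l.headerAddress + headerSize; // allocationMask-aligned in the real code
        return l;
    }
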
| 945 template<typename Header> | 994 template<typename Header> |
| 946 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) | 995 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) |
| 947 { | 996 { |
| 948 flushHeapContainsCache(); | |
| 949 object->unlink(previousNext); | 997 object->unlink(previousNext); |
| 950 object->finalize(); | 998 object->finalize(); |
| 951 | 999 |
| 952 // Unpoison the object header and allocationGranularity bytes after the | 1000 // Unpoison the object header and allocationGranularity bytes after the |
| 953 // object before freeing. | 1001 // object before freeing. |
| 954 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); | 1002 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); |
| 955 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); | 1003 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); |
| 956 | 1004 |
| 957 if (object->terminating()) { | 1005 if (object->terminating()) { |
| 958 ASSERT(ThreadState::current()->isTerminating()); | 1006 ASSERT(ThreadState::current()->isTerminating()); |
| (...skipping 183 matching lines...) | |
| 1142 // When adding a page to the ThreadHeap using HeapObjectHeaders, store the GCInfo on the heap | 1190 // When adding a page to the ThreadHeap using HeapObjectHeaders, store the GCInfo on the heap |
| 1143 // since it is the same for all objects. | 1191 // since it is the same for all objects. |
| 1144 ASSERT(gcInfo); | 1192 ASSERT(gcInfo); |
| 1145 allocatePage(gcInfo); | 1193 allocatePage(gcInfo); |
| 1146 } | 1194 } |
| 1147 | 1195 |
| 1148 template <typename Header> | 1196 template <typename Header> |
| 1149 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page) | 1197 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page) |
| 1150 { | 1198 { |
| 1151 MutexLocker locker(m_threadState->sweepMutex()); | 1199 MutexLocker locker(m_threadState->sweepMutex()); |
| 1152 flushHeapContainsCache(); | |
| 1153 if (page->terminating()) { | 1200 if (page->terminating()) { |
| 1154 // The thread is shutting down so this page is being removed as part | 1201 // The thread is shutting down so this page is being removed as part |
| 1155 // of a thread local GC. In that case the page could be accessed in the | 1202 // of a thread local GC. In that case the page could be accessed in the |
| 1156 // next global GC either due to a dead object being traced via a | 1203 // next global GC either due to a dead object being traced via a |
| 1157 // conservative pointer or due to a programming error where an object | 1204 // conservative pointer or due to a programming error where an object |
| 1158 // in another thread heap keeps a dangling pointer to this object. | 1205 // in another thread heap keeps a dangling pointer to this object. |
| 1159 // To guard against this we put the page in the orphanedPagePool to | 1206 // To guard against this we put the page in the orphanedPagePool to |
| 1160 // ensure it is still reachable. After the next global GC it can be | 1207 // ensure it is still reachable. After the next global GC it can be |
| 1161 // decommitted and moved to the page pool assuming no rogue/dangling | 1208 // decommitted and moved to the page pool assuming no rogue/dangling |
| 1162 // pointers refer to it. | 1209 // pointers refer to it. |
| (...skipping 13 matching lines...) | |
| 1176 // We continue allocating page memory until we succeed in getting one. | 1223 // We continue allocating page memory until we succeed in getting one. |
| 1177 // Since the FreePagePool is global other threads could use all the | 1224 // Since the FreePagePool is global other threads could use all the |
| 1178 // newly allocated page memory before this thread calls takeFreePage. | 1225 // newly allocated page memory before this thread calls takeFreePage. |
| 1179 while (!pageMemory) { | 1226 while (!pageMemory) { |
| 1180 // Allocate a memory region for blinkPagesPerRegion pages that | 1227 // Allocate a memory region for blinkPagesPerRegion pages that |
| 1181 // will each have the following layout. | 1228 // will each have the following layout. |
| 1182 // | 1229 // |
| 1183 // [ guard os page | ... payload ... | guard os page ] | 1230 // [ guard os page | ... payload ... | guard os page ] |
| 1184 // ^---{ aligned to blink page size } | 1231 // ^---{ aligned to blink page size } |
| 1185 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); | 1232 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); |
| 1233 m_threadState->allocatedRegionsSinceLastGC().append(region); | |
wibling-chromium 2014/10/01 13:12:16
NIT: Perhaps add a comment about the race we discu…
zerny-chromium 2014/10/01 14:19:55
Mads suggested rewriting so we always take at leas…
| 1186 // Setup the PageMemory object for each of the pages in the | 1234 // Setup the PageMemory object for each of the pages in the |
| 1187 // region. | 1235 // region. |
| 1188 size_t offset = 0; | 1236 size_t offset = 0; |
| 1189 for (size_t i = 0; i < blinkPagesPerRegion; i++) { | 1237 for (size_t i = 0; i < blinkPagesPerRegion; i++) { |
| 1190 Heap::freePagePool()->addFreePage(m_index, PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize())); | 1238 Heap::freePagePool()->addFreePage(m_index, PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize())); |
| 1191 offset += blinkPageSize; | 1239 offset += blinkPageSize; |
| 1192 } | 1240 } |
| 1193 pageMemory = Heap::freePagePool()->takeFreePage(m_index); | 1241 pageMemory = Heap::freePagePool()->takeFreePage(m_index); |
| 1194 } | 1242 } |
| 1195 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); | 1243 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); |
| (...skipping 546 matching lines...) | |
| 1742 } | 1790 } |
| 1743 | 1791 |
| 1744 if (json) { | 1792 if (json) { |
| 1745 json->setInteger("class", tag); | 1793 json->setInteger("class", tag); |
| 1746 json->setInteger("size", header->size()); | 1794 json->setInteger("size", header->size()); |
| 1747 json->setInteger("isMarked", isMarked()); | 1795 json->setInteger("isMarked", isMarked()); |
| 1748 } | 1796 } |
| 1749 } | 1797 } |
| 1750 #endif | 1798 #endif |
| 1751 | 1799 |
| 1752 template<typename Entry> | 1800 void HeapDoesNotContainCache::flush() |
| 1753 void HeapExtentCache<Entry>::flush() | |
| 1754 { | 1801 { |
| 1755 if (m_hasEntries) { | 1802 if (m_hasEntries) { |
| 1756 for (int i = 0; i < numberOfEntries; i++) | 1803 for (int i = 0; i < numberOfEntries; i++) |
| 1757 m_entries[i] = Entry(); | 1804 m_entries[i] = 0; |
| 1758 m_hasEntries = false; | 1805 m_hasEntries = false; |
| 1759 } | 1806 } |
| 1760 } | 1807 } |
| 1761 | 1808 |
| 1762 template<typename Entry> | 1809 size_t HeapDoesNotContainCache::hash(Address address) |
| 1763 size_t HeapExtentCache<Entry>::hash(Address address) | |
| 1764 { | 1810 { |
| 1765 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); | 1811 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); |
| 1766 value ^= value >> numberOfEntriesLog2; | 1812 value ^= value >> numberOfEntriesLog2; |
| 1767 value ^= value >> (numberOfEntriesLog2 * 2); | 1813 value ^= value >> (numberOfEntriesLog2 * 2); |
| 1768 value &= numberOfEntries - 1; | 1814 value &= numberOfEntries - 1; |
| 1769 return value & ~1; // Returns only an even number. | 1815 return value & ~1; // Returns only an even number. |
| 1770 } | 1816 } |
| 1771 | 1817 |
| 1772 template<typename Entry> | 1818 bool HeapDoesNotContainCache::lookup(Address address) |
| 1773 typename Entry::LookupResult HeapExtentCache<Entry>::lookup(Address address) | |
| 1774 { | 1819 { |
| 1775 size_t index = hash(address); | 1820 size_t index = hash(address); |
| 1776 ASSERT(!(index & 1)); | 1821 ASSERT(!(index & 1)); |
| 1777 Address cachePage = roundToBlinkPageStart(address); | 1822 Address cachePage = roundToBlinkPageStart(address); |
| 1778 if (m_entries[index].address() == cachePage) | 1823 if (m_entries[index] == cachePage) |
| 1779 return m_entries[index].result(); | 1824 return m_entries[index]; |
| 1780 if (m_entries[index + 1].address() == cachePage) | 1825 if (m_entries[index + 1] == cachePage) |
| 1781 return m_entries[index + 1].result(); | 1826 return m_entries[index + 1]; |
| 1782 return 0; | 1827 return 0; |
| 1783 } | 1828 } |
| 1784 | 1829 |
| 1785 template<typename Entry> | 1830 void HeapDoesNotContainCache::addEntry(Address address) |
| 1786 void HeapExtentCache<Entry>::addEntry(Address address, typename Entry::LookupRes ult entry) | |
| 1787 { | 1831 { |
| 1788 m_hasEntries = true; | 1832 m_hasEntries = true; |
| 1789 size_t index = hash(address); | 1833 size_t index = hash(address); |
| 1790 ASSERT(!(index & 1)); | 1834 ASSERT(!(index & 1)); |
| 1791 Address cachePage = roundToBlinkPageStart(address); | 1835 Address cachePage = roundToBlinkPageStart(address); |
| 1792 m_entries[index + 1] = m_entries[index]; | 1836 m_entries[index + 1] = m_entries[index]; |
| 1793 m_entries[index] = Entry(cachePage, entry); | 1837 m_entries[index] = cachePage; |
| 1794 } | |
| 1795 | |
| 1796 // These should not be needed, but it seems impossible to persuade clang to | |
| 1797 // instantiate the template functions and export them from a shared library, so | |
| 1798 // we add these in the non-templated subclass, which does not have that issue. | |
| 1799 void HeapContainsCache::addEntry(Address address, BaseHeapPage* page) | |
| 1800 { | |
| 1801 HeapExtentCache<PositiveEntry>::addEntry(address, page); | |
| 1802 } | |
| 1803 | |
| 1804 BaseHeapPage* HeapContainsCache::lookup(Address address) | |
| 1805 { | |
| 1806 return HeapExtentCache<PositiveEntry>::lookup(address); | |
| 1807 } | 1838 } |
| 1808 | 1839 |
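
The cache being rewritten here keeps two slots per hash bucket (the hash always returns an even index), and addEntry() demotes the bucket's first entry into the second slot before installing the new page. A self-contained sketch of that scheme, with assumed sizes; Blink's numberOfEntriesLog2 and blinkPageSizeLog2 may differ:

    #include <cstddef>
    #include <cstdint>

    static const size_t kEntriesLog2 = 12;
    static const size_t kEntries = size_t(1) << kEntriesLog2;
    static const size_t kPageSizeLog2 = 17; // assumed blink page size log2

    static uintptr_t s_entries[kEntries]; // zero-initialized: empty cache

    static size_t bucket(uintptr_t address)
    {
        size_t value = static_cast<size_t>(address >> kPageSizeLog2);
        value ^= value >> kEntriesLog2;
        value ^= value >> (kEntriesLog2 * 2);
        value &= kEntries - 1;
        return value & ~size_t(1); // even index: bucket spans slots [i, i + 1]
    }

    static bool cacheLookup(uintptr_t address)
    {
        size_t i = bucket(address);
        uintptr_t page = address & ~((uintptr_t(1) << kPageSizeLog2) - 1);
        return s_entries[i] == page || s_entries[i + 1] == page;
    }

    static void cacheAdd(uintptr_t address)
    {
        size_t i = bucket(address);
        s_entries[i + 1] = s_entries[i]; // demote the previous entry
        s_entries[i] = address & ~((uintptr_t(1) << kPageSizeLog2) - 1);
    }
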
| 1809 void Heap::flushHeapDoesNotContainCache() | 1840 void Heap::flushHeapDoesNotContainCache() |
| 1810 { | 1841 { |
| 1811 s_heapDoesNotContainCache->flush(); | 1842 s_heapDoesNotContainCache->flush(); |
| 1812 } | 1843 } |
| 1813 | 1844 |
| 1814 // The marking mutex is used to ensure sequential access to data | 1845 // The marking mutex is used to ensure sequential access to data |
| 1815 // structures during marking. The marking mutex needs to be acquired | 1846 // structures during marking. The marking mutex needs to be acquired |
| 1816 // during marking when elements are taken from the global marking | 1847 // during marking when elements are taken from the global marking |
| (...skipping 282 matching lines...) | |
| 2099 delete s_orphanedPagePool; | 2130 delete s_orphanedPagePool; |
| 2100 s_orphanedPagePool = 0; | 2131 s_orphanedPagePool = 0; |
| 2101 delete s_weakCallbackStack; | 2132 delete s_weakCallbackStack; |
| 2102 s_weakCallbackStack = 0; | 2133 s_weakCallbackStack = 0; |
| 2103 delete s_postMarkingCallbackStack; | 2134 delete s_postMarkingCallbackStack; |
| 2104 s_postMarkingCallbackStack = 0; | 2135 s_postMarkingCallbackStack = 0; |
| 2105 delete s_markingStack; | 2136 delete s_markingStack; |
| 2106 s_markingStack = 0; | 2137 s_markingStack = 0; |
| 2107 delete s_ephemeronStack; | 2138 delete s_ephemeronStack; |
| 2108 s_ephemeronStack = 0; | 2139 s_ephemeronStack = 0; |
| 2140 delete s_regionTree; | |
| 2141 s_regionTree = 0; | |
| 2109 ThreadState::shutdown(); | 2142 ThreadState::shutdown(); |
| 2110 } | 2143 } |
| 2111 | 2144 |
| 2112 BaseHeapPage* Heap::contains(Address address) | 2145 BaseHeapPage* Heap::contains(Address address) |
| 2113 { | 2146 { |
| 2114 ASSERT(ThreadState::isAnyThreadInGC()); | 2147 ASSERT(ThreadState::isAnyThreadInGC()); |
| 2115 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 2148 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
| 2116 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 2149 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
| 2117 BaseHeapPage* page = (*it)->contains(address); | 2150 BaseHeapPage* page = (*it)->contains(address); |
| 2118 if (page) | 2151 if (page) |
| (...skipping 11 matching lines...) | |
| 2130 | 2163 |
| 2131 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 2164 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
| 2132 { | 2165 { |
| 2133 ASSERT(ThreadState::isAnyThreadInGC()); | 2166 ASSERT(ThreadState::isAnyThreadInGC()); |
| 2134 | 2167 |
| 2135 #if !ENABLE(ASSERT) | 2168 #if !ENABLE(ASSERT) |
| 2136 if (s_heapDoesNotContainCache->lookup(address)) | 2169 if (s_heapDoesNotContainCache->lookup(address)) |
| 2137 return 0; | 2170 return 0; |
| 2138 #endif | 2171 #endif |
| 2139 | 2172 |
| 2140 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 2173 if (BaseHeapPage* page = lookup(address)) { |
| 2141 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 2174 ASSERT(page->contains(address)); |
| 2142 if ((*it)->checkAndMarkPointer(visitor, address)) { | 2175 ASSERT(!s_heapDoesNotContainCache->lookup(address)); |
| 2143 // Pointer was in a page of that thread. If it actually pointed | 2176 // TODO: What if the thread owning this page is terminating? |
| 2144 // into an object then that object was found and marked. | 2177 page->checkAndMarkPointer(visitor, address); |
| 2145 ASSERT(!s_heapDoesNotContainCache->lookup(address)); | 2178 // TODO: We only need to set the conservative flag if checkAndMarkPointer actually marked the pointer. |
| 2146 s_lastGCWasConservative = true; | 2179 s_lastGCWasConservative = true; |
| 2147 return address; | 2180 return address; |
| 2148 } | |
| 2149 } | 2181 } |
| 2150 | 2182 |
| 2151 #if !ENABLE(ASSERT) | 2183 #if !ENABLE(ASSERT) |
| 2152 s_heapDoesNotContainCache->addEntry(address, true); | 2184 s_heapDoesNotContainCache->addEntry(address); |
| 2153 #else | 2185 #else |
| 2154 if (!s_heapDoesNotContainCache->lookup(address)) | 2186 if (!s_heapDoesNotContainCache->lookup(address)) |
| 2155 s_heapDoesNotContainCache->addEntry(address, true); | 2187 s_heapDoesNotContainCache->addEntry(address); |
| 2156 #endif | 2188 #endif |
| 2157 return 0; | 2189 return 0; |
| 2158 } | 2190 } |
| 2159 | 2191 |
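
Putting the pieces of the rewritten checkAndMarkPointer() together, the fast path is: a hit in the negative cache rules the address out, a region-tree hit marks the object and flags the GC as conservative, and a miss is remembered for next time. A toy sketch with a one-entry negative cache and a stubbed-out page lookup, both assumptions rather than Blink API:

    #include <cstdint>

    static const uintptr_t kPageMask = ~((uintptr_t(1) << 17) - 1);

    // Stub standing in for Heap::lookup (the region-tree walk); always a
    // miss in this sketch.
    static const void* heapPageFor(uintptr_t) { return 0; }

    static uintptr_t s_notInHeapPage;        // toy one-entry negative cache
    static bool s_lastGCWasConservativeFlag;

    static uintptr_t checkPointerSketch(uintptr_t address)
    {
        if (s_notInHeapPage == (address & kPageMask))
            return 0; // known non-heap page: cheapest exit
        if (heapPageFor(address)) {
            s_lastGCWasConservativeFlag = true; // conservative hit
            return address;
        }
        s_notInHeapPage = address & kPageMask; // remember the miss
        return 0;
    }
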
| 2160 #if ENABLE(GC_PROFILE_MARKING) | 2192 #if ENABLE(GC_PROFILE_MARKING) |
| 2161 const GCInfo* Heap::findGCInfo(Address address) | 2193 const GCInfo* Heap::findGCInfo(Address address) |
| 2162 { | 2194 { |
| 2163 return ThreadState::findGCInfoFromAllThreads(address); | 2195 return ThreadState::findGCInfoFromAllThreads(address); |
| 2164 } | 2196 } |
| 2165 #endif | 2197 #endif |
| (...skipping 537 matching lines...) | |
| 2703 | 2735 |
| 2704 HeaderType* header = HeaderType::fromPayload(address); | 2736 HeaderType* header = HeaderType::fromPayload(address); |
| 2705 header->checkHeader(); | 2737 header->checkHeader(); |
| 2706 | 2738 |
| 2707 const GCInfo* gcInfo = header->gcInfo(); | 2739 const GCInfo* gcInfo = header->gcInfo(); |
| 2708 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer()); | 2740 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer()); |
| 2709 HeapType* heap = static_cast<HeapType*>(state->heap(heapIndex)); | 2741 HeapType* heap = static_cast<HeapType*>(state->heap(heapIndex)); |
| 2710 heap->promptlyFreeObject(header); | 2742 heap->promptlyFreeObject(header); |
| 2711 } | 2743 } |
| 2712 | 2744 |
| 2745 BaseHeapPage* Heap::lookup(Address address) | |
| 2746 { | |
| 2747 if (!s_regionTree) | |
| 2748 return 0; | |
| 2749 if (PageMemoryRegion* region = s_regionTree->lookup(address)) | |
| 2750 return region->pageFromAddress(address); | |
| 2751 return 0; | |
| 2752 } | |
| 2753 | |
| 2754 void Heap::removePageMemoryRegion(PageMemoryRegion* region) | |
| 2755 { | |
| 2756 // Deletion of large objects (and thus their region) can happen concurrently | |
| 2757 // on sweeper threads. Removal can also happen during thread shutdown, but | |
| 2758 // that case is safe. Regardless, we make removal mutually exclusive and | |
| 2759 // reuse the marking mutex, which is not in use during either of those cases. | |
| 2760 MutexLocker locker(markingMutex()); | |
| 2761 RegionTree::remove(region, &s_regionTree); | |
| 2762 } | |
| 2763 | |
| 2764 void Heap::addPageMemoryRegion(PageMemoryRegion* region) | |
| 2765 { | |
| 2766 // No locking because regions are only added in ThreadState::prepareForGC which is in a GCScope. | |
| 2767 RegionTree::add(new RegionTree(region), &s_regionTree); | |
| 2768 } | |
| 2769 | |
| 2770 PageMemoryRegion* Heap::RegionTree::lookup(Address address) | |
| 2771 { | |
| 2772 RegionTree* current = s_regionTree; | |
| 2773 while (current) { | |
| 2774 Address base = current->m_region->base(); | |
| 2775 if (address < base) { | |
| 2776 current = current->m_left; | |
| 2777 continue; | |
| 2778 } | |
| 2779 if (address >= base + current->m_region->size()) { | |
| 2780 current = current->m_right; | |
| 2781 continue; | |
| 2782 } | |
| 2783 ASSERT(current->m_region->contains(address)); | |
| 2784 return current->m_region; | |
| 2785 } | |
| 2786 return 0; | |
| 2787 } | |
| 2788 | |
| 2789 void Heap::RegionTree::add(RegionTree* newTree, RegionTree** context) | |
| 2790 { | |
| 2791 if (!newTree) | |
| 2792 return; | |
| 2793 Address base = newTree->m_region->base(); | |
| 2794 for (RegionTree* current = *context; current; current = *context) { | |
| 2795 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right; | |
wibling-chromium 2014/10/01 13:12:16
You could make this a real if and add an assert in…
zerny-chromium 2014/10/01 14:19:55
Are you OK with keeping the if-expr and unconditio…
| 2796 } | |
| 2797 *context = newTree; | |
| 2798 } | |
| 2799 | |
| 2800 void Heap::RegionTree::remove(PageMemoryRegion* region, RegionTree** context) | |
| 2801 { | |
| 2802 ASSERT(region); | |
| 2803 ASSERT(context); | |
| 2804 Address base = region->base(); | |
| 2805 RegionTree* current = *context; | |
| 2806 ASSERT(current); | |
| 2807 while (region != current->m_region) { | |
wibling-chromium 2014/10/01 13:12:16
NIT: Why the "while" here instead of the for loop …
zerny-chromium 2014/10/01 14:19:55
current is needed outside and the assert structure…
| 2808 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right; | |
| 2809 current = *context; | |
| 2810 ASSERT(current); | |
| 2811 } | |
| 2812 *context = 0; | |
| 2813 add(current->m_left, context); | |
| 2814 add(current->m_right, context); | |
| 2815 current->m_left = 0; | |
| 2816 current->m_right = 0; | |
| 2817 delete current; | |
| 2818 } | |
| 2819 | |
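
Finally, a standalone sketch of the unbalanced binary search tree behind Heap::RegionTree: nodes are keyed by region base, lookup() walks by interval, and remove() detaches the matching node and grafts its two subtrees back in at the vacated slot, exactly as above. Region and Node are simplified stand-ins for PageMemoryRegion and RegionTree:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Region { uintptr_t base; uintptr_t size; };

    struct Node {
        explicit Node(Region* r) : region(r), left(0), right(0) { }
        Region* region;
        Node* left;
        Node* right;
    };

    // Grafts a whole subtree at the correct slot under *context.
    static void treeAdd(Node* node, Node** context)
    {
        if (!node)
            return;
        uintptr_t base = node->region->base;
        for (Node* current = *context; current; current = *context)
            context = (base < current->region->base) ? &current->left : &current->right;
        *context = node;
    }

    static Region* treeLookup(Node* root, uintptr_t address)
    {
        for (Node* current = root; current;) {
            uintptr_t base = current->region->base;
            if (address < base)
                current = current->left;
            else if (address >= base + current->region->size)
                current = current->right;
            else
                return current->region; // address falls inside this region
        }
        return 0;
    }

    static void treeRemove(Region* region, Node** context)
    {
        Node* current = *context;
        while (current->region != region) {
            context = (region->base < current->region->base) ? &current->left : &current->right;
            current = *context;
            assert(current);
        }
        *context = 0; // detach, then re-insert the orphaned subtrees
        treeAdd(current->left, context);
        treeAdd(current->right, context);
        delete current;
    }

Re-adding the detached node's subtrees is safe because every key in them already satisfies the path constraints leading to the vacated slot, which is why remove() can reuse add() instead of a classic successor swap.
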
| 2713 // Force template instantiations for the types that we need. | 2820 // Force template instantiations for the types that we need. |
| 2714 template class HeapPage<FinalizedHeapObjectHeader>; | 2821 template class HeapPage<FinalizedHeapObjectHeader>; |
| 2715 template class HeapPage<HeapObjectHeader>; | 2822 template class HeapPage<HeapObjectHeader>; |
| 2716 template class ThreadHeap<FinalizedHeapObjectHeader>; | 2823 template class ThreadHeap<FinalizedHeapObjectHeader>; |
| 2717 template class ThreadHeap<HeapObjectHeader>; | 2824 template class ThreadHeap<HeapObjectHeader>; |
| 2718 | 2825 |
| 2719 Visitor* Heap::s_markingVisitor; | 2826 Visitor* Heap::s_markingVisitor; |
| 2720 Vector<OwnPtr<blink::WebThread> >* Heap::s_markingThreads; | 2827 Vector<OwnPtr<blink::WebThread> >* Heap::s_markingThreads; |
| 2721 CallbackStack* Heap::s_markingStack; | 2828 CallbackStack* Heap::s_markingStack; |
| 2722 CallbackStack* Heap::s_postMarkingCallbackStack; | 2829 CallbackStack* Heap::s_postMarkingCallbackStack; |
| 2723 CallbackStack* Heap::s_weakCallbackStack; | 2830 CallbackStack* Heap::s_weakCallbackStack; |
| 2724 CallbackStack* Heap::s_ephemeronStack; | 2831 CallbackStack* Heap::s_ephemeronStack; |
| 2725 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | 2832 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; |
| 2726 bool Heap::s_shutdownCalled = false; | 2833 bool Heap::s_shutdownCalled = false; |
| 2727 bool Heap::s_lastGCWasConservative = false; | 2834 bool Heap::s_lastGCWasConservative = false; |
| 2728 FreePagePool* Heap::s_freePagePool; | 2835 FreePagePool* Heap::s_freePagePool; |
| 2729 OrphanedPagePool* Heap::s_orphanedPagePool; | 2836 OrphanedPagePool* Heap::s_orphanedPagePool; |
| 2837 Heap::RegionTree* Heap::s_regionTree = 0; | |
| 2838 | |
| 2730 } | 2839 } |