| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 107 matching lines...) |
| 118 , m_size(size) | 118 , m_size(size) |
| 119 { | 119 { |
| 120 ASSERT(size > 0); | 120 ASSERT(size > 0); |
| 121 } | 121 } |
| 122 | 122 |
| 123 bool contains(Address addr) const | 123 bool contains(Address addr) const |
| 124 { | 124 { |
| 125 return m_base <= addr && addr < (m_base + m_size); | 125 return m_base <= addr && addr < (m_base + m_size); |
| 126 } | 126 } |
| 127 | 127 |
| 128 | |
| 129 bool contains(const MemoryRegion& other) const | 128 bool contains(const MemoryRegion& other) const |
| 130 { | 129 { |
| 131 return contains(other.m_base) && contains(other.m_base + other.m_size - 1); | 130 return contains(other.m_base) && contains(other.m_base + other.m_size - 1); |
| 132 } | 131 } |
| 133 | 132 |
| 134 void release() | 133 void release() |
| 135 { | 134 { |
| 136 #if OS(POSIX) | 135 #if OS(POSIX) |
| 137 int err = munmap(m_base, m_size); | 136 int err = munmap(m_base, m_size); |
| 138 RELEASE_ASSERT(!err); | 137 RELEASE_ASSERT(!err); |
| (...skipping 41 matching lines...) |
| 180 // whole. The PageMemoryRegion allows us to do that by keeping track | 179 // whole. The PageMemoryRegion allows us to do that by keeping track |
| 181 // of the number of pages using it in order to be able to release all | 180 // of the number of pages using it in order to be able to release all |
| 182 // of the virtual address space when there are no more pages using it. | 181 // of the virtual address space when there are no more pages using it. |
| 183 class PageMemoryRegion : public MemoryRegion { | 182 class PageMemoryRegion : public MemoryRegion { |
| 184 public: | 183 public: |
| 185 ~PageMemoryRegion() | 184 ~PageMemoryRegion() |
| 186 { | 185 { |
| 187 release(); | 186 release(); |
| 188 } | 187 } |
| 189 | 188 |
| 190 void pageRemoved() | 189 void pageDeleted(Address page) |
| 191 { | 190 { |
| 192 if (!--m_numPages) | 191 markPageUnused(page); |
| 192 if (!--m_numPages) { |
| 193 Heap::removePageMemoryRegion(this); |
| 193 delete this; | 194 delete this; |
| 195 } |
| 196 } |
| 197 |
| 198 void markPageUsed(Address page) |
| 199 { |
| 200 ASSERT(!m_inUse[index(page)]); |
| 201 m_inUse[index(page)] = true; |
| 202 } |
| 203 |
| 204 void markPageUnused(Address page) |
| 205 { |
| 206 m_inUse[index(page)] = false; |
| 207 } |
| 208 |
| 209 static PageMemoryRegion* allocateLargePage(size_t size) |
| 210 { |
| 211 return allocate(size, 1); |
| 212 } |
| 213 |
| 214 static PageMemoryRegion* allocateNormalPages() |
| 215 { |
| 216 return allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); |
| 217 } |
| 218 |
| 219 BaseHeapPage* pageFromAddress(Address address) |
| 220 { |
| 221 ASSERT(contains(address)); |
| 222 if (!m_inUse[index(address)]) |
| 223 return 0; |
| 224 if (m_isLargePage) |
| 225 return pageHeaderFromObject(base()); |
| 226 return pageHeaderFromObject(address); |
| 227 } |
| 228 |
| 229 private: |
| 230 PageMemoryRegion(Address base, size_t size, unsigned numPages) |
| 231 : MemoryRegion(base, size) |
| 232 , m_isLargePage(numPages == 1) |
| 233 , m_numPages(numPages) |
| 234 { |
| 235 for (size_t i = 0; i < blinkPagesPerRegion; ++i) |
| 236 m_inUse[i] = false; |
| 237 } |
| 238 |
| 239 unsigned index(Address address) |
| 240 { |
| 241 ASSERT(contains(address)); |
| 242 if (m_isLargePage) |
| 243 return 0; |
| 244 size_t offset = blinkPageAddress(address) - base(); |
| 245 ASSERT(offset % blinkPageSize == 0); |
| 246 return offset / blinkPageSize; |
| 194 } | 247 } |
| 195 | 248 |
| 196 static PageMemoryRegion* allocate(size_t size, unsigned numPages) | 249 static PageMemoryRegion* allocate(size_t size, unsigned numPages) |
| 197 { | 250 { |
| 198 ASSERT(Heap::heapDoesNotContainCacheIsEmpty()); | 251 ASSERT(Heap::heapDoesNotContainCacheIsEmpty()); |
| 199 | 252 |
| 200 // Compute a random blink page aligned address for the page memory | 253 // Compute a random blink page aligned address for the page memory |
| 201 // region and attempt to get the memory there. | 254 // region and attempt to get the memory there. |
| 202 Address randomAddress = reinterpret_cast<Address>(WTF::getRandomPageBase()); | 255 Address randomAddress = reinterpret_cast<Address>(WTF::getRandomPageBase()); |
| 203 Address alignedRandomAddress = roundToBlinkPageBoundary(randomAddress); | 256 Address alignedRandomAddress = roundToBlinkPageBoundary(randomAddress); |
| (...skipping 63 matching lines...) |
| 267 | 320 |
| 268 // FIXME: If base is by accident blink page size aligned | 321 // FIXME: If base is by accident blink page size aligned |
| 269 // here then we can create two pages out of reserved | 322 // here then we can create two pages out of reserved |
| 270 // space. Do this. | 323 // space. Do this. |
| 271 Address alignedBase = roundToBlinkPageBoundary(base); | 324 Address alignedBase = roundToBlinkPageBoundary(base); |
| 272 | 325 |
| 273 return new PageMemoryRegion(alignedBase, size, numPages); | 326 return new PageMemoryRegion(alignedBase, size, numPages); |
| 274 #endif | 327 #endif |
| 275 } | 328 } |
| 276 | 329 |
| 277 private: | 330 bool m_isLargePage; |
| 278 PageMemoryRegion(Address base, size_t size, unsigned numPages) | 331 bool m_inUse[blinkPagesPerRegion]; |
| 279 : MemoryRegion(base, size) | |
| 280 , m_numPages(numPages) | |
| 281 { | |
| 282 } | |
| 283 | |
| 284 unsigned m_numPages; | 332 unsigned m_numPages; |
| 285 }; | 333 }; |
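
The class above is effectively a manually reference-counted slab: each live page holds one count on its region, and the last pageDeleted() tears the whole reservation down. A minimal standalone sketch of that lifetime rule, where Region and kPagesPerRegion are hypothetical stand-ins for the Blink types, not the actual implementation:

    #include <cassert>
    #include <cstddef>

    const size_t kPagesPerRegion = 10; // stand-in for blinkPagesPerRegion

    class Region {
    public:
        explicit Region(unsigned numPages)
            : m_numPages(numPages)
        {
            for (size_t i = 0; i < kPagesPerRegion; ++i)
                m_inUse[i] = false;
        }

        void markPageUsed(size_t index)
        {
            assert(!m_inUse[index]);
            m_inUse[index] = true;
        }

        // Mirrors pageDeleted(): clear the slot, and once no page holds a
        // count anymore, release the whole reservation in one go.
        void pageDeleted(size_t index)
        {
            m_inUse[index] = false;
            if (!--m_numPages)
                delete this; // the real code would also unmap the reserved memory here
        }

    private:
        ~Region() { } // only pageDeleted() may destroy a region

        bool m_inUse[kPagesPerRegion];
        unsigned m_numPages;
    };
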
| 286 | 334 |
| 287 // Representation of the memory used for a Blink heap page. | 335 // Representation of the memory used for a Blink heap page. |
| 288 // | 336 // |
| 289 // The representation keeps track of two memory regions: | 337 // The representation keeps track of two memory regions: |
| 290 // | 338 // |
| 291 // 1. The virtual memory reserved from the system in order to be able | 339 // 1. The virtual memory reserved from the system in order to be able |
| 292 // to free all the virtual memory reserved. Multiple PageMemory | 340 // to free all the virtual memory reserved. Multiple PageMemory |
| 293 // instances can share the same reserved memory region and | 341 // instances can share the same reserved memory region and |
| 294 // therefore notify the reserved memory region on destruction so | 342 // therefore notify the reserved memory region on destruction so |
| 295 // that the system memory can be given back when all PageMemory | 343 // that the system memory can be given back when all PageMemory |
| 296 // instances for that memory are gone. | 344 // instances for that memory are gone. |
| 297 // | 345 // |
| 298 // 2. The writable memory (a sub-region of the reserved virtual | 346 // 2. The writable memory (a sub-region of the reserved virtual |
| 299 // memory region) that is used for the actual heap page payload. | 347 // memory region) that is used for the actual heap page payload. |
| 300 // | 348 // |
| 301 // Guard pages are created before and after the writable memory. | 349 // Guard pages are created before and after the writable memory. |
| 302 class PageMemory { | 350 class PageMemory { |
| 303 public: | 351 public: |
| 304 ~PageMemory() | 352 ~PageMemory() |
| 305 { | 353 { |
| 306 __lsan_unregister_root_region(m_writable.base(), m_writable.size()); | 354 __lsan_unregister_root_region(m_writable.base(), m_writable.size()); |
| 307 m_reserved->pageRemoved(); | 355 m_reserved->pageDeleted(writableStart()); |
| 308 } | 356 } |
| 309 | 357 |
| 310 bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); } | 358 WARN_UNUSED_RETURN bool commit() |
| 311 void decommit() { m_writable.decommit(); } | 359 { |
| 360 m_reserved->markPageUsed(writableStart()); |
| 361 return m_writable.commit(); |
| 362 } |
| 363 |
| 364 void decommit() |
| 365 { |
| 366 m_reserved->markPageUnused(writableStart()); |
| 367 m_writable.decommit(); |
| 368 } |
| 369 |
| 370 void markUnused() { m_reserved->markPageUnused(writableStart()); } |
| 371 |
| 372 PageMemoryRegion* region() { return m_reserved; } |
| 312 | 373 |
| 313 Address writableStart() { return m_writable.base(); } | 374 Address writableStart() { return m_writable.base(); } |
| 314 | 375 |
| 315 static PageMemory* setupPageMemoryInRegion(PageMemoryRegion* region, size_t pageOffset, size_t payloadSize) | 376 static PageMemory* setupPageMemoryInRegion(PageMemoryRegion* region, size_t pageOffset, size_t payloadSize) |
| 316 { | 377 { |
| 317 // Setup the payload one OS page into the page memory. The | 378 // Setup the payload one OS page into the page memory. The |
| 318 // first os page is the guard page. | 379 // first os page is the guard page. |
| 319 Address payloadAddress = region->base() + pageOffset + osPageSize(); | 380 Address payloadAddress = region->base() + pageOffset + osPageSize(); |
| 320 return new PageMemory(region, MemoryRegion(payloadAddress, payloadSize)); | 381 return new PageMemory(region, MemoryRegion(payloadAddress, payloadSize)); |
| 321 } | 382 } |
| 322 | 383 |
| 323 // Allocate a virtual address space for one blink page with the | 384 // Allocate a virtual address space for one blink page with the |
| 324 // following layout: | 385 // following layout: |
| 325 // | 386 // |
| 326 // [ guard os page | ... payload ... | guard os page ] | 387 // [ guard os page | ... payload ... | guard os page ] |
| 327 // ^---{ aligned to blink page size } | 388 // ^---{ aligned to blink page size } |
| 328 // | 389 // |
| 329 static PageMemory* allocate(size_t payloadSize) | 390 static PageMemory* allocate(size_t payloadSize) |
| 330 { | 391 { |
| 331 ASSERT(payloadSize > 0); | 392 ASSERT(payloadSize > 0); |
| 332 | 393 |
| 333 // Virtual memory allocation routines operate in OS page sizes. | 394 // Virtual memory allocation routines operate in OS page sizes. |
| 334 // Round up the requested size to nearest os page size. | 395 // Round up the requested size to nearest os page size. |
| 335 payloadSize = roundToOsPageSize(payloadSize); | 396 payloadSize = roundToOsPageSize(payloadSize); |
| 336 | 397 |
| 337 // Overallocate by 2 times OS page size to have space for a | 398 // Overallocate by 2 times OS page size to have space for a |
| 338 // guard page at the beginning and end of blink heap page. | 399 // guard page at the beginning and end of blink heap page. |
| 339 size_t allocationSize = payloadSize + 2 * osPageSize(); | 400 size_t allocationSize = payloadSize + 2 * osPageSize(); |
| 340 PageMemoryRegion* pageMemoryRegion = PageMemoryRegion::allocate(allocationSize, 1); | 401 PageMemoryRegion* pageMemoryRegion = PageMemoryRegion::allocateLargePage(allocationSize); |
| 341 PageMemory* storage = setupPageMemoryInRegion(pageMemoryRegion, 0, payloadSize); | 402 PageMemory* storage = setupPageMemoryInRegion(pageMemoryRegion, 0, payloadSize); |
| 342 RELEASE_ASSERT(storage->commit()); | 403 RELEASE_ASSERT(storage->commit()); |
| 343 return storage; | 404 return storage; |
| 344 } | 405 } |
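
For a concrete view of the layout allocate() builds, here is its arithmetic as a tiny standalone program; the 4 KiB page size and 10 KiB request are assumed example values, not Blink constants:

    #include <cstddef>
    #include <cstdio>

    const size_t kOsPageSize = 4096; // assumed; the real value comes from osPageSize()

    size_t roundToOsPageSize(size_t size)
    {
        return (size + kOsPageSize - 1) & ~(kOsPageSize - 1);
    }

    int main()
    {
        size_t payloadSize = roundToOsPageSize(10 * 1024);     // 10 KiB request -> 12288
        size_t allocationSize = payloadSize + 2 * kOsPageSize; // guard pages on both ends
        // Layout: [ guard 4096 | payload 12288 | guard 4096 ] = 20480 bytes reserved.
        std::printf("guard=%zu payload=%zu guard=%zu total=%zu\n",
            kOsPageSize, payloadSize, kOsPageSize, allocationSize);
        return 0;
    }
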
| 345 | 406 |
| 346 private: | 407 private: |
| 347 PageMemory(PageMemoryRegion* reserved, const MemoryRegion& writable) | 408 PageMemory(PageMemoryRegion* reserved, const MemoryRegion& writable) |
| 348 : m_reserved(reserved) | 409 : m_reserved(reserved) |
| 349 , m_writable(writable) | 410 , m_writable(writable) |
| 350 { | 411 { |
| (...skipping 251 matching lines...) |
| 602 ThreadHeap<Header>::~ThreadHeap() | 663 ThreadHeap<Header>::~ThreadHeap() |
| 603 { | 664 { |
| 604 ASSERT(!m_firstPage); | 665 ASSERT(!m_firstPage); |
| 605 ASSERT(!m_firstLargeHeapObject); | 666 ASSERT(!m_firstLargeHeapObject); |
| 606 } | 667 } |
| 607 | 668 |
| 608 template<typename Header> | 669 template<typename Header> |
| 609 void ThreadHeap<Header>::cleanupPages() | 670 void ThreadHeap<Header>::cleanupPages() |
| 610 { | 671 { |
| 611 clearFreeLists(); | 672 clearFreeLists(); |
| 612 flushHeapContainsCache(); | |
| 613 | 673 |
| 614 // Add the ThreadHeap's pages to the orphanedPagePool. | 674 // Add the ThreadHeap's pages to the orphanedPagePool. |
| 615 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) | 675 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) |
| 616 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); | 676 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); |
| 617 m_firstPage = 0; | 677 m_firstPage = 0; |
| 618 | 678 |
| 619 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) | 679 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) |
| 620 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); | 680 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); |
| 621 m_firstLargeHeapObject = 0; | 681 m_firstLargeHeapObject = 0; |
| 622 } | 682 } |
| (...skipping 295 matching lines...) |
| 918 | 978 |
| 919 // If ASan is supported we add allocationGranularity bytes to the allocated space and | 979 // If ASan is supported we add allocationGranularity bytes to the allocated space and |
| 920 // poison that to detect overflows | 980 // poison that to detect overflows |
| 921 #if defined(ADDRESS_SANITIZER) | 981 #if defined(ADDRESS_SANITIZER) |
| 922 allocationSize += allocationGranularity; | 982 allocationSize += allocationGranularity; |
| 923 #endif | 983 #endif |
| 924 if (threadState()->shouldGC()) | 984 if (threadState()->shouldGC()) |
| 925 threadState()->setGCRequested(); | 985 threadState()->setGCRequested(); |
| 926 Heap::flushHeapDoesNotContainCache(); | 986 Heap::flushHeapDoesNotContainCache(); |
| 927 PageMemory* pageMemory = PageMemory::allocate(allocationSize); | 987 PageMemory* pageMemory = PageMemory::allocate(allocationSize); |
| 988 m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region()); |
| 928 Address largeObjectAddress = pageMemory->writableStart(); | 989 Address largeObjectAddress = pageMemory->writableStart(); |
| 929 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 990 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); |
| 930 memset(headerAddress, 0, size); | 991 memset(headerAddress, 0, size); |
| 931 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); | 992 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); |
| 932 Address result = headerAddress + sizeof(*header); | 993 Address result = headerAddress + sizeof(*header); |
| 933 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 994 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| 934 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); | 995 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState()); |
| 935 | 996 |
| 936 // Poison the object header and allocationGranularity bytes after the object | 997 // Poison the object header and allocationGranularity bytes after the object |
| 937 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 998 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
| 938 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); | 999 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); |
| 939 largeObject->link(&m_firstLargeHeapObject); | 1000 largeObject->link(&m_firstLargeHeapObject); |
| 940 stats().increaseAllocatedSpace(largeObject->size()); | 1001 stats().increaseAllocatedSpace(largeObject->size()); |
| 941 stats().increaseObjectSpace(largeObject->payloadSize()); | 1002 stats().increaseObjectSpace(largeObject->payloadSize()); |
| 942 return result; | 1003 return result; |
| 943 } | 1004 } |
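
The poisoning pair above implements a classic ASan red zone: the extra allocationGranularity bytes past the object are made off-limits so an overflow faults immediately instead of corrupting the next allocation. A hedged sketch of the same pattern using the public ASan interface directly (meaningful only when building with -fsanitize=address; allocateWithRedZone is an illustrative helper, not Blink code):

    #include <cstddef>
    #include <cstdlib>
    #include <sanitizer/asan_interface.h> // __asan_poison_memory_region

    // Over-allocate by a small granule and poison it; any write past
    // 'size' then triggers an ASan report instead of silent corruption.
    void* allocateWithRedZone(size_t size, size_t granularity)
    {
        char* base = static_cast<char*>(std::malloc(size + granularity));
        __asan_poison_memory_region(base + size, granularity);
        return base;
    }

    void freeWithRedZone(void* p, size_t size, size_t granularity)
    {
        // Unpoison before handing the block back to the allocator,
        // mirroring the ASAN_UNPOISON calls in freeLargeObject below.
        __asan_unpoison_memory_region(static_cast<char*>(p) + size, granularity);
        std::free(p);
    }
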
| 944 | 1005 |
| 945 template<typename Header> | 1006 template<typename Header> |
| 946 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) | 1007 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) |
| 947 { | 1008 { |
| 948 flushHeapContainsCache(); | |
| 949 object->unlink(previousNext); | 1009 object->unlink(previousNext); |
| 950 object->finalize(); | 1010 object->finalize(); |
| 951 | 1011 |
| 952 // Unpoison the object header and allocationGranularity bytes after the | 1012 // Unpoison the object header and allocationGranularity bytes after the |
| 953 // object before freeing. | 1013 // object before freeing. |
| 954 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); | 1014 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); |
| 955 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); | 1015 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); |
| 956 | 1016 |
| 957 if (object->terminating()) { | 1017 if (object->terminating()) { |
| 958 ASSERT(ThreadState::current()->isTerminating()); | 1018 ASSERT(ThreadState::current()->isTerminating()); |
| (...skipping 59 matching lines...) |
| 1018 delete entry; | 1078 delete entry; |
| 1019 if (memory->commit()) | 1079 if (memory->commit()) |
| 1020 return memory; | 1080 return memory; |
| 1021 | 1081 |
| 1022 // We got some memory, but failed to commit it, try again. | 1082 // We got some memory, but failed to commit it, try again. |
| 1023 delete memory; | 1083 delete memory; |
| 1024 } | 1084 } |
| 1025 return 0; | 1085 return 0; |
| 1026 } | 1086 } |
| 1027 | 1087 |
| 1088 BaseHeapPage::BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) |
| 1089 : m_storage(storage) |
| 1090 , m_gcInfo(gcInfo) |
| 1091 , m_threadState(state) |
| 1092 , m_terminating(false) |
| 1093 , m_tracedAfterOrphaned(false) |
| 1094 { |
| 1095 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); |
| 1096 } |
| 1097 |
| 1098 void BaseHeapPage::markOrphaned() |
| 1099 { |
| 1100 m_threadState = 0; |
| 1101 m_gcInfo = 0; |
| 1102 m_terminating = false; |
| 1103 m_tracedAfterOrphaned = false; |
| 1104 // Since we zap the page payload for orphaned pages we need to mark it as |
| 1105 // unused so a conservative pointer won't interpret the object headers. |
| 1106 storage()->markUnused(); |
| 1107 } |
| 1108 |
| 1028 OrphanedPagePool::~OrphanedPagePool() | 1109 OrphanedPagePool::~OrphanedPagePool() |
| 1029 { | 1110 { |
| 1030 for (int index = 0; index < NumberOfHeaps; ++index) { | 1111 for (int index = 0; index < NumberOfHeaps; ++index) { |
| 1031 while (PoolEntry* entry = m_pool[index]) { | 1112 while (PoolEntry* entry = m_pool[index]) { |
| 1032 m_pool[index] = entry->next; | 1113 m_pool[index] = entry->next; |
| 1033 BaseHeapPage* page = entry->data; | 1114 BaseHeapPage* page = entry->data; |
| 1034 delete entry; | 1115 delete entry; |
| 1035 PageMemory* memory = page->storage(); | 1116 PageMemory* memory = page->storage(); |
| 1036 ASSERT(memory); | 1117 ASSERT(memory); |
| 1037 page->~BaseHeapPage(); | 1118 page->~BaseHeapPage(); |
| (...skipping 104 matching lines...) |
| 1142 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap | 1223 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap |
| 1143 // since it is the same for all objects | 1224 // since it is the same for all objects |
| 1144 ASSERT(gcInfo); | 1225 ASSERT(gcInfo); |
| 1145 allocatePage(gcInfo); | 1226 allocatePage(gcInfo); |
| 1146 } | 1227 } |
| 1147 | 1228 |
| 1148 template <typename Header> | 1229 template <typename Header> |
| 1149 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page) | 1230 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page) |
| 1150 { | 1231 { |
| 1151 MutexLocker locker(m_threadState->sweepMutex()); | 1232 MutexLocker locker(m_threadState->sweepMutex()); |
| 1152 flushHeapContainsCache(); | |
| 1153 if (page->terminating()) { | 1233 if (page->terminating()) { |
| 1154 // The thread is shutting down so this page is being removed as part | 1234 // The thread is shutting down so this page is being removed as part |
| 1155 // of a thread local GC. In that case the page could be accessed in the | 1235 // of a thread local GC. In that case the page could be accessed in the |
| 1156 // next global GC either due to a dead object being traced via a | 1236 // next global GC either due to a dead object being traced via a |
| 1157 // conservative pointer or due to a programming error where an object | 1237 // conservative pointer or due to a programming error where an object |
| 1158 // in another thread heap keeps a dangling pointer to this object. | 1238 // in another thread heap keeps a dangling pointer to this object. |
| 1159 // To guard against this we put the page in the orphanedPagePool to | 1239 // To guard against this we put the page in the orphanedPagePool to |
| 1160 // ensure it is still reachable. After the next global GC it can be | 1240 // ensure it is still reachable. After the next global GC it can be |
| 1161 // decommitted and moved to the page pool assuming no rogue/dangling | 1241 // decommitted and moved to the page pool assuming no rogue/dangling |
| 1162 // pointers refer to it. | 1242 // pointers refer to it. |
| 1163 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); | 1243 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); |
| 1164 } else { | 1244 } else { |
| 1165 PageMemory* memory = page->storage(); | 1245 PageMemory* memory = page->storage(); |
| 1166 page->~HeapPage<Header>(); | 1246 page->~HeapPage<Header>(); |
| 1167 Heap::freePagePool()->addFreePage(m_index, memory); | 1247 Heap::freePagePool()->addFreePage(m_index, memory); |
| 1168 } | 1248 } |
| 1169 } | 1249 } |
| 1170 | 1250 |
| 1171 template<typename Header> | 1251 template<typename Header> |
| 1172 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) | 1252 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) |
| 1173 { | 1253 { |
| 1174 Heap::flushHeapDoesNotContainCache(); | 1254 Heap::flushHeapDoesNotContainCache(); |
| 1175 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index); | 1255 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index); |
| 1176 // We continue allocating page memory until we succeed in getting one. | 1256 // We continue allocating page memory until we succeed in committing one. |
| 1177 // Since the FreePagePool is global other threads could use all the | |
| 1178 // newly allocated page memory before this thread calls takeFreePage. | |
| 1179 while (!pageMemory) { | 1257 while (!pageMemory) { |
| 1180 // Allocate a memory region for blinkPagesPerRegion pages that | 1258 // Allocate a memory region for blinkPagesPerRegion pages that |
| 1181 // will each have the following layout. | 1259 // will each have the following layout. |
| 1182 // | 1260 // |
| 1183 // [ guard os page | ... payload ... | guard os page ] | 1261 // [ guard os page | ... payload ... | guard os page ] |
| 1184 // ^---{ aligned to blink page size } | 1262 // ^---{ aligned to blink page size } |
| 1185 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); | 1263 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages(); |
| 1264 m_threadState->allocatedRegionsSinceLastGC().append(region); |
| 1265 |
| 1186 // Setup the PageMemory object for each of the pages in the | 1266 // Setup the PageMemory object for each of the pages in the |
| 1187 // region. | 1267 // region. |
| 1188 size_t offset = 0; | 1268 size_t offset = 0; |
| 1189 for (size_t i = 0; i < blinkPagesPerRegion; i++) { | 1269 for (size_t i = 0; i < blinkPagesPerRegion; i++) { |
| 1190 Heap::freePagePool()->addFreePage(m_index, PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize())); | 1270 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize()); |
| 1271 // Take the first possible page ensuring that this thread actually |
| 1272 // gets a page and add the rest to the page pool. |
| 1273 if (!pageMemory) { |
| 1274 if (memory->commit()) |
| 1275 pageMemory = memory; |
| 1276 else |
| 1277 delete memory; |
| 1278 } else { |
| 1279 Heap::freePagePool()->addFreePage(m_index, memory); |
| 1280 } |
| 1191 offset += blinkPageSize; | 1281 offset += blinkPageSize; |
| 1192 } | 1282 } |
| 1193 pageMemory = Heap::freePagePool()->takeFreePage(m_index); | |
| 1194 } | 1283 } |
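
The rewritten loop above fixes a subtle starvation: previously every fresh page went into the global FreePagePool, so other threads could drain them all before this thread's takeFreePage ran. A condensed sketch of the new policy; FreePool is a hypothetical stand-in for Heap::freePagePool(), and the Blink names are used as declared elsewhere in this file:

    // Hypothetical pool interface standing in for Heap::freePagePool().
    struct FreePool {
        void addFreePage(int index, PageMemory*);
    };

    // Keep the first page we can commit; only the remainder is shared,
    // so the allocating thread is guaranteed to leave with a page.
    PageMemory* takeOnePageAndPoolRest(PageMemoryRegion* region, FreePool* pool, int heapIndex)
    {
        PageMemory* result = 0;
        size_t offset = 0;
        for (size_t i = 0; i < blinkPagesPerRegion; ++i, offset += blinkPageSize) {
            PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize());
            if (result)
                pool->addFreePage(heapIndex, memory); // share with other threads
            else if (memory->commit())
                result = memory;                      // first committable page is ours
            else
                delete memory;                        // commit failed; drop and keep scanning
        }
        return result;
    }
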
| 1195 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); | 1284 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); |
| 1196 // Use a separate list for pages allocated during sweeping to make | 1285 // Use a separate list for pages allocated during sweeping to make |
| 1197 // sure that we do not accidentally sweep objects that have been | 1286 // sure that we do not accidentally sweep objects that have been |
| 1198 // allocated during sweeping. | 1287 // allocated during sweeping. |
| 1199 if (m_threadState->isSweepInProgress()) { | 1288 if (m_threadState->isSweepInProgress()) { |
| 1200 if (!m_lastPageAllocatedDuringSweeping) | 1289 if (!m_lastPageAllocatedDuringSweeping) |
| 1201 m_lastPageAllocatedDuringSweeping = page; | 1290 m_lastPageAllocatedDuringSweeping = page; |
| 1202 page->link(&m_firstPageAllocatedDuringSweeping); | 1291 page->link(&m_firstPageAllocatedDuringSweeping); |
| 1203 } else { | 1292 } else { |
| (...skipping 538 matching lines...) |
| 1742 } | 1831 } |
| 1743 | 1832 |
| 1744 if (json) { | 1833 if (json) { |
| 1745 json->setInteger("class", tag); | 1834 json->setInteger("class", tag); |
| 1746 json->setInteger("size", header->size()); | 1835 json->setInteger("size", header->size()); |
| 1747 json->setInteger("isMarked", isMarked()); | 1836 json->setInteger("isMarked", isMarked()); |
| 1748 } | 1837 } |
| 1749 } | 1838 } |
| 1750 #endif | 1839 #endif |
| 1751 | 1840 |
| 1752 template<typename Entry> | 1841 void HeapDoesNotContainCache::flush() |
| 1753 void HeapExtentCache<Entry>::flush() | |
| 1754 { | 1842 { |
| 1755 if (m_hasEntries) { | 1843 if (m_hasEntries) { |
| 1756 for (int i = 0; i < numberOfEntries; i++) | 1844 for (int i = 0; i < numberOfEntries; i++) |
| 1757 m_entries[i] = Entry(); | 1845 m_entries[i] = 0; |
| 1758 m_hasEntries = false; | 1846 m_hasEntries = false; |
| 1759 } | 1847 } |
| 1760 } | 1848 } |
| 1761 | 1849 |
| 1762 template<typename Entry> | 1850 size_t HeapDoesNotContainCache::hash(Address address) |
| 1763 size_t HeapExtentCache<Entry>::hash(Address address) | |
| 1764 { | 1851 { |
| 1765 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); | 1852 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); |
| 1766 value ^= value >> numberOfEntriesLog2; | 1853 value ^= value >> numberOfEntriesLog2; |
| 1767 value ^= value >> (numberOfEntriesLog2 * 2); | 1854 value ^= value >> (numberOfEntriesLog2 * 2); |
| 1768 value &= numberOfEntries - 1; | 1855 value &= numberOfEntries - 1; |
| 1769 return value & ~1; // Returns only even number. | 1856 return value & ~1; // Returns only even number. |
| 1770 } | 1857 } |
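
The final & ~1 is what makes this cache two-way set associative: hash(a) names the bucket base and hash(a) + 1 its second slot. A standalone illustration, assuming blinkPageSizeLog2 = 17 and numberOfEntriesLog2 = 12 purely as example values (the real constants are defined elsewhere in Heap):

    #include <cstddef>
    #include <cstdint>

    const size_t kBlinkPageSizeLog2 = 17;   // assumed example value
    const size_t kNumberOfEntriesLog2 = 12; // assumed example value
    const size_t kNumberOfEntries = static_cast<size_t>(1) << kNumberOfEntriesLog2;

    size_t hashAddress(uintptr_t address)
    {
        size_t value = address >> kBlinkPageSizeLog2; // page-granular key
        value ^= value >> kNumberOfEntriesLog2;       // mix in higher bits
        value ^= value >> (kNumberOfEntriesLog2 * 2);
        value &= kNumberOfEntries - 1;                // clamp to table size
        return value & ~static_cast<size_t>(1);       // force even: bucket base
    }
    // hashAddress(a) and hashAddress(a) + 1 are the two slots that
    // lookup() and addEntry() below probe for any address a.
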
| 1771 | 1858 |
| 1772 template<typename Entry> | 1859 bool HeapDoesNotContainCache::lookup(Address address) |
| 1773 typename Entry::LookupResult HeapExtentCache<Entry>::lookup(Address address) | |
| 1774 { | 1860 { |
| 1775 size_t index = hash(address); | 1861 size_t index = hash(address); |
| 1776 ASSERT(!(index & 1)); | 1862 ASSERT(!(index & 1)); |
| 1777 Address cachePage = roundToBlinkPageStart(address); | 1863 Address cachePage = roundToBlinkPageStart(address); |
| 1778 if (m_entries[index].address() == cachePage) | 1864 if (m_entries[index] == cachePage) |
| 1779 return m_entries[index].result(); | 1865 return m_entries[index]; |
| 1780 if (m_entries[index + 1].address() == cachePage) | 1866 if (m_entries[index + 1] == cachePage) |
| 1781 return m_entries[index + 1].result(); | 1867 return m_entries[index + 1]; |
| 1782 return 0; | 1868 return 0; |
| 1783 } | 1869 } |
| 1784 | 1870 |
| 1785 template<typename Entry> | 1871 void HeapDoesNotContainCache::addEntry(Address address) |
| 1786 void HeapExtentCache<Entry>::addEntry(Address address, typename Entry::LookupResult entry) | |
| 1787 { | 1872 { |
| 1788 m_hasEntries = true; | 1873 m_hasEntries = true; |
| 1789 size_t index = hash(address); | 1874 size_t index = hash(address); |
| 1790 ASSERT(!(index & 1)); | 1875 ASSERT(!(index & 1)); |
| 1791 Address cachePage = roundToBlinkPageStart(address); | 1876 Address cachePage = roundToBlinkPageStart(address); |
| 1792 m_entries[index + 1] = m_entries[index]; | 1877 m_entries[index + 1] = m_entries[index]; |
| 1793 m_entries[index] = Entry(cachePage, entry); | 1878 m_entries[index] = cachePage; |
| 1794 } | |
| 1795 | |
| 1796 // These should not be needed, but it seems impossible to persuade clang to | |
| 1797 // instantiate the template functions and export them from a shared library, so | |
| 1798 // we add these in the non-templated subclass, which does not have that issue. | |
| 1799 void HeapContainsCache::addEntry(Address address, BaseHeapPage* page) | |
| 1800 { | |
| 1801 HeapExtentCache<PositiveEntry>::addEntry(address, page); | |
| 1802 } | |
| 1803 | |
| 1804 BaseHeapPage* HeapContainsCache::lookup(Address address) | |
| 1805 { | |
| 1806 return HeapExtentCache<PositiveEntry>::lookup(address); | |
| 1807 } | 1879 } |
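
addEntry's two-line shuffle is the bucket's eviction policy: the old occupant of the even slot is demoted to the odd slot (evicting whatever was there), so each bucket retains its two most recently inserted pages. A sketch of that policy in isolation, reusing the hypothetical hashAddress and constants sketched above, with Address as a plain byte pointer:

    #include <cstring>

    typedef uint8_t* Address;

    struct TwoWayCache {
        TwoWayCache() { std::memset(m_entries, 0, sizeof(m_entries)); }

        void addEntry(Address cachePage)
        {
            size_t index = hashAddress(reinterpret_cast<uintptr_t>(cachePage));
            m_entries[index + 1] = m_entries[index]; // demote the previous entry
            m_entries[index] = cachePage;            // newest entry takes the base slot
        }

        bool lookup(Address cachePage) const
        {
            size_t index = hashAddress(reinterpret_cast<uintptr_t>(cachePage));
            return m_entries[index] == cachePage || m_entries[index + 1] == cachePage;
        }

        Address m_entries[kNumberOfEntries];
    };
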
| 1808 | 1880 |
| 1809 void Heap::flushHeapDoesNotContainCache() | 1881 void Heap::flushHeapDoesNotContainCache() |
| 1810 { | 1882 { |
| 1811 s_heapDoesNotContainCache->flush(); | 1883 s_heapDoesNotContainCache->flush(); |
| 1812 } | 1884 } |
| 1813 | 1885 |
| 1814 // The marking mutex is used to ensure sequential access to data | 1886 // The marking mutex is used to ensure sequential access to data |
| 1815 // structures during marking. The marking mutex needs to be acquired | 1887 // structures during marking. The marking mutex needs to be acquired |
| 1816 // during marking when elements are taken from the global marking | 1888 // during marking when elements are taken from the global marking |
| (...skipping 282 matching lines...) |
| 2099 delete s_orphanedPagePool; | 2171 delete s_orphanedPagePool; |
| 2100 s_orphanedPagePool = 0; | 2172 s_orphanedPagePool = 0; |
| 2101 delete s_weakCallbackStack; | 2173 delete s_weakCallbackStack; |
| 2102 s_weakCallbackStack = 0; | 2174 s_weakCallbackStack = 0; |
| 2103 delete s_postMarkingCallbackStack; | 2175 delete s_postMarkingCallbackStack; |
| 2104 s_postMarkingCallbackStack = 0; | 2176 s_postMarkingCallbackStack = 0; |
| 2105 delete s_markingStack; | 2177 delete s_markingStack; |
| 2106 s_markingStack = 0; | 2178 s_markingStack = 0; |
| 2107 delete s_ephemeronStack; | 2179 delete s_ephemeronStack; |
| 2108 s_ephemeronStack = 0; | 2180 s_ephemeronStack = 0; |
| 2181 delete s_regionTree; |
| 2182 s_regionTree = 0; |
| 2109 ThreadState::shutdown(); | 2183 ThreadState::shutdown(); |
| 2110 } | 2184 } |
| 2111 | 2185 |
| 2112 BaseHeapPage* Heap::contains(Address address) | 2186 BaseHeapPage* Heap::contains(Address address) |
| 2113 { | 2187 { |
| 2114 ASSERT(ThreadState::isAnyThreadInGC()); | 2188 ASSERT(ThreadState::isAnyThreadInGC()); |
| 2115 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 2189 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
| 2116 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 2190 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
| 2117 BaseHeapPage* page = (*it)->contains(address); | 2191 BaseHeapPage* page = (*it)->contains(address); |
| 2118 if (page) | 2192 if (page) |
| (...skipping 11 matching lines...) |
| 2130 | 2204 |
| 2131 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 2205 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
| 2132 { | 2206 { |
| 2133 ASSERT(ThreadState::isAnyThreadInGC()); | 2207 ASSERT(ThreadState::isAnyThreadInGC()); |
| 2134 | 2208 |
| 2135 #if !ENABLE(ASSERT) | 2209 #if !ENABLE(ASSERT) |
| 2136 if (s_heapDoesNotContainCache->lookup(address)) | 2210 if (s_heapDoesNotContainCache->lookup(address)) |
| 2137 return 0; | 2211 return 0; |
| 2138 #endif | 2212 #endif |
| 2139 | 2213 |
| 2140 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 2214 if (BaseHeapPage* page = lookup(address)) { |
| 2141 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 2215 ASSERT(page->contains(address)); |
| 2142 if ((*it)->checkAndMarkPointer(visitor, address)) { | 2216 ASSERT(!s_heapDoesNotContainCache->lookup(address)); |
| 2143 // Pointer was in a page of that thread. If it actually pointed | 2217 page->checkAndMarkPointer(visitor, address); |
| 2144 // into an object then that object was found and marked. | 2218 // FIXME: We only need to set the conservative flag if checkAndMarkPointer actually marked the pointer. |
| 2145 ASSERT(!s_heapDoesNotContainCache->lookup(address)); | 2219 s_lastGCWasConservative = true; |
| 2146 s_lastGCWasConservative = true; | 2220 return address; |
| 2147 return address; | |
| 2148 } | |
| 2149 } | 2221 } |
| 2150 | 2222 |
| 2151 #if !ENABLE(ASSERT) | 2223 #if !ENABLE(ASSERT) |
| 2152 s_heapDoesNotContainCache->addEntry(address, true); | 2224 s_heapDoesNotContainCache->addEntry(address); |
| 2153 #else | 2225 #else |
| 2154 if (!s_heapDoesNotContainCache->lookup(address)) | 2226 if (!s_heapDoesNotContainCache->lookup(address)) |
| 2155 s_heapDoesNotContainCache->addEntry(address, true); | 2227 s_heapDoesNotContainCache->addEntry(address); |
| 2156 #endif | 2228 #endif |
| 2157 return 0; | 2229 return 0; |
| 2158 } | 2230 } |
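
The surrounding logic is the standard negative-cache discipline: consult the cache before the expensive lookup, and record only confirmed misses (never hits, which the assertions enforce). Condensed into one helper for clarity; this is a sketch of the pattern, not the actual Blink API, since s_heapDoesNotContainCache is private to Heap:

    // Sketch: returns the containing page, or 0 for a confirmed miss,
    // while preserving the "misses only" invariant of the cache.
    BaseHeapPage* cachedHeapLookup(Address address)
    {
        if (s_heapDoesNotContainCache->lookup(address))
            return 0;                                 // fast path: known miss
        if (BaseHeapPage* page = Heap::lookup(address))
            return page;                              // hit: must never be cached
        s_heapDoesNotContainCache->addEntry(address); // remember the miss
        return 0;
    }
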
| 2159 | 2231 |
| 2160 #if ENABLE(GC_PROFILE_MARKING) | 2232 #if ENABLE(GC_PROFILE_MARKING) |
| 2161 const GCInfo* Heap::findGCInfo(Address address) | 2233 const GCInfo* Heap::findGCInfo(Address address) |
| 2162 { | 2234 { |
| 2163 return ThreadState::findGCInfoFromAllThreads(address); | 2235 return ThreadState::findGCInfoFromAllThreads(address); |
| 2164 } | 2236 } |
| 2165 #endif | 2237 #endif |
| (...skipping 543 matching lines...) |
| 2709 | 2781 |
| 2710 HeaderType* header = HeaderType::fromPayload(address); | 2782 HeaderType* header = HeaderType::fromPayload(address); |
| 2711 header->checkHeader(); | 2783 header->checkHeader(); |
| 2712 | 2784 |
| 2713 const GCInfo* gcInfo = header->gcInfo(); | 2785 const GCInfo* gcInfo = header->gcInfo(); |
| 2714 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer()); | 2786 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer()); |
| 2715 HeapType* heap = static_cast<HeapType*>(state->heap(heapIndex)); | 2787 HeapType* heap = static_cast<HeapType*>(state->heap(heapIndex)); |
| 2716 heap->promptlyFreeObject(header); | 2788 heap->promptlyFreeObject(header); |
| 2717 } | 2789 } |
| 2718 | 2790 |
| 2791 BaseHeapPage* Heap::lookup(Address address) |
| 2792 { |
| 2793 ASSERT(ThreadState::isAnyThreadInGC()); |
| 2794 if (!s_regionTree) |
| 2795 return 0; |
| 2796 if (PageMemoryRegion* region = s_regionTree->lookup(address)) |
| 2797 return region->pageFromAddress(address); |
| 2798 return 0; |
| 2799 } |
| 2800 |
| 2801 static Mutex& regionTreeMutex() |
| 2802 { |
| 2803 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); |
| 2804 return mutex; |
| 2805 } |
| 2806 |
| 2807 void Heap::removePageMemoryRegion(PageMemoryRegion* region) |
| 2808 { |
| 2809 // Deletion of large objects (and thus their regions) can happen concurrently |
| 2810 // on sweeper threads. Removal can also happen during thread shutdown, but |
| 2811 // that case is safe. Regardless, we make all removals mutually exclusive. |
| 2812 MutexLocker locker(regionTreeMutex()); |
| 2813 RegionTree::remove(region, &s_regionTree); |
| 2814 } |
| 2815 |
| 2816 void Heap::addPageMemoryRegion(PageMemoryRegion* region) |
| 2817 { |
| 2818 ASSERT(ThreadState::isAnyThreadInGC()); |
| 2819 RegionTree::add(new RegionTree(region), &s_regionTree); |
| 2820 } |
| 2821 |
| 2822 PageMemoryRegion* Heap::RegionTree::lookup(Address address) |
| 2823 { |
| 2824 RegionTree* current = s_regionTree; |
| 2825 while (current) { |
| 2826 Address base = current->m_region->base(); |
| 2827 if (address < base) { |
| 2828 current = current->m_left; |
| 2829 continue; |
| 2830 } |
| 2831 if (address >= base + current->m_region->size()) { |
| 2832 current = current->m_right; |
| 2833 continue; |
| 2834 } |
| 2835 ASSERT(current->m_region->contains(address)); |
| 2836 return current->m_region; |
| 2837 } |
| 2838 return 0; |
| 2839 } |
| 2840 |
| 2841 void Heap::RegionTree::add(RegionTree* newTree, RegionTree** context) |
| 2842 { |
| 2843 ASSERT(newTree); |
| 2844 Address base = newTree->m_region->base(); |
| 2845 for (RegionTree* current = *context; current; current = *context) { |
| 2846 ASSERT(!current->m_region->contains(base)); |
| 2847 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right; |
| 2848 } |
| 2849 *context = newTree; |
| 2850 } |
| 2851 |
| 2852 void Heap::RegionTree::remove(PageMemoryRegion* region, RegionTree** context) |
| 2853 { |
| 2854 ASSERT(region); |
| 2855 ASSERT(context); |
| 2856 Address base = region->base(); |
| 2857 RegionTree* current = *context; |
| 2858 for ( ; current; current = *context) { |
| 2859 if (region == current->m_region) |
| 2860 break; |
| 2861 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right; |
| 2862 } |
| 2863 |
| 2864 // Shutdown via detachMainThread might not have populated the region tree. |
| 2865 if (!current) |
| 2866 return; |
| 2867 |
| 2868 *context = 0; |
| 2869 if (current->m_left) { |
| 2870 add(current->m_left, context); |
| 2871 current->m_left = 0; |
| 2872 } |
| 2873 if (current->m_right) { |
| 2874 add(current->m_right, context); |
| 2875 current->m_right = 0; |
| 2876 } |
| 2877 delete current; |
| 2878 } |
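
remove() avoids classic BST deletion surgery: it detaches the matching node and simply re-adds its two subtrees through the ordinary insertion path. That is correct because regions never overlap (the ASSERT in add()), and it is cheap enough for the small, unbalanced tree expected here. The same idea on a bare key-based tree, as a standalone sketch under those assumptions:

    #include <cstdint>

    struct Node {
        uintptr_t key;
        Node* left;
        Node* right;
    };

    void insert(Node* node, Node** context)
    {
        for (Node* current = *context; current; current = *context)
            context = (node->key < current->key) ? &current->left : &current->right;
        *context = node;
    }

    // Remove-by-reinsertion: unlink the node, then re-add its subtrees.
    void erase(uintptr_t key, Node** context)
    {
        Node* current = *context;
        for (; current; current = *context) {
            if (current->key == key)
                break;
            context = (key < current->key) ? &current->left : &current->right;
        }
        if (!current)
            return; // key not present
        *context = 0; // unlink the subtree rooted at the matching node
        if (current->left) {
            insert(current->left, context);
            current->left = 0;
        }
        if (current->right) {
            // Every right-subtree key exceeds every left-subtree key, so
            // reinserting it through the same slot lands it correctly.
            insert(current->right, context);
            current->right = 0;
        }
        delete current;
    }
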
| 2879 |
| 2719 // Force template instantiations for the types that we need. | 2880 // Force template instantiations for the types that we need. |
| 2720 template class HeapPage<FinalizedHeapObjectHeader>; | 2881 template class HeapPage<FinalizedHeapObjectHeader>; |
| 2721 template class HeapPage<HeapObjectHeader>; | 2882 template class HeapPage<HeapObjectHeader>; |
| 2722 template class ThreadHeap<FinalizedHeapObjectHeader>; | 2883 template class ThreadHeap<FinalizedHeapObjectHeader>; |
| 2723 template class ThreadHeap<HeapObjectHeader>; | 2884 template class ThreadHeap<HeapObjectHeader>; |
| 2724 | 2885 |
| 2725 Visitor* Heap::s_markingVisitor; | 2886 Visitor* Heap::s_markingVisitor; |
| 2726 Vector<OwnPtr<blink::WebThread> >* Heap::s_markingThreads; | 2887 Vector<OwnPtr<blink::WebThread> >* Heap::s_markingThreads; |
| 2727 CallbackStack* Heap::s_markingStack; | 2888 CallbackStack* Heap::s_markingStack; |
| 2728 CallbackStack* Heap::s_postMarkingCallbackStack; | 2889 CallbackStack* Heap::s_postMarkingCallbackStack; |
| 2729 CallbackStack* Heap::s_weakCallbackStack; | 2890 CallbackStack* Heap::s_weakCallbackStack; |
| 2730 CallbackStack* Heap::s_ephemeronStack; | 2891 CallbackStack* Heap::s_ephemeronStack; |
| 2731 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | 2892 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; |
| 2732 bool Heap::s_shutdownCalled = false; | 2893 bool Heap::s_shutdownCalled = false; |
| 2733 bool Heap::s_lastGCWasConservative = false; | 2894 bool Heap::s_lastGCWasConservative = false; |
| 2734 FreePagePool* Heap::s_freePagePool; | 2895 FreePagePool* Heap::s_freePagePool; |
| 2735 OrphanedPagePool* Heap::s_orphanedPagePool; | 2896 OrphanedPagePool* Heap::s_orphanedPagePool; |
| 2897 Heap::RegionTree* Heap::s_regionTree = 0; |
| 2898 |
| 2736 } | 2899 } |