OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 16 matching lines...) |
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
29 */ | 29 */ |
30 | 30 |
31 #include "config.h" | 31 #include "config.h" |
32 #include "platform/heap/Heap.h" | 32 #include "platform/heap/Heap.h" |
33 | 33 |
34 #include "platform/TraceEvent.h" | 34 #include "platform/TraceEvent.h" |
35 #include "platform/heap/ThreadState.h" | 35 #include "platform/heap/ThreadState.h" |
36 #include "public/platform/Platform.h" | 36 #include "public/platform/Platform.h" |
| 37 #include "wtf/AddressSpaceRandomization.h" |
37 #include "wtf/Assertions.h" | 38 #include "wtf/Assertions.h" |
38 #include "wtf/LeakAnnotations.h" | 39 #include "wtf/LeakAnnotations.h" |
39 #include "wtf/PassOwnPtr.h" | 40 #include "wtf/PassOwnPtr.h" |
40 #if ENABLE(GC_TRACING) | 41 #if ENABLE(GC_TRACING) |
41 #include "wtf/HashMap.h" | 42 #include "wtf/HashMap.h" |
42 #include "wtf/HashSet.h" | 43 #include "wtf/HashSet.h" |
43 #include "wtf/text/StringBuilder.h" | 44 #include "wtf/text/StringBuilder.h" |
44 #include "wtf/text/StringHash.h" | 45 #include "wtf/text/StringHash.h" |
45 #include <stdio.h> | 46 #include <stdio.h> |
46 #include <utility> | 47 #include <utility> |
(...skipping 118 matching lines...) |
165 } | 166 } |
166 | 167 |
167 Address base() const { return m_base; } | 168 Address base() const { return m_base; } |
168 size_t size() const { return m_size; } | 169 size_t size() const { return m_size; } |
169 | 170 |
170 private: | 171 private: |
171 Address m_base; | 172 Address m_base; |
172 size_t m_size; | 173 size_t m_size; |
173 }; | 174 }; |
174 | 175 |
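
MemoryRegion's commit() and decommit() bodies fall in the lines elided above. As a hedged sketch of the usual shape of that toggling on POSIX (the real code also has a VirtualAlloc/VirtualFree path for Windows; the flags below are assumptions, not lifted from this file):

    #include <sys/mman.h>
    #include <cstddef>

    // Sketch only: plausible POSIX bodies for commit()/decommit(). The real
    // implementations (and their Windows counterparts) live in the elided
    // lines above; the exact flags here are assumptions.
    static bool commitRegion(void* base, size_t size)
    {
        // Grant access; the kernel backs the pages lazily on first touch.
        return !mprotect(base, size, PROT_READ | PROT_WRITE);
    }

    static void decommitRegion(void* base, size_t size)
    {
        // Let the kernel reclaim the backing store, then drop access so any
        // stray use of a decommitted page faults immediately.
        madvise(base, size, MADV_DONTNEED);
        mprotect(base, size, PROT_NONE);
    }

Dropping to PROT_NONE after MADV_DONTNEED is what makes a use-after-decommit fault deterministically rather than silently read back zero pages.
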
| 176 // A PageMemoryRegion represents a chunk of reserved virtual address |
| 177 // space containing a number of blink heap pages. On Windows, reserved |
| 178 // virtual address space can only be given back to the system as a |
| 179 // whole. The PageMemoryRegion therefore keeps track of the number of |
| 180 // pages using it so that the whole reservation can be released when |
| 181 // no pages remain. |
| 182 class PageMemoryRegion : public MemoryRegion { |
| 183 public: |
| 184 ~PageMemoryRegion() |
| 185 { |
| 186 release(); |
| 187 } |
| 188 |
| 189 void pageRemoved() |
| 190 { |
| 191 if (!--m_numPages) |
| 192 delete this; |
| 193 } |
| 194 |
| 195 static PageMemoryRegion* allocate(size_t size, unsigned numPages) |
| 196 { |
| 197 ASSERT(Heap::heapDoesNotContainCacheIsEmpty()); |
| 198 |
| 199 // Compute a random blink page aligned address for the page memory |
| 200 // region and attempt to get the memory there. |
| 201 Address randomAddress = reinterpret_cast<Address>(WTF::getRandomPageBase()); |
| 202 Address alignedRandomAddress = roundToBlinkPageBoundary(randomAddress); |
| 203 |
| 204 #if OS(POSIX) |
| 205 Address base = static_cast<Address>(mmap(alignedRandomAddress, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0)); |
| 206 RELEASE_ASSERT(base != MAP_FAILED); |
| 207 if (base == roundToBlinkPageBoundary(base)) |
| 208 return new PageMemoryRegion(base, size, numPages); |
| 209 |
| 210 // We failed to get a blink page aligned chunk of |
| 211 // memory. Unmap the chunk that we got and fall back to |
| 212 // overallocating and selecting an aligned sub-region of |
| 213 // what we allocate. |
| 214 int error = munmap(base, size); |
| 215 RELEASE_ASSERT(!error); |
| 216 size_t allocationSize = size + blinkPageSize; |
| 217 base = static_cast<Address>(mmap(alignedRandomAddress, allocationSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0)); |
| 218 RELEASE_ASSERT(base != MAP_FAILED); |
| 219 |
| 220 Address end = base + allocationSize; |
| 221 Address alignedBase = roundToBlinkPageBoundary(base); |
| 222 Address regionEnd = alignedBase + size; |
| 223 |
| 224 // If the allocated memory was not blink page aligned, release |
| 225 // the memory before the aligned address. |
| 226 if (alignedBase != base) |
| 227 MemoryRegion(base, alignedBase - base).release(); |
| 228 |
| 229 // Free any additional memory at the end of the over-allocation. |
| 230 if (regionEnd < end) |
| 231 MemoryRegion(regionEnd, end - regionEnd).release(); |
| 232 |
| 233 return new PageMemoryRegion(alignedBase, size, numPages); |
| 234 #else |
| 235 Address base = static_cast<Address>(VirtualAlloc(alignedRandomAddress, size, MEM_RESERVE, PAGE_NOACCESS)); |
| 236 if (base) { |
| 237 ASSERT(base == alignedRandomAddress); |
| 238 return new PageMemoryRegion(base, size, numPages); |
| 239 } |
| 240 |
| 241 // We failed to get the random aligned address that we asked |
| 242 // for. Fall back to overallocating. On Windows it is |
| 243 // impossible to partially release a region of memory |
| 244 // allocated by VirtualAlloc, so to avoid wasting virtual |
| 245 // address space we reserve an oversized region, release it |
| 246 // as a whole, and then try to reserve an aligned region |
| 247 // inside the address range it just occupied. |
| 248 size_t allocationSize = size + blinkPageSize; |
| 249 for (int attempt = 0; attempt < 3; attempt++) { |
| 250 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS)); |
| 251 RELEASE_ASSERT(base); |
| 252 VirtualFree(base, 0, MEM_RELEASE); |
| 253 |
| 254 Address alignedBase = roundToBlinkPageBoundary(base); |
| 255 base = static_cast<Address>(VirtualAlloc(alignedBase, size, MEM_RESERVE, PAGE_NOACCESS)); |
| 256 if (base) { |
| 257 ASSERT(base == alignedBase); |
| 258 return new PageMemoryRegion(alignedBase, size, numPages); |
| 259 } |
| 260 } |
| 261 |
| 262 // We failed to avoid wasting virtual address space after |
| 263 // several attempts. |
| 264 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS)); |
| 265 RELEASE_ASSERT(base); |
| 266 |
| 267 // FIXME: If base happens to be blink page size aligned |
| 268 // here, the reservation has room for two pages; we could |
| 269 // create both out of the reserved space. Do this. |
| 270 Address alignedBase = roundToBlinkPageBoundary(base); |
| 271 |
| 272 return new PageMemoryRegion(alignedBase, size, numPages); |
| 273 #endif |
| 274 } |
| 275 |
| 276 private: |
| 277 PageMemoryRegion(Address base, size_t size, unsigned numPages) |
| 278 : MemoryRegion(base, size) |
| 279 , m_numPages(numPages) |
| 280 { |
| 281 } |
| 282 |
| 283 unsigned m_numPages; |
| 284 }; |
| 285 |
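
The POSIX fallback above works because mmap'd reservations can be trimmed piecemeal with munmap, unlike VirtualAlloc reservations on Windows. A self-contained sketch of that over-reserve-and-trim idea, assuming an illustrative 128 KiB blink page size (the constant and helper names here are made up for the example):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    // Illustrative constants/helpers; the real blinkPageSize and rounding
    // helpers are defined elsewhere in this file.
    static const size_t kBlinkPageSize = 1 << 17; // assumed 128 KiB

    static char* roundUpToBlinkPage(char* address)
    {
        uintptr_t value = reinterpret_cast<uintptr_t>(address);
        value = (value + kBlinkPageSize - 1) & ~uintptr_t(kBlinkPageSize - 1);
        return reinterpret_cast<char*>(value);
    }

    static char* reserveAligned(size_t size)
    {
        // Over-reserve by one blink page so an aligned sub-range of |size|
        // bytes is guaranteed to exist somewhere in the mapping.
        size_t allocationSize = size + kBlinkPageSize;
        void* raw = mmap(0, allocationSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
        if (raw == MAP_FAILED)
            return 0;
        char* base = static_cast<char*>(raw);
        char* alignedBase = roundUpToBlinkPage(base);
        char* end = base + allocationSize;
        char* alignedEnd = alignedBase + size;
        // mmap'd reservations, unlike VirtualAlloc ones, can be trimmed
        // piecemeal: give the slack at both ends back to the system.
        if (alignedBase != base)
            munmap(base, alignedBase - base);
        if (alignedEnd != end)
            munmap(alignedEnd, end - alignedEnd);
        return alignedBase;
    }

On Windows the code instead reserves an oversized region, releases it whole, and retries at the aligned address inside it, as the retry loop above shows.
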
175 // Representation of the memory used for a Blink heap page. | 286 // Representation of the memory used for a Blink heap page. |
176 // | 287 // |
177 // The representation keeps track of two memory regions: | 288 // The representation keeps track of two memory regions: |
178 // | 289 // |
179 // 1. The virtual memory reserved from the sytem in order to be able | 290 // 1. The virtual memory reserved from the system in order to be able |
180 // to free all the virtual memory reserved on destruction. | 291 // to free all the virtual memory reserved. Multiple PageMemory |
| 292 // instances can share the same reserved memory region and |
| 293 // therefore notify the reserved memory region on destruction so |
| 294 // that the system memory can be given back when all PageMemory |
| 295 // instances for that memory are gone. |
181 // | 296 // |
182 // 2. The writable memory (a sub-region of the reserved virtual | 297 // 2. The writable memory (a sub-region of the reserved virtual |
183 // memory region) that is used for the actual heap page payload. | 298 // memory region) that is used for the actual heap page payload. |
184 // | 299 // |
185 // Guard pages are created before and after the writable memory. | 300 // Guard pages are created before and after the writable memory. |
186 class PageMemory { | 301 class PageMemory { |
187 public: | 302 public: |
188 ~PageMemory() | 303 PageMemory(PageMemoryRegion* reserved, const MemoryRegion& writable) |
189 { | |
190 __lsan_unregister_root_region(m_writable.base(), m_writable.size()); | |
191 m_reserved.release(); | |
192 } | |
193 | |
194 bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); } | |
195 void decommit() { m_writable.decommit(); } | |
196 | |
197 Address writableStart() { return m_writable.base(); } | |
198 | |
199 // Allocate a virtual address space for the blink page with the | |
200 // following layout: | |
201 // | |
202 // [ guard os page | ... payload ... | guard os page ] | |
203 // ^---{ aligned to blink page size } | |
204 // | |
205 static PageMemory* allocate(size_t payloadSize) | |
206 { | |
207 ASSERT(payloadSize > 0); | |
208 | |
209 // Virtual memory allocation routines operate in OS page sizes. | |
210 // Round up the requested size to nearest os page size. | |
211 payloadSize = roundToOsPageSize(payloadSize); | |
212 | |
213 // Overallocate by blinkPageSize and 2 times OS page size to | |
214 // ensure a chunk of memory which is blinkPageSize aligned and | |
215 // has a system page before and after to use for guarding. We | |
216 // unmap the excess memory before returning. | |
217 size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize; | |
218 | |
219 ASSERT(Heap::heapDoesNotContainCacheIsEmpty()); | |
220 #if OS(POSIX) | |
221 Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0)); |
222 RELEASE_ASSERT(base != MAP_FAILED); | |
223 | |
224 Address end = base + allocationSize; | |
225 Address alignedBase = roundToBlinkPageBoundary(base); | |
226 Address payloadBase = alignedBase + osPageSize(); | |
227 Address payloadEnd = payloadBase + payloadSize; | |
228 Address blinkPageEnd = payloadEnd + osPageSize(); | |
229 | |
230 // If the allocate memory was not blink page aligned release | |
231 // the memory before the aligned address. | |
232 if (alignedBase != base) | |
233 MemoryRegion(base, alignedBase - base).release(); | |
234 | |
235 // Create guard pages by decommiting an OS page before and | |
236 // after the payload. | |
237 MemoryRegion(alignedBase, osPageSize()).decommit(); | |
238 MemoryRegion(payloadEnd, osPageSize()).decommit(); | |
239 | |
240 // Free the additional memory at the end of the page if any. | |
241 if (blinkPageEnd < end) | |
242 MemoryRegion(blinkPageEnd, end - blinkPageEnd).release(); | |
243 | |
244 return new PageMemory(MemoryRegion(alignedBase, blinkPageEnd - alignedBase), MemoryRegion(payloadBase, payloadSize)); |
245 #else | |
246 Address base = 0; | |
247 Address alignedBase = 0; | |
248 | |
249 // On Windows it is impossible to partially release a region | |
250 // of memory allocated by VirtualAlloc. To avoid wasting | |
251 // virtual address space we attempt to release a large region | |
252 // of memory returned as a whole and then allocate an aligned | |
253 // region inside this larger region. | |
254 for (int attempt = 0; attempt < 3; attempt++) { | |
255 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS)); |
256 RELEASE_ASSERT(base); | |
257 VirtualFree(base, 0, MEM_RELEASE); | |
258 | |
259 alignedBase = roundToBlinkPageBoundary(base); | |
260 base = static_cast<Address>(VirtualAlloc(alignedBase, payloadSize + 2 * osPageSize(), MEM_RESERVE, PAGE_NOACCESS)); |
261 if (base) { | |
262 RELEASE_ASSERT(base == alignedBase); | |
263 allocationSize = payloadSize + 2 * osPageSize(); | |
264 break; | |
265 } | |
266 } | |
267 | |
268 if (!base) { | |
269 // We failed to avoid wasting virtual address space after | |
270 // several attempts. | |
271 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS)); |
272 RELEASE_ASSERT(base); | |
273 | |
274 // FIXME: If base is by accident blink page size aligned | |
275 // here then we can create two pages out of reserved | |
276 // space. Do this. | |
277 alignedBase = roundToBlinkPageBoundary(base); | |
278 } | |
279 | |
280 Address payloadBase = alignedBase + osPageSize(); | |
281 PageMemory* storage = new PageMemory(MemoryRegion(base, allocationSize), MemoryRegion(payloadBase, payloadSize)); |
282 bool res = storage->commit(); | |
283 RELEASE_ASSERT(res); | |
284 return storage; | |
285 #endif | |
286 } | |
287 | |
288 private: | |
289 PageMemory(const MemoryRegion& reserved, const MemoryRegion& writable) | |
290 : m_reserved(reserved) | 304 : m_reserved(reserved) |
291 , m_writable(writable) | 305 , m_writable(writable) |
292 { | 306 { |
293 ASSERT(reserved.contains(writable)); | 307 ASSERT(reserved->contains(writable)); |
294 | 308 |
295 // Register the writable area of the memory as part of the LSan root set. | 309 // Register the writable area of the memory as part of the LSan root set. |
296 // Only the writable area is mapped and can contain C++ objects. Those | 310 // Only the writable area is mapped and can contain C++ objects. Those |
297 // C++ objects can contain pointers to objects outside of the heap and | 311 // C++ objects can contain pointers to objects outside of the heap and |
298 // should therefore be part of the LSan root set. | 312 // should therefore be part of the LSan root set. |
299 __lsan_register_root_region(m_writable.base(), m_writable.size()); | 313 __lsan_register_root_region(m_writable.base(), m_writable.size()); |
300 } | 314 } |
301 | 315 |
302 MemoryRegion m_reserved; | 316 ~PageMemory() |
| 317 { |
| 318 __lsan_unregister_root_region(m_writable.base(), m_writable.size()); |
| 319 m_reserved->pageRemoved(); |
| 320 } |
| 321 |
| 322 bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); } |
| 323 void decommit() { m_writable.decommit(); } |
| 324 |
| 325 Address writableStart() { return m_writable.base(); } |
| 326 |
| 327 // Allocate a virtual address space for one blink page with the |
| 328 // following layout: |
| 329 // |
| 330 // [ guard os page | ... payload ... | guard os page ] |
| 331 // ^---{ aligned to blink page size } |
| 332 // |
| 333 static PageMemory* allocate(size_t payloadSize) |
| 334 { |
| 335 ASSERT(payloadSize > 0); |
| 336 |
| 337 // Virtual memory allocation routines operate in OS page sizes. |
| 338 // Round up the requested size to the nearest OS page size. |
| 339 payloadSize = roundToOsPageSize(payloadSize); |
| 340 |
| 341 // Overallocate by two OS pages to make room for a guard |
| 342 // page at each end of the blink heap page. |
| 343 size_t allocationSize = payloadSize + 2 * osPageSize(); |
| 344 PageMemoryRegion* pageMemoryRegion = PageMemoryRegion::allocate(allocationSize, 1); |
| 345 Address payload = pageMemoryRegion->base() + osPageSize(); |
| 346 PageMemory* storage = new PageMemory(pageMemoryRegion, MemoryRegion(payload, payloadSize)); |
| 347 RELEASE_ASSERT(storage->commit()); |
| 348 return storage; |
| 349 } |
| 350 |
| 351 private: |
| 352 PageMemoryRegion* m_reserved; |
303 MemoryRegion m_writable; | 353 MemoryRegion m_writable; |
304 }; | 354 }; |
305 | 355 |
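
The guard pages are what turn a payload overrun or underrun into an immediate fault instead of silent heap corruption. A minimal sketch of the layout PageMemory describes — reserve everything inaccessible, commit only the payload — assuming 4 KiB OS pages (the constant and helper name are illustrative):

    #include <sys/mman.h>
    #include <cstddef>

    static const size_t kOsPageSize = 4096; // assumed; real code asks the OS

    // Reserve [ guard | payload | guard ] and commit only the payload.
    // Error handling is simplified for the sketch.
    static char* allocateGuardedPayload(size_t payloadSize)
    {
        size_t total = payloadSize + 2 * kOsPageSize;
        void* raw = mmap(0, total, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
        if (raw == MAP_FAILED)
            return 0;
        char* payload = static_cast<char*>(raw) + kOsPageSize;
        // The first and last OS pages stay PROT_NONE: writing one byte past
        // either end of the payload now faults instead of corrupting memory.
        if (mprotect(payload, payloadSize, PROT_READ | PROT_WRITE))
            return 0;
        return payload;
    }
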
306 class GCScope { | 356 class GCScope { |
307 public: | 357 public: |
308 explicit GCScope(ThreadState::StackState stackState) | 358 explicit GCScope(ThreadState::StackState stackState) |
309 : m_state(ThreadState::current()) | 359 : m_state(ThreadState::current()) |
310 , m_safePointScope(stackState) | 360 , m_safePointScope(stackState) |
311 , m_parkedAllThreads(false) | 361 , m_parkedAllThreads(false) |
312 { | 362 { |
(...skipping 400 matching lines...) |
713 return storage; | 763 return storage; |
714 | 764 |
715 // Failed to commit pooled storage. Release it. | 765 // Failed to commit pooled storage. Release it. |
716 delete storage; | 766 delete storage; |
717 } | 767 } |
718 | 768 |
719 return 0; | 769 return 0; |
720 } | 770 } |
721 | 771 |
722 template<typename Header> | 772 template<typename Header> |
723 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused) | 773 void ThreadHeap<Header>::addPageMemoryToPool(PageMemory* storage) |
724 { | 774 { |
725 flushHeapContainsCache(); | 775 flushHeapContainsCache(); |
726 PageMemory* storage = unused->storage(); | |
727 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); | 776 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); |
728 m_pagePool = entry; | 777 m_pagePool = entry; |
| 778 } |
| 779 |
| 780 template <typename Header> |
| 781 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* page) |
| 782 { |
| 783 PageMemory* storage = page->storage(); |
729 storage->decommit(); | 784 storage->decommit(); |
| 785 addPageMemoryToPool(storage); |
730 } | 786 } |
731 | 787 |
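
takePageFromPool and the two functions above amount to push/pop on an intrusive LIFO list of decommitted page storage. A stripped-down sketch; the real PagePoolEntry declaration is not part of this diff, so its layout here is an assumption:

    class PageMemory; // the pooled storage type defined earlier in the file

    // Assumed shape of PagePoolEntry; its real declaration is elided above.
    struct PagePoolEntry {
        PagePoolEntry(PageMemory* storage, PagePoolEntry* next)
            : storage(storage)
            , next(next) { }
        PageMemory* storage;
        PagePoolEntry* next;
    };

    // Mirrors addPageMemoryToPool: push storage onto the free list.
    static void poolPush(PagePoolEntry*& pool, PageMemory* storage)
    {
        pool = new PagePoolEntry(storage, pool);
    }

    // Mirrors the core of takePageFromPool: pop the most recent entry.
    static PageMemory* poolPop(PagePoolEntry*& pool)
    {
        if (!pool)
            return 0;
        PagePoolEntry* entry = pool;
        pool = entry->next;
        PageMemory* storage = entry->storage;
        delete entry;
        return storage;
    }

The real takePageFromPool additionally re-commits the popped storage before handing it out, deleting it when the commit fails, as the surviving lines above show.
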
732 template<typename Header> | 788 template<typename Header> |
733 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) | 789 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) |
734 { | 790 { |
735 Heap::flushHeapDoesNotContainCache(); | 791 Heap::flushHeapDoesNotContainCache(); |
736 PageMemory* pageMemory = takePageFromPool(); | 792 PageMemory* pageMemory = takePageFromPool(); |
737 if (!pageMemory) { | 793 if (!pageMemory) { |
738 pageMemory = PageMemory::allocate(blinkPagePayloadSize()); | 794 // Allocate a memory region for blinkPagesPerRegion pages that |
| 795 // will each have the following layout. |
| 796 // |
| 797 // [ guard os page | ... payload ... | guard os page ] |
| 798 // ^---{ aligned to blink page size } |
| 799 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); |
| 800 // Set up a PageMemory object for each of the pages in the |
| 801 // region. The first payload address is one OS page into the |
| 802 // region; that leading OS page acts as a guard page. |
| 803 ASSERT(blinkPageSize == blinkPagePayloadSize() + 2 * osPageSize()); |
| 804 Address payload = region->base() + osPageSize(); |
| 805 for (size_t i = 0; i < blinkPagesPerRegion; i++) { |
| 806 addPageMemoryToPool(new PageMemory(region, MemoryRegion(payload, blinkPagePayloadSize()))); |
| 807 payload += blinkPageSize; |
| 808 } |
| 809 pageMemory = takePageFromPool(); |
739 RELEASE_ASSERT(pageMemory); | 810 RELEASE_ASSERT(pageMemory); |
740 } | 811 } |
741 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); | 812 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); |
742 // FIXME: Oilpan: Linking new pages into the front of the list is | 813 // FIXME: Oilpan: Linking new pages into the front of the list is |
743 // crucial when performing allocations during finalization because | 814 // crucial when performing allocations during finalization because |
744 // it ensures that those pages are not swept in the current GC | 815 // it ensures that those pages are not swept in the current GC |
745 // round. We should create a separate page list for that to | 816 // round. We should create a separate page list for that to |
746 // separate out the pages allocated during finalization clearly | 817 // separate out the pages allocated during finalization clearly |
747 // from the pages currently being swept. | 818 // from the pages currently being swept. |
748 page->link(&m_firstPage); | 819 page->link(&m_firstPage); |
(...skipping 1152 matching lines...) |
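
Tying allocatePage's carving loop back to PageMemoryRegion::pageRemoved: all blinkPagesPerRegion PageMemory objects hold the same region, and the reservation is handed back only when the last of them is destroyed. A toy model of that lifetime protocol (simplified stand-ins, not the real classes):

    // Toy stand-ins: Region mimics PageMemoryRegion's page counting, Page
    // mimics the pageRemoved() call in PageMemory's destructor.
    struct Region {
        explicit Region(unsigned numPages) : m_numPages(numPages) { }
        void pageRemoved()
        {
            // The last page is gone: the whole reservation can be released.
            if (!--m_numPages)
                delete this;
        }
        unsigned m_numPages;
    };

    struct Page {
        explicit Page(Region* region) : m_region(region) { }
        ~Page() { m_region->pageRemoved(); }
        Region* m_region;
    };

    // Carve ten pages from one region; only deleting the tenth page frees
    // the region itself.
    static void demo()
    {
        Region* region = new Region(10);
        Page* pages[10];
        for (int i = 0; i < 10; ++i)
            pages[i] = new Page(region);
        for (int i = 0; i < 10; ++i)
            delete pages[i]; // the final delete triggers "delete region"
    }
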
1901 template class ThreadHeap<HeapObjectHeader>; | 1972 template class ThreadHeap<HeapObjectHeader>; |
1902 | 1973 |
1903 Visitor* Heap::s_markingVisitor; | 1974 Visitor* Heap::s_markingVisitor; |
1904 CallbackStack* Heap::s_markingStack; | 1975 CallbackStack* Heap::s_markingStack; |
1905 CallbackStack* Heap::s_weakCallbackStack; | 1976 CallbackStack* Heap::s_weakCallbackStack; |
1906 CallbackStack* Heap::s_ephemeronStack; | 1977 CallbackStack* Heap::s_ephemeronStack; |
1907 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | 1978 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; |
1908 bool Heap::s_shutdownCalled = false; | 1979 bool Heap::s_shutdownCalled = false; |
1909 bool Heap::s_lastGCWasConservative = false; | 1980 bool Heap::s_lastGCWasConservative = false; |
1910 } | 1981 } |