OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 34 matching lines...) |
45 #include "wtf/ThreadSafeRefCounted.h" | 45 #include "wtf/ThreadSafeRefCounted.h" |
46 | 46 |
47 #include <stdint.h> | 47 #include <stdint.h> |
48 | 48 |
49 namespace WebCore { | 49 namespace WebCore { |
50 | 50 |
51 const size_t blinkPageSizeLog2 = 17; | 51 const size_t blinkPageSizeLog2 = 17; |
52 const size_t blinkPageSize = 1 << blinkPageSizeLog2; | 52 const size_t blinkPageSize = 1 << blinkPageSizeLog2; |
53 const size_t blinkPageOffsetMask = blinkPageSize - 1; | 53 const size_t blinkPageOffsetMask = blinkPageSize - 1; |
54 const size_t blinkPageBaseMask = ~blinkPageOffsetMask; | 54 const size_t blinkPageBaseMask = ~blinkPageOffsetMask; |
| 55 |
| 56 // We allocate pages at random addresses but in groups of |
| 57 // blinkPagesPerRegion at a given random address. We group pages to |
| 58 // not spread out too much over the address space which would blow |
| 59 // away the page tables and lead to bad performance. |
| 60 const size_t blinkPagesPerRegion = 10; |
| 61 |
55 // Double precision floats are more efficient when 8 byte aligned, so we 8 byte | 62 // Double precision floats are more efficient when 8 byte aligned, so we 8 byte |
56 // align all allocations even on 32 bit. | 63 // align all allocations even on 32 bit. |
57 const size_t allocationGranularity = 8; | 64 const size_t allocationGranularity = 8; |
58 const size_t allocationMask = allocationGranularity - 1; | 65 const size_t allocationMask = allocationGranularity - 1; |
59 const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity); | 66 const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity); |
60 const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask); | 67 const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask); |
61 const size_t maxHeapObjectSize = 1 << 27; | 68 const size_t maxHeapObjectSize = 1 << 27; |
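Working the arithmetic: blinkPageSize is 1 << 17 = 131072 bytes, and with one object start bit per 8-byte allocation granule the bitmap needs 131072 / (8 * 8) = 2048 bytes, which is already a multiple of allocationGranularity. Compile-time checks expressing this (assuming a C++11 toolchain; not part of the patch):

    static_assert(objectStartBitMapSize == 2048, "one start bit per 8-byte granule of a 128KB page");
    static_assert(reservedForObjectBitMap == 2048, "the bitmap size is already 8-byte aligned");
    static_assert(maxHeapObjectSize == 134217728, "1 << 27 bytes, i.e. 128MB");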
62 | 69 |
63 const size_t markBitMask = 1; | 70 const size_t markBitMask = 1; |
64 const size_t freeListMask = 2; | 71 const size_t freeListMask = 2; |
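Since every allocation size is a multiple of allocationGranularity (8), the low bits of a header's size word are free to carry these flags. A minimal sketch of that encoding with hypothetical names (the actual header classes are in the skipped lines):

    class ExampleHeader {
    public:
        explicit ExampleHeader(size_t size) : m_sizeAndFlags(size) { }
        bool isMarked() const { return m_sizeAndFlags & markBitMask; }
        void mark() { m_sizeAndFlags |= markBitMask; }
        void unmark() { m_sizeAndFlags &= ~markBitMask; }
        bool isFree() const { return m_sizeAndFlags & freeListMask; }
        void markAsFree() { m_sizeAndFlags |= freeListMask; }
        size_t size() const { return m_sizeAndFlags & ~(markBitMask | freeListMask); }
    private:
        size_t m_sizeAndFlags;
    };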
(...skipping 751 matching lines...) |
816 | 823 |
817 ThreadState* threadState() { return m_threadState; } | 824 ThreadState* threadState() { return m_threadState; } |
818 HeapStats& stats() { return m_threadState->stats(); } | 825 HeapStats& stats() { return m_threadState->stats(); } |
819 void flushHeapContainsCache() | 826 void flushHeapContainsCache() |
820 { | 827 { |
821 m_threadState->heapContainsCache()->flush(); | 828 m_threadState->heapContainsCache()->flush(); |
822 } | 829 } |
823 | 830 |
824 inline Address allocate(size_t, const GCInfo*); | 831 inline Address allocate(size_t, const GCInfo*); |
825 void addToFreeList(Address, size_t); | 832 void addToFreeList(Address, size_t); |
| 833 void addPageMemoryToPool(PageMemory*); |
826 void addPageToPool(HeapPage<Header>*); | 834 void addPageToPool(HeapPage<Header>*); |
827 inline static size_t roundedAllocationSize(size_t size) | 835 inline static size_t roundedAllocationSize(size_t size) |
828 { | 836 { |
829 return allocationSizeFromSize(size) - sizeof(Header); | 837 return allocationSizeFromSize(size) - sizeof(Header); |
830 } | 838 } |
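roundedAllocationSize() above returns the usable payload, i.e. the rounded total minus the header. allocationSizeFromSize() itself is outside this excerpt; a plausible definition, sketched here as an assumption rather than the actual one:

    inline static size_t allocationSizeFromSize(size_t size)
    {
        // Reserve room for the object header, then round up to the 8-byte
        // allocation granularity so the low header bits stay usable as flags.
        size_t allocationSize = size + sizeof(Header);
        allocationSize = (allocationSize + allocationMask) & ~allocationMask;
        return allocationSize;
    }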
831 | 839 |
832 private: | 840 private: |
833 // Once pages have been used for one thread heap they will never | 841 // Once pages have been used for one thread heap they will never |
834 // be reused for another thread heap. Instead of unmapping, we add | 842 // be reused for another thread heap. Instead of unmapping, we add |
835 // the pages to a pool of pages to be reused later by this thread | 843 // the pages to a pool of pages to be reused later by this thread |
(...skipping 1501 matching lines...) |
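The pool mentioned in the comment above (which continues into the skipped lines) can be kept per thread heap as a simple intrusive list of retired page memory. A hedged sketch with hypothetical names, not the pool type used by the skipped code:

    class ExamplePagePoolEntry {
    public:
        ExamplePagePoolEntry(PageMemory* storage, ExamplePagePoolEntry* next)
            : m_storage(storage)
            , m_next(next) { }
        PageMemory* storage() const { return m_storage; }
        ExamplePagePoolEntry* next() const { return m_next; }
    private:
        PageMemory* m_storage;
        ExamplePagePoolEntry* m_next;
    };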
2337 }; | 2345 }; |
2338 | 2346 |
2339 template<typename T> | 2347 template<typename T> |
2340 struct IfWeakMember<WeakMember<T> > { | 2348 struct IfWeakMember<WeakMember<T> > { |
2341 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); } | 2349 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); } |
2342 }; | 2350 }; |
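For context, the primary IfWeakMember template (outside this excerpt) presumably reports non-weak fields as never dead, so only WeakMember<T> fields can cause entries to be dropped during weak processing. A hedged usage sketch, assuming wtf/HashMap.h and wtf/Vector.h are available and using a hypothetical helper name:

    template<typename Key, typename Value>
    void removeDeadWeakEntries(Visitor* visitor, HashMap<Key, Value>& table)
    {
        Vector<Key> deadKeys;
        typedef typename HashMap<Key, Value>::iterator Iterator;
        for (Iterator it = table.begin(); it != table.end(); ++it) {
            // For a strong Member<T> key the primary template returns false;
            // for a WeakMember<T> key the specialization above asks the
            // visitor whether the pointee survived marking.
            if (IfWeakMember<Key>::isDead(visitor, it->key))
                deadKeys.append(it->key);
        }
        for (size_t i = 0; i < deadKeys.size(); ++i)
            table.remove(deadKeys[i]);
    }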
2343 | 2351 |
2344 } | 2352 } |
2345 | 2353 |
2346 #endif // Heap_h | 2354 #endif // Heap_h |