Chromium Code Reviews

Side by Side Diff: Source/platform/heap/HeapPage.h

Issue 1314793002: Oilpan: Split Heap.h into two files (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 5 years, 4 months ago
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 10 matching lines...)
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */ 29 */
30 30
31 #ifndef Heap_h 31 #ifndef HeapPage_h
32 #define Heap_h 32 #define HeapPage_h
33 33
34 #include "platform/PlatformExport.h" 34 #include "platform/PlatformExport.h"
35 #include "platform/heap/GCInfo.h" 35 #include "platform/heap/GCInfo.h"
36 #include "platform/heap/ThreadState.h" 36 #include "platform/heap/ThreadState.h"
37 #include "platform/heap/Visitor.h" 37 #include "platform/heap/Visitor.h"
38 #include "public/platform/WebThread.h"
39 #include "wtf/AddressSanitizer.h" 38 #include "wtf/AddressSanitizer.h"
40 #include "wtf/Assertions.h" 39 #include "wtf/Assertions.h"
41 #include "wtf/Atomics.h" 40 #include "wtf/Atomics.h"
42 #include "wtf/ContainerAnnotations.h" 41 #include "wtf/ContainerAnnotations.h"
43 #include "wtf/Forward.h" 42 #include "wtf/Forward.h"
44 #include "wtf/PageAllocator.h" 43 #include "wtf/PageAllocator.h"
45 #include <stdint.h> 44 #include <stdint.h>
46 45
47 namespace blink { 46 namespace blink {
48 47
(...skipping 51 matching lines...)
100 FreeList::zapFreedMemory(address, size); \ 99 FreeList::zapFreedMemory(address, size); \
101 ASAN_POISON_MEMORY_REGION(address, size) 100 ASAN_POISON_MEMORY_REGION(address, size)
102 #define SET_MEMORY_ACCESSIBLE(address, size) \ 101 #define SET_MEMORY_ACCESSIBLE(address, size) \
103 ASAN_UNPOISON_MEMORY_REGION(address, size); \ 102 ASAN_UNPOISON_MEMORY_REGION(address, size); \
104 memset((address), 0, (size)) 103 memset((address), 0, (size))
105 #else 104 #else
106 #define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size)) 105 #define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size))
107 #define SET_MEMORY_ACCESSIBLE(address, size) do { } while (false) 106 #define SET_MEMORY_ACCESSIBLE(address, size) do { } while (false)
108 #endif 107 #endif
109 108
109 #if !ENABLE(ASSERT) && !ENABLE(GC_PROFILING) && CPU(64BIT)
110 #define USE_4BYTE_HEADER_PADDING 1
111 #else
112 #define USE_4BYTE_HEADER_PADDING 0
113 #endif
114
110 class CallbackStack; 115 class CallbackStack;
111 class FreePagePool; 116 class FreePagePool;
112 class NormalPageHeap; 117 class NormalPageHeap;
113 class OrphanedPagePool; 118 class OrphanedPagePool;
114 class PageMemory; 119 class PageMemory;
115 class PageMemoryRegion; 120 class PageMemoryRegion;
116 class WebProcessMemoryDump; 121 class WebProcessMemoryDump;
117 122
118 #if ENABLE(GC_PROFILING) 123 #if ENABLE(GC_PROFILING)
119 class TracedValue; 124 class TracedValue;
(...skipping 104 matching lines...)
224 size_t age() const { return m_age; } 229 size_t age() const { return m_age; }
225 230
226 NO_SANITIZE_ADDRESS 231 NO_SANITIZE_ADDRESS
227 void incrementAge() 232 void incrementAge()
228 { 233 {
229 if (m_age < maxHeapObjectAge) 234 if (m_age < maxHeapObjectAge)
230 m_age++; 235 m_age++;
231 } 236 }
232 #endif 237 #endif
233 238
234 #if !ENABLE(ASSERT) && !ENABLE(GC_PROFILING) && CPU(64BIT)
235 // This method is needed just to avoid compilers from removing m_padding.
236 uint64_t unusedMethod() const { return m_padding; }
237 #endif
238
239 private: 239 private:
240 uint32_t m_encoded; 240 uint32_t m_encoded;
241 #if ENABLE(ASSERT) 241 #if ENABLE(ASSERT)
242 uint16_t m_magic; 242 uint16_t m_magic;
243 #endif 243 #endif
244 #if ENABLE(GC_PROFILING) 244 #if ENABLE(GC_PROFILING)
245 uint8_t m_age; 245 uint8_t m_age;
246 #endif 246 #endif
247 247
248 // In 64 bit architectures, we intentionally add 4 byte padding immediately 248 // In 64 bit architectures, we intentionally add 4 byte padding immediately
249 // after the HeapHeaderObject. This is because: 249 // after the HeapHeaderObject. This is because:
250 // 250 //
251 // | HeapHeaderObject (4 byte) | padding (4 byte) | object payload (8 * n byte) | 251 // | HeapHeaderObject (4 byte) | padding (4 byte) | object payload (8 * n byte) |
252 // ^8 byte aligned ^8 byte aligned 252 // ^8 byte aligned ^8 byte aligned
253 // 253 //
254 // is better than: 254 // is better than:
255 // 255 //
256 // | HeapHeaderObject (4 byte) | object payload (8 * n byte) | padding (4 byte) | 256 // | HeapHeaderObject (4 byte) | object payload (8 * n byte) | padding (4 byte) |
257 // ^4 byte aligned ^8 byte aligned ^4 byte aligned 257 // ^4 byte aligned ^8 byte aligned ^4 byte aligned
258 // 258 //
259 // since the former layout aligns both header and payload to 8 byte. 259 // since the former layout aligns both header and payload to 8 byte.
260 #if !ENABLE(ASSERT) && !ENABLE(GC_PROFILING) && CPU(64BIT) 260 #if USE_4BYTE_HEADER_PADDING
261 public:
261 uint32_t m_padding; 262 uint32_t m_padding;
262 #endif 263 #endif
263 }; 264 };
264 265
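The layout comment above can be double-checked with a small, self-contained C++ sketch. It is not Blink code; MockHeader and MockHeapObject are stand-ins used only to illustrate why placing the 4 bytes of padding directly after a 4 byte header keeps the 8-byte-aligned payload immediately adjacent to it:

#include <cstddef>
#include <cstdint>

// Stand-in for HeapObjectHeader in the USE_4BYTE_HEADER_PADDING configuration:
// a 4 byte encoded field followed by 4 bytes of explicit padding.
struct MockHeader {
    uint32_t m_encoded;
    uint32_t m_padding;
};

// An 8-byte-aligned payload placed right after the header.
struct MockHeapObject {
    MockHeader header;
    uint64_t payload; // stands in for "object payload (8 * n byte)"
};

// With the padding inside the header, sizeof(MockHeader) is 8 and the payload
// starts at offset 8, so header and payload are both 8 byte aligned.
static_assert(sizeof(MockHeader) == 8, "header plus padding occupies 8 bytes");
static_assert(offsetof(MockHeapObject, payload) % 8 == 0, "payload is 8 byte aligned");

int main() { return 0; }
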
265 class FreeListEntry final : public HeapObjectHeader { 266 class FreeListEntry final : public HeapObjectHeader {
266 public: 267 public:
267 NO_SANITIZE_ADDRESS 268 NO_SANITIZE_ADDRESS
268 explicit FreeListEntry(size_t size) 269 explicit FreeListEntry(size_t size)
269 : HeapObjectHeader(size, gcInfoIndexForFreeListHeader) 270 : HeapObjectHeader(size, gcInfoIndexForFreeListHeader)
270 , m_next(nullptr) 271 , m_next(nullptr)
(...skipping 543 matching lines...)
814 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our 815 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our
815 // typed heaps. This is only exported to enable tests in HeapTest.cpp. 816 // typed heaps. This is only exported to enable tests in HeapTest.cpp.
816 PLATFORM_EXPORT inline BasePage* pageFromObject(const void* object) 817 PLATFORM_EXPORT inline BasePage* pageFromObject(const void* object)
817 { 818 {
818 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); 819 Address address = reinterpret_cast<Address>(const_cast<void*>(object));
819 BasePage* page = reinterpret_cast<BasePage*>(blinkPageAddress(address) + blinkGuardPageSize); 820 BasePage* page = reinterpret_cast<BasePage*>(blinkPageAddress(address) + blinkGuardPageSize);
820 ASSERT(page->contains(address)); 821 ASSERT(page->contains(address));
821 return page; 822 return page;
822 } 823 }
823 824
824 template<typename T, bool = NeedsAdjustAndMark<T>::value> class ObjectAliveTrait;
825
826 template<typename T>
827 class ObjectAliveTrait<T, false> {
828 public:
829 static bool isHeapObjectAlive(T* object)
830 {
831 static_assert(sizeof(T), "T must be fully defined");
832 return HeapObjectHeader::fromPayload(object)->isMarked();
833 }
834 };
835
836 template<typename T>
837 class ObjectAliveTrait<T, true> {
838 public:
839 static bool isHeapObjectAlive(T* object)
840 {
841 static_assert(sizeof(T), "T must be fully defined");
842 return object->isHeapObjectAlive();
843 }
844 };
845
846 class PLATFORM_EXPORT Heap {
847 public:
848 static void init();
849 static void shutdown();
850 static void doShutdown();
851
852 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING)
853 static BasePage* findPageFromAddress(Address);
854 static BasePage* findPageFromAddress(const void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(const_cast<void*>(pointer))); }
855 #endif
856
857 template<typename T>
858 static inline bool isHeapObjectAlive(T* object)
859 {
860 static_assert(sizeof(T), "T must be fully defined");
861 // The strongification of collections relies on the fact that once a
862 // collection has been strongified, there is no way that it can contain
863 // non-live entries, so no entries will be removed. Since you can't set
864 // the mark bit on a null pointer, that means that null pointers are
865 // always 'alive'.
866 if (!object)
867 return true;
868 return ObjectAliveTrait<T>::isHeapObjectAlive(object);
869 }
870 template<typename T>
871 static inline bool isHeapObjectAlive(const Member<T>& member)
872 {
873 return isHeapObjectAlive(member.get());
874 }
875 template<typename T>
876 static inline bool isHeapObjectAlive(const WeakMember<T>& member)
877 {
878 return isHeapObjectAlive(member.get());
879 }
880 template<typename T>
881 static inline bool isHeapObjectAlive(const RawPtr<T>& ptr)
882 {
883 return isHeapObjectAlive(ptr.get());
884 }
885
886 // Is the finalizable GC object still alive, but slated for lazy sweeping?
887 // If a lazy sweep is in progress, returns true if the object was found
888 // to be not reachable during the marking phase, but it has yet to be swept
889 // and finalized. The predicate returns false in all other cases.
890 //
891 // Holding a reference to an already-dead object is not a valid state
892 // to be in; willObjectBeLazilySwept() has undefined behavior if passed
893 // such a reference.
894 template<typename T>
895 NO_LAZY_SWEEP_SANITIZE_ADDRESS
896 static bool willObjectBeLazilySwept(const T* objectPointer)
897 {
898 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving from GarbageCollected can be used.");
899 #if ENABLE(LAZY_SWEEPING)
900 BasePage* page = pageFromObject(objectPointer);
901 if (page->hasBeenSwept())
902 return false;
903 ASSERT(page->heap()->threadState()->isSweepingInProgress());
904
905 return !Heap::isHeapObjectAlive(const_cast<T*>(objectPointer));
906 #else
907 return false;
908 #endif
909 }
910
911 // Push a trace callback on the marking stack.
912 static void pushTraceCallback(void* containerObject, TraceCallback);
913
914 // Push a trace callback on the post-marking callback stack. These
915 // callbacks are called after normal marking (including ephemeron
916 // iteration).
917 static void pushPostMarkingCallback(void*, TraceCallback);
918
919 // Add a weak pointer callback to the weak callback work list. General
920 // object pointer callbacks are added to a thread local weak callback work
921 // list and the callback is called on the thread that owns the object, with
922 // the closure pointer as an argument. Most of the time, the closure and
923 // the containerObject can be the same thing, but the containerObject is
924 // constrained to be on the heap, since the heap is used to identify the
925 // correct thread.
926 static void pushThreadLocalWeakCallback(void* closure, void* containerObject, WeakCallback);
927
928 // Similar to the more general pushThreadLocalWeakCallback, but cell
929 // pointer callbacks are added to a static callback work list and the weak
930 // callback is performed on the thread performing garbage collection. This
931 // is OK because cells are just cleared and no deallocation can happen.
932 static void pushGlobalWeakCallback(void** cell, WeakCallback);
933
934 // Pop the top of a marking stack and call the callback with the visitor
935 // and the object. Returns false when there is nothing more to do.
936 static bool popAndInvokeTraceCallback(Visitor*);
937
938 // Remove an item from the post-marking callback stack and call
939 // the callback with the visitor and the object pointer. Returns
940 // false when there is nothing more to do.
941 static bool popAndInvokePostMarkingCallback(Visitor*);
942
943 // Remove an item from the weak callback work list and call the callback
944 // with the visitor and the closure pointer. Returns false when there is
945 // nothing more to do.
946 static bool popAndInvokeGlobalWeakCallback(Visitor*);
947
948 // Register an ephemeron table for fixed-point iteration.
949 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback);
950 #if ENABLE(ASSERT)
951 static bool weakTableRegistered(const void*);
952 #endif
953
954 static inline size_t allocationSizeFromSize(size_t size)
955 {
956 // Check the size before computing the actual allocation size. The
957 // allocation size calculation can overflow for large sizes and the check
958 // therefore has to happen before any calculation on the size.
959 RELEASE_ASSERT(size < maxHeapObjectSize);
960
961 // Add space for header.
962 size_t allocationSize = size + sizeof(HeapObjectHeader);
963 // Align size with allocation granularity.
964 allocationSize = (allocationSize + allocationMask) & ~allocationMask;
965 return allocationSize;
966 }
967 static Address allocateOnHeapIndex(ThreadState*, size_t, int heapIndex, size_t gcInfoIndex);
968 template<typename T> static Address allocate(size_t, bool eagerlySweep = false);
969 template<typename T> static Address reallocate(void* previous, size_t);
970
971 enum GCReason {
972 IdleGC,
973 PreciseGC,
974 ConservativeGC,
975 ForcedGC,
976 NumberOfGCReason
977 };
978 static const char* gcReasonString(GCReason);
979 static void collectGarbage(ThreadState::StackState, ThreadState::GCType, GCReason);
980 static void collectGarbageForTerminatingThread(ThreadState*);
981 static void collectAllGarbage();
982
983 static void processMarkingStack(Visitor*);
984 static void postMarkingProcessing(Visitor*);
985 static void globalWeakProcessing(Visitor*);
986 static void setForcePreciseGCForTesting();
987
988 static void preGC();
989 static void postGC(ThreadState::GCType);
990
991 // Conservatively checks whether an address is a pointer in any of the
992 // thread heaps. If so marks the object pointed to as live.
993 static Address checkAndMarkPointer(Visitor*, Address);
994
995 #if ENABLE(GC_PROFILING)
996 // Dump the path to specified object on the next GC. This method is to be
997 // invoked from GDB.
998 static void dumpPathToObjectOnNextGC(void* p);
999
1000 // Forcibly find GCInfo of the object at Address. This is slow and should
1001 // only be used for debug purposes. It involves finding the heap page and
1002 // scanning the heap page for an object header.
1003 static const GCInfo* findGCInfo(Address);
1004
1005 static String createBacktraceString();
1006 #endif
1007
1008 static size_t objectPayloadSizeForTesting();
1009
1010 static void flushHeapDoesNotContainCache();
1011
1012 static FreePagePool* freePagePool() { return s_freePagePool; }
1013 static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; }
1014
1015 // This look-up uses the region search tree and a negative contains cache to
1016 // provide an efficient mapping from arbitrary addresses to the containing
1017 // heap-page if one exists.
1018 static BasePage* lookup(Address);
1019 static void addPageMemoryRegion(PageMemoryRegion*);
1020 static void removePageMemoryRegion(PageMemoryRegion*);
1021
1022 static const GCInfo* gcInfo(size_t gcInfoIndex)
1023 {
1024 ASSERT(gcInfoIndex >= 1);
1025 ASSERT(gcInfoIndex < GCInfoTable::maxIndex);
1026 ASSERT(s_gcInfoTable);
1027 const GCInfo* info = s_gcInfoTable[gcInfoIndex];
1028 ASSERT(info);
1029 return info;
1030 }
1031
1032 static void setMarkedObjectSizeAtLastCompleteSweep(size_t size) { releaseStore(&s_markedObjectSizeAtLastCompleteSweep, size); }
1033 static size_t markedObjectSizeAtLastCompleteSweep() { return acquireLoad(&s_markedObjectSizeAtLastCompleteSweep); }
1034 static void increaseAllocatedObjectSize(size_t delta) { atomicAdd(&s_allocatedObjectSize, static_cast<long>(delta)); }
1035 static void decreaseAllocatedObjectSize(size_t delta) { atomicSubtract(&s_allocatedObjectSize, static_cast<long>(delta)); }
1036 static size_t allocatedObjectSize() { return acquireLoad(&s_allocatedObjectSize); }
1037 static void increaseMarkedObjectSize(size_t delta) { atomicAdd(&s_markedObjectSize, static_cast<long>(delta)); }
1038 static size_t markedObjectSize() { return acquireLoad(&s_markedObjectSize); }
1039 static void increaseAllocatedSpace(size_t delta) { atomicAdd(&s_allocatedSpace, static_cast<long>(delta)); }
1040 static void decreaseAllocatedSpace(size_t delta) { atomicSubtract(&s_allocatedSpace, static_cast<long>(delta)); }
1041 static size_t allocatedSpace() { return acquireLoad(&s_allocatedSpace); }
1042 static size_t objectSizeAtLastGC() { return acquireLoad(&s_objectSizeAtLastGC); }
1043 static void increasePersistentCount(size_t delta) { atomicAdd(&s_persistentCount, static_cast<long>(delta)); }
1044 static void decreasePersistentCount(size_t delta) { atomicSubtract(&s_persistentCount, static_cast<long>(delta)); }
1045 static size_t persistentCount() { return acquireLoad(&s_persistentCount); }
1046 static size_t persistentCountAtLastGC() { return acquireLoad(&s_persistentCountAtLastGC); }
1047 static void increaseCollectedPersistentCount(size_t delta) { atomicAdd(&s_collectedPersistentCount, static_cast<long>(delta)); }
1048 static size_t collectedPersistentCount() { return acquireLoad(&s_collectedPersistentCount); }
1049 static size_t partitionAllocSizeAtLastGC() { return acquireLoad(&s_partitionAllocSizeAtLastGC); }
1050
1051 static double estimatedMarkingTime();
1052 static void reportMemoryUsageHistogram();
1053 static void reportMemoryUsageForTracing();
1054
1055 private:
1056 // A RegionTree is a simple binary search tree of PageMemoryRegions sorted
1057 // by base addresses.
1058 class RegionTree {
1059 public:
1060 explicit RegionTree(PageMemoryRegion* region) : m_region(region), m_left(nullptr), m_right(nullptr) { }
1061 ~RegionTree()
1062 {
1063 delete m_left;
1064 delete m_right;
1065 }
1066 PageMemoryRegion* lookup(Address);
1067 static void add(RegionTree*, RegionTree**);
1068 static void remove(PageMemoryRegion*, RegionTree**);
1069 private:
1070 PageMemoryRegion* m_region;
1071 RegionTree* m_left;
1072 RegionTree* m_right;
1073 };
1074
1075 // Reset counters that track live and allocated-since-last-GC sizes.
1076 static void resetHeapCounters();
1077
1078 static int heapIndexForObjectSize(size_t);
1079 static bool isNormalHeapIndex(int);
1080
1081 static CallbackStack* s_markingStack;
1082 static CallbackStack* s_postMarkingCallbackStack;
1083 static CallbackStack* s_globalWeakCallbackStack;
1084 static CallbackStack* s_ephemeronStack;
1085 static HeapDoesNotContainCache* s_heapDoesNotContainCache;
1086 static bool s_shutdownCalled;
1087 static FreePagePool* s_freePagePool;
1088 static OrphanedPagePool* s_orphanedPagePool;
1089 static RegionTree* s_regionTree;
1090 static size_t s_allocatedSpace;
1091 static size_t s_allocatedObjectSize;
1092 static size_t s_objectSizeAtLastGC;
1093 static size_t s_markedObjectSize;
1094 static size_t s_markedObjectSizeAtLastCompleteSweep;
1095 static size_t s_persistentCount;
1096 static size_t s_persistentCountAtLastGC;
1097 static size_t s_collectedPersistentCount;
1098 static size_t s_partitionAllocSizeAtLastGC;
1099 static double s_estimatedMarkingTimePerByte;
1100
1101 friend class ThreadState;
1102 };
1103
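As a worked example of the arithmetic in Heap::allocationSizeFromSize() above, the rounding is the usual mask trick. The sketch below is standalone and uses assumed constants; the real allocationGranularity, allocationMask, and sizeof(HeapObjectHeader) are defined by the Oilpan headers, not by this snippet:

#include <cassert>
#include <cstddef>

// Assumed values, for illustration only.
static const size_t kAssumedHeaderSize = 8;                  // plays the role of sizeof(HeapObjectHeader)
static const size_t kAssumedGranularity = 8;                 // plays the role of allocationGranularity
static const size_t kAssumedMask = kAssumedGranularity - 1;  // plays the role of allocationMask

// Mirrors the body of Heap::allocationSizeFromSize(), minus the
// RELEASE_ASSERT against maxHeapObjectSize.
static size_t allocationSizeFromSize(size_t size)
{
    size_t allocationSize = size + kAssumedHeaderSize;                // add space for the header
    allocationSize = (allocationSize + kAssumedMask) & ~kAssumedMask; // round up to the granularity
    return allocationSize;
}

int main()
{
    assert(allocationSizeFromSize(13) == 24); // 13 + 8 = 21, rounded up to 24
    assert(allocationSizeFromSize(16) == 24); // 16 + 8 = 24, already aligned
    return 0;
}
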
1104 template<typename T>
1105 struct IsEagerlyFinalizedType {
1106 private:
1107 typedef char YesType;
1108 struct NoType {
1109 char padding[8];
1110 };
1111
1112 template <typename U> static YesType checkMarker(typename U::IsEagerlyFinalizedMarker*);
1113 template <typename U> static NoType checkMarker(...);
1114
1115 public:
1116 static const bool value = sizeof(checkMarker<T>(nullptr)) == sizeof(YesType);
1117 };
1118
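IsEagerlyFinalizedType above uses the classic sizeof-based member-detection (SFINAE) idiom: the checkMarker overload taking a U::IsEagerlyFinalizedMarker* is only viable when that nested typedef exists, otherwise the variadic overload is chosen. A standalone illustration of the same idiom, with hypothetical types (EagerThing, PlainThing) used only here:

#include <cstdio>

// Same detection idiom as IsEagerlyFinalizedType, applied to two
// hypothetical classes.
template<typename T>
struct HasEagerMarker {
private:
    typedef char YesType;
    struct NoType { char padding[8]; };

    // Preferred overload; only participates if T::IsEagerlyFinalizedMarker exists.
    template<typename U> static YesType check(typename U::IsEagerlyFinalizedMarker*);
    template<typename U> static NoType check(...);

public:
    static const bool value = sizeof(check<T>(nullptr)) == sizeof(YesType);
};

struct EagerThing { typedef int IsEagerlyFinalizedMarker; }; // the typedef EAGERLY_FINALIZE() would add
struct PlainThing { };

int main()
{
    std::printf("EagerThing: %d\n", HasEagerMarker<EagerThing>::value); // prints 1
    std::printf("PlainThing: %d\n", HasEagerMarker<PlainThing>::value); // prints 0
    return 0;
}
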
1119 template<typename T> class GarbageCollected {
1120 WTF_MAKE_NONCOPYABLE(GarbageCollected);
1121
1122 // For now direct allocation of arrays on the heap is not allowed.
1123 void* operator new[](size_t size);
1124
1125 #if OS(WIN) && COMPILER(MSVC)
1126 // Due to some quirkiness in the MSVC compiler we have to provide
1127 // the delete[] operator in the GarbageCollected subclasses as it
1128 // is called when a class is exported in a DLL.
1129 protected:
1130 void operator delete[](void* p)
1131 {
1132 ASSERT_NOT_REACHED();
1133 }
1134 #else
1135 void operator delete[](void* p);
1136 #endif
1137
1138 public:
1139 using GarbageCollectedBase = T;
1140
1141 void* operator new(size_t size)
1142 {
1143 return allocateObject(size, IsEagerlyFinalizedType<T>::value);
1144 }
1145
1146 static void* allocateObject(size_t size, bool eagerlySweep)
1147 {
1148 return Heap::allocate<T>(size, eagerlySweep);
1149 }
1150
1151 void operator delete(void* p)
1152 {
1153 ASSERT_NOT_REACHED();
1154 }
1155
1156 protected:
1157 GarbageCollected()
1158 {
1159 }
1160 };
1161
1162 // Assigning class types to their heaps.
1163 //
1164 // We use sized heaps for most 'normal' objects to improve memory locality.
1165 // It seems that the same type of objects are likely to be accessed together,
1166 // which means that we want to group objects by type. That's one reason
1167 // why we provide dedicated heaps for popular types (e.g., Node, CSSValue),
1168 // but it's not practical to prepare dedicated heaps for all types.
1169 // Thus we group objects by their sizes, hoping that this will approximately
1170 // group objects by their types.
1171 //
1172 // An exception to the use of sized heaps is made for class types that
1173 // require prompt finalization after a garbage collection. That is, their
1174 // instances have to be finalized early and cannot be delayed until lazy
1175 // sweeping kicks in for their heap and page. The EAGERLY_FINALIZE()
1176 // macro is used to declare a class (and its derived classes) as being
1177 // in need of eager finalization. Must be defined with 'public' visibility
1178 // for a class.
1179 //
1180
1181 inline int Heap::heapIndexForObjectSize(size_t size)
1182 {
1183 if (size < 64) {
1184 if (size < 32)
1185 return ThreadState::NormalPage1HeapIndex;
1186 return ThreadState::NormalPage2HeapIndex;
1187 }
1188 if (size < 128)
1189 return ThreadState::NormalPage3HeapIndex;
1190 return ThreadState::NormalPage4HeapIndex;
1191 }
1192
1193 inline bool Heap::isNormalHeapIndex(int index)
1194 {
1195 return index >= ThreadState::NormalPage1HeapIndex && index <= ThreadState::NormalPage4HeapIndex;
1196 }
1197
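For concreteness, heapIndexForObjectSize() above partitions allocation sizes into four buckets: [0, 32), [32, 64), [64, 128), and [128, maxHeapObjectSize). The standalone sketch below mirrors that mapping with placeholder index values; the real enumerators are ThreadState::NormalPage1HeapIndex through NormalPage4HeapIndex:

#include <cassert>
#include <cstddef>

// Placeholder indices standing in for the ThreadState enumerators.
enum MockHeapIndex { NormalPage1 = 0, NormalPage2, NormalPage3, NormalPage4 };

// Mirrors Heap::heapIndexForObjectSize().
static int heapIndexForObjectSize(size_t size)
{
    if (size < 64) {
        if (size < 32)
            return NormalPage1;
        return NormalPage2;
    }
    if (size < 128)
        return NormalPage3;
    return NormalPage4;
}

int main()
{
    assert(heapIndexForObjectSize(16) == NormalPage1);   // [0, 32)
    assert(heapIndexForObjectSize(48) == NormalPage2);   // [32, 64)
    assert(heapIndexForObjectSize(100) == NormalPage3);  // [64, 128)
    assert(heapIndexForObjectSize(4096) == NormalPage4); // [128, maxHeapObjectSize)
    return 0;
}
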
1198 #define DECLARE_EAGER_FINALIZATION_OPERATOR_NEW() \
1199 public: \
1200 GC_PLUGIN_IGNORE("491488") \
1201 void* operator new(size_t size) \
1202 { \
1203 return allocateObject(size, true); \
1204 }
1205
1206 #define IS_EAGERLY_FINALIZED() (pageFromObject(this)->heap()->heapIndex() == ThreadState::EagerSweepHeapIndex)
1207 #if ENABLE(ASSERT) && ENABLE(OILPAN)
1208 class VerifyEagerFinalization {
1209 public:
1210 ~VerifyEagerFinalization()
1211 {
1212 // If this assert triggers, the class annotated as eagerly
1213 // finalized ended up not being allocated on the heap
1214 // set aside for eager finalization. The reason is most
1215 // likely that the effective 'operator new' overload for
1216 // this class' leftmost base is for a class that is not
1217 // eagerly finalized. Declaring and defining an 'operator new'
1218 // for this class is what's required -- consider using
1219 // DECLARE_EAGER_FINALIZATION_OPERATOR_NEW().
1220 ASSERT(IS_EAGERLY_FINALIZED());
1221 }
1222 };
1223 #define EAGERLY_FINALIZE() \
1224 private: \
1225 VerifyEagerFinalization m_verifyEagerFinalization; \
1226 public: \
1227 typedef int IsEagerlyFinalizedMarker
1228 #else
1229 #define EAGERLY_FINALIZE() typedef int IsEagerlyFinalizedMarker
1230 #endif
1231
1232 #if !ENABLE(OILPAN) && ENABLE(LAZY_SWEEPING)
1233 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() EAGERLY_FINALIZE()
1234 #else
1235 #define EAGERLY_FINALIZE_WILL_BE_REMOVED()
1236 #endif
1237
1238 NO_SANITIZE_ADDRESS inline 825 NO_SANITIZE_ADDRESS inline
1239 size_t HeapObjectHeader::size() const 826 size_t HeapObjectHeader::size() const
1240 { 827 {
1241 size_t result = m_encoded & headerSizeMask; 828 size_t result = m_encoded & headerSizeMask;
1242 // Large objects should not refer to header->size(). 829 // Large objects should not refer to header->size().
1243 // The actual size of a large object is stored in 830 // The actual size of a large object is stored in
1244 // LargeObjectPage::m_payloadSize. 831 // LargeObjectPage::m_payloadSize.
1245 ASSERT(result != largeObjectSizeInHeader); 832 ASSERT(result != largeObjectSizeInHeader);
1246 ASSERT(!pageFromObject(this)->isLargeObjectPage()); 833 ASSERT(!pageFromObject(this)->isLargeObjectPage());
1247 return result; 834 return result;
(...skipping 94 matching lines...)
1342 Address result = headerAddress + sizeof(HeapObjectHeader); 929 Address result = headerAddress + sizeof(HeapObjectHeader);
1343 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); 930 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
1344 931
1345 SET_MEMORY_ACCESSIBLE(result, allocationSize - sizeof(HeapObjectHeader)); 932 SET_MEMORY_ACCESSIBLE(result, allocationSize - sizeof(HeapObjectHeader));
1346 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); 933 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1));
1347 return result; 934 return result;
1348 } 935 }
1349 return outOfLineAllocate(allocationSize, gcInfoIndex); 936 return outOfLineAllocate(allocationSize, gcInfoIndex);
1350 } 937 }
1351 938
1352 template<typename Derived>
1353 template<typename T>
1354 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object)
1355 {
1356 T** cell = reinterpret_cast<T**>(object);
1357 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell))
1358 *cell = nullptr;
1359 }
1360
1361 inline Address Heap::allocateOnHeapIndex(ThreadState* state, size_t size, int heapIndex, size_t gcInfoIndex)
1362 {
1363 ASSERT(state->isAllocationAllowed());
1364 ASSERT(heapIndex != ThreadState::LargeObjectHeapIndex);
1365 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex));
1366 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex);
1367 }
1368
1369 template<typename T>
1370 Address Heap::allocate(size_t size, bool eagerlySweep)
1371 {
1372 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
1373 return Heap::allocateOnHeapIndex(state, size, eagerlySweep ? ThreadState::EagerSweepHeapIndex : Heap::heapIndexForObjectSize(size), GCInfoTrait<T>::index());
1374 }
1375
1376 template<typename T>
1377 Address Heap::reallocate(void* previous, size_t size)
1378 {
1379 // Not intended to be a full C realloc() substitute;
1380 // realloc(nullptr, size) is not a supported alias for malloc(size).
1381
1382 // TODO(sof): promptly free the previous object.
1383 if (!size) {
1384 // If the new size is 0 this is considered equivalent to free(previous).
1385 return nullptr;
1386 }
1387
1388 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
1389 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous);
1390 BasePage* page = pageFromObject(previousHeader);
1391 ASSERT(page);
1392 int heapIndex = page->heap()->heapIndex();
1393 // Recompute the effective heap index if previous allocation
1394 // was on the normal heaps or a large object.
1395 if (isNormalHeapIndex(heapIndex) || heapIndex == ThreadState::LargeObjectHeapIndex)
1396 heapIndex = heapIndexForObjectSize(size);
1397
1398 // TODO(haraken): We don't support reallocate() for finalizable objects.
1399 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer());
1400 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index());
1401 Address address = Heap::allocateOnHeapIndex(state, size, heapIndex, GCInfoTrait<T>::index());
1402 size_t copySize = previousHeader->payloadSize();
1403 if (copySize > size)
1404 copySize = size;
1405 memcpy(address, previous, copySize);
1406 return address;
1407 }
1408
1409 } // namespace blink 939 } // namespace blink
1410 940
1411 #endif // Heap_h 941 #endif // HeapPage_h