| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 29 matching lines...) |
| 40 #include "wtf/PartitionAlloc.h" | 40 #include "wtf/PartitionAlloc.h" |
| 41 #include "wtf/Partitions.h" | 41 #include "wtf/Partitions.h" |
| 42 | 42 |
| 43 #include <string.h> | 43 #include <string.h> |
| 44 | 44 |
| 45 namespace WTF { | 45 namespace WTF { |
| 46 | 46 |
| 47 class PartitionAllocatorDummyVisitor; | 47 class PartitionAllocatorDummyVisitor; |
| 48 | 48 |
| 49 class WTF_EXPORT PartitionAllocator { | 49 class WTF_EXPORT PartitionAllocator { |
| 50 public: | 50 public: |
| 51 typedef PartitionAllocatorDummyVisitor Visitor; | 51 typedef PartitionAllocatorDummyVisitor Visitor; |
| 52 static const bool isGarbageCollected = false; | 52 static const bool isGarbageCollected = false; |
| 53 | 53 |
| 54 template<typename T> | 54 template <typename T> |
| 55 static size_t quantizedSize(size_t count) | 55 static size_t quantizedSize(size_t count) { |
| 56 { | 56 RELEASE_ASSERT(count <= kGenericMaxDirectMapped / sizeof(T)); |
| 57 RELEASE_ASSERT(count <= kGenericMaxDirectMapped / sizeof(T)); | 57 return partitionAllocActualSize(Partitions::bufferPartition(), count * sizeof(T)); |
| 58 return partitionAllocActualSize(Partitions::bufferPartition(), count * sizeof(T)); | 58 } |
| 59 } | 59 template <typename T> |
| 60 template <typename T> | 60 static T* allocateVectorBacking(size_t size) { |
| 61 static T* allocateVectorBacking(size_t size) | 61 return reinterpret_cast<T*>(allocateBacking(size)); |
| 62 { | 62 } |
| 63 return reinterpret_cast<T*>(allocateBacking(size)); | 63 template <typename T> |
| 64 } | 64 static T* allocateExpandedVectorBacking(size_t size) { |
| 65 template <typename T> | 65 return reinterpret_cast<T*>(allocateBacking(size)); |
| 66 static T* allocateExpandedVectorBacking(size_t size) | 66 } |
| 67 { | 67 static void freeVectorBacking(void* address); |
| 68 return reinterpret_cast<T*>(allocateBacking(size)); | 68 static inline bool expandVectorBacking(void*, size_t) { |
| 69 } | 69 return false; |
| 70 static void freeVectorBacking(void* address); | 70 } |
| 71 static inline bool expandVectorBacking(void*, size_t) | 71 static inline bool shrinkVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) { |
| 72 { | 72 // Optimization: if we're downsizing inside the same allocator bucket, |
| 73 return false; | 73 // we can skip reallocation. |
| 74 } | 74 return quantizedCurrentSize == quantizedShrunkSize; |
| 75 static inline bool shrinkVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) | 75 } |
| 76 { | 76 template <typename T> |
| 77 // Optimization: if we're downsizing inside the same allocator bucket, | 77 static T* allocateInlineVectorBacking(size_t size) { return allocateVectorBacking<T>(size); } |
| 78 // we can skip reallocation. | 78 static inline void freeInlineVectorBacking(void* address) { freeVectorBacking(address); } |
| 79 return quantizedCurrentSize == quantizedShrunkSize; | 79 static inline bool expandInlineVectorBacking(void*, size_t) { return false; } |
| 80 } | 80 static inline bool shrinkInlineVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) { return shrinkVectorBacking(address, quantizedCurrentSize, quantizedShrunkSize); } |
| 81 template <typename T> | |
| 82 static T* allocateInlineVectorBacking(size_t size) { return allocateVectorBacking<T>(size); } | |
| 83 static inline void freeInlineVectorBacking(void* address) { freeVectorBacking(address); } | |
| 84 static inline bool expandInlineVectorBacking(void*, size_t) { return false; } | |
| 85 static inline bool shrinkInlineVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) { return shrinkVectorBacking(address, quantizedCurrentSize, quantizedShrunkSize); } | |
| 86 | 81 |
| 87 template <typename T, typename HashTable> | 82 template <typename T, typename HashTable> |
| 88 static T* allocateHashTableBacking(size_t size) | 83 static T* allocateHashTableBacking(size_t size) { |
| 89 { | 84 return reinterpret_cast<T*>(allocateBacking(size)); |
| 90 return reinterpret_cast<T*>(allocateBacking(size)); | 85 } |
| 91 } | 86 template <typename T, typename HashTable> |
| 92 template <typename T, typename HashTable> | 87 static T* allocateZeroedHashTableBacking(size_t size) { |
| 93 static T* allocateZeroedHashTableBacking(size_t size) | 88 void* result = allocateBacking(size); |
| 94 { | 89 memset(result, 0, size); |
| 95 void* result = allocateBacking(size); | 90 return reinterpret_cast<T*>(result); |
| 96 memset(result, 0, size); | 91 } |
| 97 return reinterpret_cast<T*>(result); | 92 static void freeHashTableBacking(void* address); |
| 98 } | |
| 99 static void freeHashTableBacking(void* address); | |
| 100 | 93 |
| 101 template <typename Return, typename Metadata> | 94 template <typename Return, typename Metadata> |
| 102 static Return malloc(size_t size) | 95 static Return malloc(size_t size) { |
| 103 { | 96 return reinterpret_cast<Return>(Partitions::fastMalloc(size)); |
| 104 return reinterpret_cast<Return>(Partitions::fastMalloc(size)); | 97 } |
| 105 } | |
| 106 | 98 |
| 107 static inline bool expandHashTableBacking(void*, size_t) | 99 static inline bool expandHashTableBacking(void*, size_t) { |
| 108 { | 100 return false; |
| 109 return false; | 101 } |
| 110 } | 102 static void free(void* address) { |
| 111 static void free(void* address) | 103 Partitions::fastFree(address); |
| 112 { | 104 } |
| 113 Partitions::fastFree(address); | 105 template <typename T> |
| 114 } | 106 static void* newArray(size_t bytes) { |
| 115 template<typename T> | 107 return malloc<void*, void>(bytes); |
| 116 static void* newArray(size_t bytes) | 108 } |
| 117 { | 109 static void |
| 118 return malloc<void*, void>(bytes); | 110 deleteArray(void* ptr) { |
| 119 } | 111 free(ptr); // Not the system free, the one from this class. |
| 120 static void | 112 } |
| 121 deleteArray(void* ptr) | |
| 122 { | |
| 123 free(ptr); // Not the system free, the one from this class. | |
| 124 } | |
| 125 | 113 |
| 126 static bool isAllocationAllowed() { return true; } | 114 static bool isAllocationAllowed() { return true; } |
| 127 template<typename T> | 115 template <typename T> |
| 128 static bool isHeapObjectAlive(T* object) | 116 static bool isHeapObjectAlive(T* object) { |
| 129 { | 117 ASSERT_NOT_REACHED(); |
| 130 ASSERT_NOT_REACHED(); | 118 return false; |
| 131 return false; | 119 } |
| 132 } | |
| 133 | 120 |
| 134 static void markNoTracing(...) | 121 static void markNoTracing(...) { |
| 135 { | 122 ASSERT_NOT_REACHED(); |
| 136 ASSERT_NOT_REACHED(); | 123 } |
| 137 } | |
| 138 | 124 |
| 139 static void registerDelayedMarkNoTracing(...) | 125 static void registerDelayedMarkNoTracing(...) { |
| 140 { | 126 ASSERT_NOT_REACHED(); |
| 141 ASSERT_NOT_REACHED(); | 127 } |
| 142 } | |
| 143 | 128 |
| 144 static void registerWeakMembers(...) | 129 static void registerWeakMembers(...) { |
| 145 { | 130 ASSERT_NOT_REACHED(); |
| 146 ASSERT_NOT_REACHED(); | 131 } |
| 147 } | |
| 148 | 132 |
| 149 static void registerWeakTable(...) | 133 static void registerWeakTable(...) { |
| 150 { | 134 ASSERT_NOT_REACHED(); |
| 151 ASSERT_NOT_REACHED(); | 135 } |
| 152 } | |
| 153 | 136 |
| 154 #if ENABLE(ASSERT) | 137 #if ENABLE(ASSERT) |
| 155 static bool weakTableRegistered(...) | 138 static bool weakTableRegistered(...) { |
| 156 { | 139 ASSERT_NOT_REACHED(); |
| 157 ASSERT_NOT_REACHED(); | 140 return false; |
| 158 return false; | 141 } |
| 159 } | |
| 160 #endif | 142 #endif |
| 161 | 143 |
| 162 template<typename T, typename Traits> | 144 template <typename T, typename Traits> |
| 163 static void trace(...) | 145 static void trace(...) { |
| 164 { | 146 ASSERT_NOT_REACHED(); |
| 165 ASSERT_NOT_REACHED(); | 147 } |
| 166 } | |
| 167 | 148 |
| 168 template<typename T> | 149 template <typename T> |
| 169 struct OtherType { | 150 struct OtherType { |
| 170 typedef T* Type; | 151 typedef T* Type; |
| 171 }; | 152 }; |
| 172 | 153 |
| 173 template<typename T> | 154 template <typename T> |
| 174 static T& getOther(T* other) | 155 static T& getOther(T* other) { |
| 175 { | 156 return *other; |
| 176 return *other; | 157 } |
| 177 } | |
| 178 | 158 |
| 179 static void enterGCForbiddenScope() { } | 159 static void enterGCForbiddenScope() {} |
| 180 static void leaveGCForbiddenScope() { } | 160 static void leaveGCForbiddenScope() {} |
| 181 | 161 |
| 182 private: | 162 private: |
| 183 static void* allocateBacking(size_t); | 163 static void* allocateBacking(size_t); |
| 184 }; | 164 }; |
| 185 | 165 |
| 186 // The Windows compiler seems to be very eager to instantiate things it won't | 166 // The Windows compiler seems to be very eager to instantiate things it won't |
| 187 // need, so unless we have this class we get compile errors. | 167 // need, so unless we have this class we get compile errors. |
| 188 class PartitionAllocatorDummyVisitor { | 168 class PartitionAllocatorDummyVisitor { |
| 189 public: | 169 public: |
| 190 template<typename T> inline bool isHeapObjectAlive(T obj) | 170 template <typename T> |
| 191 { | 171 inline bool isHeapObjectAlive(T obj) { |
| 192 ASSERT_NOT_REACHED(); | 172 ASSERT_NOT_REACHED(); |
| 193 return false; | 173 return false; |
| 194 } | 174 } |
| 195 }; | 175 }; |
| 196 | 176 |
| 197 } // namespace WTF | 177 } // namespace WTF |
| 198 | 178 |
| 199 #define WTF_USE_ALLOCATOR(ClassName, Allocator) \ | 179 #define WTF_USE_ALLOCATOR(ClassName, Allocator) \ |
| 200 public: \ | 180 public: \ |
| 201 void* operator new(size_t size) \ | 181 void* operator new(size_t size) { \ |
| 202 { \ | 182 return Allocator::template malloc<void*, ClassName>(size); \ |
| 203 return Allocator::template malloc<void*, ClassName>(size); \ | 183 } \ |
| 204 } \ | 184 void operator delete(void* p) { Allocator::free(p); } \ |
| 205 void operator delete(void* p) { Allocator::free(p); } \ | 185 void* operator new[](size_t size) { return Allocator::template newArray<ClassName>(size); } \ |
| 206 void* operator new[](size_t size) { return Allocator::template newArray<ClassName>(size); } \ | 186 void operator delete[](void* p) { Allocator::deleteArray(p); } \ |
| 207 void operator delete[](void* p) { Allocator::deleteArray(p); } \ | 187 void* operator new(size_t, NotNullTag, void* location) { \ |
| 208 void* operator new(size_t, NotNullTag, void* location) \ | 188 ASSERT(location); \ |
| 209 { \ | 189 return location; \ |
| 210 ASSERT(location); \ | 190 } \ |
| 211 return location; \ | 191 \ |
| 212 } \ | 192 private: \ |
| 213 private: \ | 193 typedef int __thisIsHereToForceASemicolonAfterThisMacro |
| 214 typedef int __thisIsHereToForceASemicolonAfterThisMacro | |
| 215 | 194 |
| 216 using WTF::PartitionAllocator; | 195 using WTF::PartitionAllocator; |
| 217 | 196 |
| 218 #endif // WTF_PartitionAllocator_h | 197 #endif // WTF_PartitionAllocator_h |
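A note on the shrinkVectorBacking() contract shown above: it reports success only when the current and shrunk requests quantize to the same partition bucket, in which case the caller keeps the existing backing store untouched. Below is a minimal caller-side sketch of that interplay; shrinkCapacity is a hypothetical helper written for illustration, not the actual Vector implementation, and it assumes a trivially copyable T.

    // Sketch only: shows how quantizedSize() and shrinkVectorBacking() cooperate.
    // shrinkCapacity is a hypothetical helper, not part of this CL.
    // Assumes <string.h> for memcpy and a trivially copyable T.
    template <typename T>
    void shrinkCapacity(T*& buffer, size_t oldCount, size_t newCount) {
      // quantizedSize() rounds a request up to its partition bucket size.
      size_t quantizedCurrent = WTF::PartitionAllocator::quantizedSize<T>(oldCount);
      size_t quantizedShrunk = WTF::PartitionAllocator::quantizedSize<T>(newCount);
      if (WTF::PartitionAllocator::shrinkVectorBacking(buffer, quantizedCurrent, quantizedShrunk))
        return;  // Same bucket: the existing allocation can be kept as-is.
      // Different bucket: allocate a smaller backing and move the surviving elements.
      T* newBuffer = WTF::PartitionAllocator::allocateVectorBacking<T>(newCount * sizeof(T));
      memcpy(newBuffer, buffer, newCount * sizeof(T));
      WTF::PartitionAllocator::freeVectorBacking(buffer);
      buffer = newBuffer;
    }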
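For reference on the WTF_USE_ALLOCATOR macro at the bottom of the file: it is placed inside a class definition and must be followed by a semicolon, which is exactly what the trailing __thisIsHereToForceASemicolonAfterThisMacro typedef enforces. A minimal usage sketch follows; the class name ExampleBuffer is hypothetical.

    // ExampleBuffer is hypothetical; any partition-allocated type follows this shape.
    // The macro ends in a private: section, so re-open public: for the class API.
    class ExampleBuffer {
      WTF_USE_ALLOCATOR(ExampleBuffer, WTF::PartitionAllocator);
     public:
      explicit ExampleBuffer(size_t size) : m_size(size) {}
     private:
      size_t m_size;
    };

    // operator new/delete now route through Partitions::fastMalloc/fastFree:
    ExampleBuffer* buffer = new ExampleBuffer(64);
    delete buffer;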