OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 1104 matching lines...) | |
1115 { | 1115 { |
1116 ASSERT_NOT_REACHED(); | 1116 ASSERT_NOT_REACHED(); |
1117 } | 1117 } |
1118 | 1118 |
1119 protected: | 1119 protected: |
1120 GarbageCollected() | 1120 GarbageCollected() |
1121 { | 1121 { |
1122 } | 1122 } |
1123 }; | 1123 }; |
1124 | 1124 |
1125 // We use sized heaps for normal pages to improve memory locality. | 1125 // Assigning class types to their heaps. |
1126 // | |
1127 // We use sized heaps for most 'normal' objcts to improve memory locality. | |
haraken 2015/05/11 01:20:27: objects
1126 // It seems that the same type of objects are likely to be accessed together, | 1128 // It seems that the same type of objects are likely to be accessed together, |
1127 // which means that we want to group objects by type. That's why we provide | 1129 // which means that we want to group objects by type. That's one reason |
1128 // dedicated heaps for popular types (e.g., Node, CSSValue), but it's not | 1130 // why we provide dedicated heaps for popular types (e.g., Node, CSSValue), |
1129 // practical to prepare dedicated heaps for all types. Thus we group objects | 1131 // but it's not practical to prepare dedicated heaps for all types. |
1130 // by their sizes, hoping that it will approximately group objects | 1132 // Thus we group objects by their sizes, hoping that this will approximately |
1131 // by their types. | 1133 // group objects by their types. |
1132 static int heapIndexForNormalHeap(size_t size) | 1134 // |
1133 { | 1135 // An exception to the use of sized heaps is made for class types that |
1134 if (size < 64) { | 1136 // require prompt finalization after a garbage collection. That is, their |
1135 if (size < 32) | 1137 // instances have to be finalized early and cannot be delayed until lazy |
1138 // sweeping kicks in for their heap and page. The WILL_BE_EAGERLY_SWEPT() | |
1139 // macro is used to declare a class (and its derived classes) as being | |
1140 // in need of 'eager sweeping'. | |
1141 // | |
1142 template<typename T, typename Enabled = void> | |
1143 class HeapIndexTrait { | |
1144 public: | |
1145 static int heapIndexForObject(size_t size) | |
1146 { | |
1147 if (size < 64) { | |
1148 if (size < 32) | |
1136 return NormalPage1HeapIndex; | 1149 return NormalPage1HeapIndex; |
haraken 2015/05/11 01:20:27: Indent
1137 return NormalPage2HeapIndex; | 1150 return NormalPage2HeapIndex; |
1151 } | |
1152 if (size < 128) | |
1153 return NormalPage3HeapIndex; | |
haraken 2015/05/11 01:20:27: Indent
1154 return NormalPage4HeapIndex; | |
1138 } | 1155 } |
1139 if (size < 128) | 1156 }; |
1140 return NormalPage3HeapIndex; | 1157 |
1141 return NormalPage4HeapIndex; | 1158 #define WILL_BE_EAGERLY_SWEPT(TYPE) \ |
haraken 2015/05/11 01:20:27: WILL_BE_EAGERLY_SWEPT => EAGERLY_SWEPT ?
sof 2015/05/11 05:20:22: That doesn't give enough context about what this i
haraken 2015/05/11 06:44:07: XWillBeY means that it is X on non-oilpan and it i
sof 2015/05/19 15:00:15: Naming. I've now prefixed OILPAN_ to give some ha
1159 template<typename T> \ | |
1160 class HeapIndexTrait<T, typename WTF::EnableIf<WTF::IsSubclass<T, TYPE>::value>::Type> { \ |
1161 public: \ | |
1162 static int heapIndexForObject(size_t) \ | |
1163 { \ | |
1164 return EagerSweepHeapIndex; \ | |
1165 } \ | |
1142 } | 1166 } |
1143 | 1167 |
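For context on how the trait and macro compose: a minimal sketch, assuming a hypothetical class SocketWrapper whose destructor must run promptly after a garbage collection (the class name is illustrative, not part of this patch, and the snippet assumes this header is included):

    // A type opting in to eager sweeping derives from the finalized base
    // and is declared via the macro; the specialization the macro expands
    // to overrides the size-bucketing primary template above.
    class SocketWrapper : public GarbageCollectedFinalized<SocketWrapper> {
    public:
        ~SocketWrapper() { /* releases a resource; must not wait for lazy sweeping */ }
    };
    WILL_BE_EAGERLY_SWEPT(SocketWrapper);

    // HeapIndexTrait<SocketWrapper>::heapIndexForObject(size) now returns
    // EagerSweepHeapIndex for any size, while types without the macro still
    // fall into NormalPage1..4HeapIndex by size bucket.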
1144 NO_SANITIZE_ADDRESS inline | 1168 NO_SANITIZE_ADDRESS inline |
1145 size_t HeapObjectHeader::size() const | 1169 size_t HeapObjectHeader::size() const |
1146 { | 1170 { |
1147 size_t result = m_encoded & headerSizeMask; | 1171 size_t result = m_encoded & headerSizeMask; |
1148 // Large objects should not refer to header->size(). | 1172 // Large objects should not refer to header->size(). |
1149 // The actual size of a large object is stored in | 1173 // The actual size of a large object is stored in |
1150 // LargeObjectPage::m_payloadSize. | 1174 // LargeObjectPage::m_payloadSize. |
1151 ASSERT(result != largeObjectSizeInHeader); | 1175 ASSERT(result != largeObjectSizeInHeader); |
(...skipping 100 matching lines...) Loading... | |
1252 ASSERT(state->isAllocationAllowed()); | 1276 ASSERT(state->isAllocationAllowed()); |
1253 ASSERT(heapIndex != LargeObjectHeapIndex); | 1277 ASSERT(heapIndex != LargeObjectHeapIndex); |
1254 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex)); | 1278 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex)); |
1255 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex); | 1279 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex); |
1256 } | 1280 } |
1257 | 1281 |
1258 template<typename T> | 1282 template<typename T> |
1259 Address Heap::allocate(size_t size) | 1283 Address Heap::allocate(size_t size) |
1260 { | 1284 { |
1261 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1285 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
1262 return Heap::allocateOnHeapIndex(state, size, heapIndexForNormalHeap(size), GCInfoTrait<T>::index()); | 1286 return Heap::allocateOnHeapIndex(state, size, HeapIndexTrait<T>::heapIndexForObject(size), GCInfoTrait<T>::index()); |
1263 } | 1287 } |
1264 | 1288 |
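To make the routing concrete, a hedged illustration of the size buckets the primary HeapIndexTrait template selects (SomeType is hypothetical and assumed to have no eager-sweep specialization):

    Address a = Heap::allocate<SomeType>(24);   // size < 32   -> NormalPage1HeapIndex
    Address b = Heap::allocate<SomeType>(40);   // 32..63      -> NormalPage2HeapIndex
    Address c = Heap::allocate<SomeType>(96);   // 64..127     -> NormalPage3HeapIndex
    Address d = Heap::allocate<SomeType>(4096); // size >= 128 -> NormalPage4HeapIndex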
1265 template<typename T> | 1289 template<typename T> |
1266 Address Heap::reallocate(void* previous, size_t size) | 1290 Address Heap::reallocate(void* previous, size_t size) |
1267 { | 1291 { |
1268 if (!size) { | 1292 if (!size) { |
1269 // If the new size is 0 this is equivalent to either free(previous) or | 1293 // If the new size is 0 this is equivalent to either free(previous) or |
1270 // malloc(0). In both cases we do nothing and return nullptr. | 1294 // malloc(0). In both cases we do nothing and return nullptr. |
1271 return nullptr; | 1295 return nullptr; |
1272 } | 1296 } |
1273 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1297 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
1274 // TODO(haraken): reallocate() should use the heap that the original object | 1298 // TODO(haraken): reallocate() should use the heap that the original object |
1275 // is using. This won't be a big deal since reallocate() is rarely used. | 1299 // is using. This won't be a big deal since reallocate() is rarely used. |
1276 Address address = Heap::allocateOnHeapIndex(state, size, heapIndexForNormalHeap(size), GCInfoTrait<T>::index()); | 1300 Address address = Heap::allocateOnHeapIndex(state, size, HeapIndexTrait<T>::heapIndexForObject(size), GCInfoTrait<T>::index()); |
1277 if (!previous) { | 1301 if (!previous) { |
1278 // This is equivalent to malloc(size). | 1302 // This is equivalent to malloc(size). |
1279 return address; | 1303 return address; |
1280 } | 1304 } |
1281 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous); | 1305 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous); |
1282 // TODO(haraken): We don't support reallocate() for finalizable objects. | 1306 // TODO(haraken): We don't support reallocate() for finalizable objects. |
1283 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); | 1307 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); |
1284 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); | 1308 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); |
1285 size_t copySize = previousHeader->payloadSize(); | 1309 size_t copySize = previousHeader->payloadSize(); |
1286 if (copySize > size) | 1310 if (copySize > size) |
1287 copySize = size; | 1311 copySize = size; |
1288 memcpy(address, previous, copySize); | 1312 memcpy(address, previous, copySize); |
1289 return address; | 1313 return address; |
1290 } | 1314 } |
1291 | 1315 |
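And a short sketch of the reallocate() contract as implemented above (Buffer is a hypothetical non-finalizable type, per the ASSERT on hasFinalizer()):

    Address p = Heap::reallocate<Buffer>(nullptr, 64); // no previous: behaves like allocate<Buffer>(64)
    Address q = Heap::reallocate<Buffer>(p, 128);      // new block; copies min(64, 128) = 64 payload bytes
    Address r = Heap::reallocate<Buffer>(q, 0);        // size 0: returns nullptr, old block is left to the GC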
1292 } // namespace blink | 1316 } // namespace blink |
1293 | 1317 |
1294 #endif // Heap_h | 1318 #endif // Heap_h |