Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 1057 matching lines...) | |
| 1068 { | 1068 { |
| 1069 ASSERT_NOT_REACHED(); | 1069 ASSERT_NOT_REACHED(); |
| 1070 } | 1070 } |
| 1071 | 1071 |
| 1072 protected: | 1072 protected: |
| 1073 GarbageCollected() | 1073 GarbageCollected() |
| 1074 { | 1074 { |
| 1075 } | 1075 } |
| 1076 }; | 1076 }; |
| 1077 | 1077 |
| 1078 // We use sized heaps for normal pages to improve memory locality. | 1078 // Assigning class types to their heaps. |
| 1079 // | |
| 1080 // We use sized heaps for most 'normal' objects to improve memory locality. | |
| 1079 // It seems that the same type of objects are likely to be accessed together, | 1081 // It seems that the same type of objects are likely to be accessed together, |
| 1080 // which means that we want to group objects by type. That's why we provide | 1082 // which means that we want to group objects by type. That's one reason |
| 1081 // dedicated heaps for popular types (e.g., Node, CSSValue), but it's not | 1083 // why we provide dedicated heaps for popular types (e.g., Node, CSSValue), |
| 1082 // practical to prepare dedicated heaps for all types. Thus we group objects | 1084 // but it's not practical to prepare dedicated heaps for all types. |
| 1083 // by their sizes, hoping that it will approximately group objects | 1085 // Thus we group objects by their sizes, hoping that this will approximately |
| 1084 // by their types. | 1086 // group objects by their types. |
| 1085 static int heapIndexForNormalHeap(size_t size) | 1087 // |
| 1086 { | 1088 // An exception to the use of sized heaps is made for class types that |
| 1087 if (size < 64) { | 1089 // require prompt finalization after a garbage collection. That is, their |
| 1088 if (size < 32) | 1090 // instances have to be finalized early and cannot be delayed until lazy |
| 1091 // sweeping kicks in for their heap and page. The OILPAN_EAGERLY_SWEEP() | |
| 1092 // macro is used to declare a class (and its derived classes) as being | |
| 1093 // in need of 'eager sweeping'. | |
| 1094 // | |
| 1095 template<typename T, typename Enabled = void> | |
| 1096 class HeapIndexTrait { | |
| 1097 public: | |
| 1098 static int heapIndexForObject(size_t size) | |
| 1099 { | |
| 1100 if (size < 64) { | |
| 1101 if (size < 32) | |
| 1089 return NormalPage1HeapIndex; | 1102 return NormalPage1HeapIndex; |
| 1090 return NormalPage2HeapIndex; | 1103 return NormalPage2HeapIndex; |
|
haraken
2015/05/19 23:23:37
Fix indentation. Or:
return size < 32 ? NormalP
sof
2015/05/20 09:43:01
Done; sorry, bad editor configuration.
| |
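haraken's truncated suggestion above appears to be the ternary form of the size test; a minimal sketch of that variant (the landed diff keeps the if/else form, so this is only an illustration):

```cpp
// Hypothetical ternary-style variant of HeapIndexTrait<T>::heapIndexForObject,
// reconstructing haraken's truncated suggestion; not the code under review.
static int heapIndexForObject(size_t size)
{
    if (size < 64)
        return size < 32 ? NormalPage1HeapIndex : NormalPage2HeapIndex;
    return size < 128 ? NormalPage3HeapIndex : NormalPage4HeapIndex;
}
```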
| 1104 } | |
| 1105 if (size < 128) | |
| 1106 return NormalPage3HeapIndex; | |
| 1107 return NormalPage4HeapIndex; | |
|
haraken
2015/05/19 23:23:37
Ditto.
| |
| 1091 } | 1108 } |
| 1092 if (size < 128) | 1109 }; |
| 1093 return NormalPage3HeapIndex; | 1110 |
| 1094 return NormalPage4HeapIndex; | 1111 #define OILPAN_EAGERLY_SWEEP(TYPE) \ |
|
haraken
2015/05/19 23:23:37
Add a TODO to mention that the eager sweeping is a
haraken
2015/05/19 23:23:37
Hmm, we don't use a OILPAN_ prefix in other macros
sof
2015/05/20 09:43:00
That would be the ideal outcome, but it is not clear
sof
2015/05/20 09:43:01
Not worth the time trying to change your mind abou
| |
| 1112 template<typename T> \ | |
| 1113 class HeapIndexTrait<T, typename WTF::EnableIf<WTF::IsSubclass<T, TYPE>::value>::Type> { \ | |
| 1114 public: \ | |
| 1115 static int heapIndexForObject(size_t) \ | |
| 1116 { \ | |
| 1117 return EagerSweepHeapIndex; \ | |
| 1118 } \ | |
| 1095 } | 1119 } |
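For orientation, a hedged usage sketch of the new macro, assuming a hypothetical class EagerlyFinalizedThing (not part of the patch): declaring a class eagerly swept specializes HeapIndexTrait for it and its subclasses, so their allocations land on the eager-sweep heap instead of a sized normal heap.

```cpp
// Hypothetical example: a type whose destructor must run promptly after a GC,
// before lazy sweeping would otherwise reach its page.
class EagerlyFinalizedThing : public GarbageCollectedFinalized<EagerlyFinalizedThing> {
public:
    ~EagerlyFinalizedThing(); // releases a resource that cannot wait for lazy sweeping
    void trace(Visitor*) { }
};
OILPAN_EAGERLY_SWEEP(EagerlyFinalizedThing);

// With the specialization in place, HeapIndexTrait<EagerlyFinalizedThing>
// ignores the allocation size and always answers EagerSweepHeapIndex.
```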
| 1096 | 1120 |
| 1097 NO_SANITIZE_ADDRESS inline | 1121 NO_SANITIZE_ADDRESS inline |
| 1098 size_t HeapObjectHeader::size() const | 1122 size_t HeapObjectHeader::size() const |
| 1099 { | 1123 { |
| 1100 size_t result = m_encoded & headerSizeMask; | 1124 size_t result = m_encoded & headerSizeMask; |
| 1101 // Large objects should not refer to header->size(). | 1125 // Large objects should not refer to header->size(). |
| 1102 // The actual size of a large object is stored in | 1126 // The actual size of a large object is stored in |
| 1103 // LargeObjectPage::m_payloadSize. | 1127 // LargeObjectPage::m_payloadSize. |
| 1104 ASSERT(result != largeObjectSizeInHeader); | 1128 ASSERT(result != largeObjectSizeInHeader); |
| (...skipping 100 matching lines...) | |
| 1205 ASSERT(state->isAllocationAllowed()); | 1229 ASSERT(state->isAllocationAllowed()); |
| 1206 ASSERT(heapIndex != LargeObjectHeapIndex); | 1230 ASSERT(heapIndex != LargeObjectHeapIndex); |
| 1207 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex)); | 1231 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex)); |
| 1208 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex); | 1232 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex); |
| 1209 } | 1233 } |
| 1210 | 1234 |
| 1211 template<typename T> | 1235 template<typename T> |
| 1212 Address Heap::allocate(size_t size) | 1236 Address Heap::allocate(size_t size) |
| 1213 { | 1237 { |
| 1214 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1238 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
| 1215 return Heap::allocateOnHeapIndex(state, size, heapIndexForNormalHeap(size), GCInfoTrait<T>::index()); | 1239 return Heap::allocateOnHeapIndex(state, size, HeapIndexTrait<T>::heapIndexForObject(size), GCInfoTrait<T>::index()); |
| 1216 } | 1240 } |
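Heap::allocate<T>() above now asks HeapIndexTrait<T> for the heap index; roughly, the default trait buckets by size while the OILPAN_EAGERLY_SWEEP() specialization ignores the size. A sketch with hypothetical types (SomeNormalThing is an ordinary GarbageCollected class, EagerlyFinalizedThing as in the earlier sketch):

```cpp
// Illustration only, not part of the patch: which index the trait picks.
void heapIndexTraitExamples()
{
    ASSERT(HeapIndexTrait<SomeNormalThing>::heapIndexForObject(24) == NormalPage1HeapIndex);
    ASSERT(HeapIndexTrait<SomeNormalThing>::heapIndexForObject(48) == NormalPage2HeapIndex);
    ASSERT(HeapIndexTrait<SomeNormalThing>::heapIndexForObject(96) == NormalPage3HeapIndex);
    ASSERT(HeapIndexTrait<SomeNormalThing>::heapIndexForObject(256) == NormalPage4HeapIndex);
    // Eagerly swept types bypass the sized heaps entirely.
    ASSERT(HeapIndexTrait<EagerlyFinalizedThing>::heapIndexForObject(96) == EagerSweepHeapIndex);
}
```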
| 1217 | 1241 |
| 1218 template<typename T> | 1242 template<typename T> |
| 1219 Address Heap::reallocate(void* previous, size_t size) | 1243 Address Heap::reallocate(void* previous, size_t size) |
| 1220 { | 1244 { |
| 1221 if (!size) { | 1245 if (!size) { |
| 1222 // If the new size is 0 this is equivalent to either free(previous) or | 1246 // If the new size is 0 this is equivalent to either free(previous) or |
| 1223 // malloc(0). In both cases we do nothing and return nullptr. | 1247 // malloc(0). In both cases we do nothing and return nullptr. |
| 1224 return nullptr; | 1248 return nullptr; |
| 1225 } | 1249 } |
| 1226 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1250 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
| 1227 // TODO(haraken): reallocate() should use the heap that the original object | 1251 // TODO(haraken): reallocate() should use the heap that the original object |
| 1228 // is using. This won't be a big deal since reallocate() is rarely used. | 1252 // is using. This won't be a big deal since reallocate() is rarely used. |
| 1229 Address address = Heap::allocateOnHeapIndex(state, size, heapIndexForNormalHeap(size), GCInfoTrait<T>::index()); | 1253 Address address = Heap::allocateOnHeapIndex(state, size, HeapIndexTrait<T>::heapIndexForObject(size), GCInfoTrait<T>::index()); |
| 1230 if (!previous) { | 1254 if (!previous) { |
| 1231 // This is equivalent to malloc(size). | 1255 // This is equivalent to malloc(size). |
| 1232 return address; | 1256 return address; |
| 1233 } | 1257 } |
| 1234 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous); | 1258 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous); |
| 1235 // TODO(haraken): We don't support reallocate() for finalizable objects. | 1259 // TODO(haraken): We don't support reallocate() for finalizable objects. |
| 1236 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); | 1260 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); |
| 1237 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); | 1261 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); |
| 1238 size_t copySize = previousHeader->payloadSize(); | 1262 size_t copySize = previousHeader->payloadSize(); |
| 1239 if (copySize > size) | 1263 if (copySize > size) |
| 1240 copySize = size; | 1264 copySize = size; |
| 1241 memcpy(address, previous, copySize); | 1265 memcpy(address, previous, copySize); |
| 1242 return address; | 1266 return address; |
| 1243 } | 1267 } |
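Heap::reallocate<T>() mirrors realloc() semantics for non-finalizable heap objects, as the assertions above require. A hedged usage sketch (SomeNormalThing again hypothetical):

```cpp
// Hypothetical usage, not part of the patch; SomeNormalThing has no finalizer.
Address p = Heap::reallocate<SomeNormalThing>(nullptr, 64); // like malloc(64)
p = Heap::reallocate<SomeNormalThing>(p, 128);              // grows; copies the old payload, capped at the new size
p = Heap::reallocate<SomeNormalThing>(p, 0);                // size 0: returns nullptr, the old object is left to the GC
```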
| 1244 | 1268 |
| 1245 } // namespace blink | 1269 } // namespace blink |
| 1246 | 1270 |
| 1247 #endif // Heap_h | 1271 #endif // Heap_h |