OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 1057 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1068 { | 1068 { |
1069 ASSERT_NOT_REACHED(); | 1069 ASSERT_NOT_REACHED(); |
1070 } | 1070 } |
1071 | 1071 |
1072 protected: | 1072 protected: |
1073 GarbageCollected() | 1073 GarbageCollected() |
1074 { | 1074 { |
1075 } | 1075 } |
1076 }; | 1076 }; |
1077 | 1077 |
1078 // We use sized heaps for normal pages to improve memory locality. | 1078 // Assigning class types to their heaps. |
| 1079 // |
 | 1080 // We use sized heaps for most 'normal' objects to improve memory locality. |
1079 // It seems that the same type of objects are likely to be accessed together, | 1081 // It seems that the same type of objects are likely to be accessed together, |
1080 // which means that we want to group objects by type. That's why we provide | 1082 // which means that we want to group objects by type. That's one reason |
1081 // dedicated heaps for popular types (e.g., Node, CSSValue), but it's not | 1083 // why we provide dedicated heaps for popular types (e.g., Node, CSSValue), |
1082 // practical to prepare dedicated heaps for all types. Thus we group objects | 1084 // but it's not practical to prepare dedicated heaps for all types. |
1083 // by their sizes, hoping that it will approximately group objects | 1085 // Thus we group objects by their sizes, hoping that this will approximately |
1084 // by their types. | 1086 // group objects by their types. |
1085 static int heapIndexForNormalHeap(size_t size) | 1087 // |
1086 { | 1088 // An exception to the use of sized heaps is made for class types that |
1087 if (size < 64) { | 1089 // require prompt finalization after a garbage collection. That is, their |
1088 if (size < 32) | 1090 // instances have to be finalized early and cannot be delayed until lazy |
1089 return NormalPage1HeapIndex; | 1091 // sweeping kicks in for their heap and page. The EAGERLY_SWEEP() |
1090 return NormalPage2HeapIndex; | 1092 // macro is used to declare a class (and its derived classes) as being |
| 1093 // in need of 'eager sweeping'. |
| 1094 // |
 | 1095 // TODO(Oilpan): the notion of eagerly swept objects is at least needed |
| 1096 // during the transition to enabling Oilpan always. Once passed, re-evaluate |
| 1097 // if there is a need to keep this facility. |
| 1098 // |
| 1099 template<typename T, typename Enabled = void> |
| 1100 class HeapIndexTrait { |
| 1101 public: |
| 1102 static int heapIndexForObject(size_t size) |
| 1103 { |
| 1104 if (size < 64) { |
| 1105 if (size < 32) |
| 1106 return NormalPage1HeapIndex; |
| 1107 return NormalPage2HeapIndex; |
| 1108 } |
| 1109 if (size < 128) |
| 1110 return NormalPage3HeapIndex; |
| 1111 return NormalPage4HeapIndex; |
1091 } | 1112 } |
1092 if (size < 128) | 1113 }; |
1093 return NormalPage3HeapIndex; | 1114 |
1094 return NormalPage4HeapIndex; | 1115 #define EAGERLY_SWEEP(TYPE) \ |
| 1116 template<typename T> \ |
| 1117 class HeapIndexTrait<T, typename WTF::EnableIf<WTF::IsSubclass<T, TYPE>::value>:
:Type> { \ |
| 1118 public: \ |
| 1119 static int heapIndexForObject(size_t) \ |
| 1120 { \ |
| 1121 return EagerSweepHeapIndex; \ |
| 1122 } \ |
1095 } | 1123 } |
1096 | 1124 |
1097 NO_SANITIZE_ADDRESS inline | 1125 NO_SANITIZE_ADDRESS inline |
1098 size_t HeapObjectHeader::size() const | 1126 size_t HeapObjectHeader::size() const |
1099 { | 1127 { |
1100 size_t result = m_encoded & headerSizeMask; | 1128 size_t result = m_encoded & headerSizeMask; |
1101 // Large objects should not refer to header->size(). | 1129 // Large objects should not refer to header->size(). |
1102 // The actual size of a large object is stored in | 1130 // The actual size of a large object is stored in |
1103 // LargeObjectPage::m_payloadSize. | 1131 // LargeObjectPage::m_payloadSize. |
1104 ASSERT(result != largeObjectSizeInHeader); | 1132 ASSERT(result != largeObjectSizeInHeader); |
(...skipping 100 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1205 ASSERT(state->isAllocationAllowed()); | 1233 ASSERT(state->isAllocationAllowed()); |
1206 ASSERT(heapIndex != LargeObjectHeapIndex); | 1234 ASSERT(heapIndex != LargeObjectHeapIndex); |
1207 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex)); | 1235 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex)); |
1208 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex); | 1236 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex); |
1209 } | 1237 } |
1210 | 1238 |
1211 template<typename T> | 1239 template<typename T> |
1212 Address Heap::allocate(size_t size) | 1240 Address Heap::allocate(size_t size) |
1213 { | 1241 { |
1214 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1242 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
1215 return Heap::allocateOnHeapIndex(state, size, heapIndexForNormalHeap(size),
GCInfoTrait<T>::index()); | 1243 return Heap::allocateOnHeapIndex(state, size, HeapIndexTrait<T>::heapIndexFo
rObject(size), GCInfoTrait<T>::index()); |
1216 } | 1244 } |
1217 | 1245 |
1218 template<typename T> | 1246 template<typename T> |
1219 Address Heap::reallocate(void* previous, size_t size) | 1247 Address Heap::reallocate(void* previous, size_t size) |
1220 { | 1248 { |
1221 if (!size) { | 1249 if (!size) { |
1222 // If the new size is 0 this is equivalent to either free(previous) or | 1250 // If the new size is 0 this is equivalent to either free(previous) or |
1223 // malloc(0). In both cases we do nothing and return nullptr. | 1251 // malloc(0). In both cases we do nothing and return nullptr. |
1224 return nullptr; | 1252 return nullptr; |
1225 } | 1253 } |
1226 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1254 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
1227 // TODO(haraken): reallocate() should use the heap that the original object | 1255 // TODO(haraken): reallocate() should use the heap that the original object |
1228 // is using. This won't be a big deal since reallocate() is rarely used. | 1256 // is using. This won't be a big deal since reallocate() is rarely used. |
1229 Address address = Heap::allocateOnHeapIndex(state, size, heapIndexForNormalH
eap(size), GCInfoTrait<T>::index()); | 1257 Address address = Heap::allocateOnHeapIndex(state, size, HeapIndexTrait<T>::
heapIndexForObject(size), GCInfoTrait<T>::index()); |
1230 if (!previous) { | 1258 if (!previous) { |
1231 // This is equivalent to malloc(size). | 1259 // This is equivalent to malloc(size). |
1232 return address; | 1260 return address; |
1233 } | 1261 } |
1234 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous); | 1262 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous); |
1235 // TODO(haraken): We don't support reallocate() for finalizable objects. | 1263 // TODO(haraken): We don't support reallocate() for finalizable objects. |
1236 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); | 1264 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); |
1237 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); | 1265 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); |
1238 size_t copySize = previousHeader->payloadSize(); | 1266 size_t copySize = previousHeader->payloadSize(); |
1239 if (copySize > size) | 1267 if (copySize > size) |
1240 copySize = size; | 1268 copySize = size; |
1241 memcpy(address, previous, copySize); | 1269 memcpy(address, previous, copySize); |
1242 return address; | 1270 return address; |
1243 } | 1271 } |
1244 | 1272 |
1245 } // namespace blink | 1273 } // namespace blink |
1246 | 1274 |
1247 #endif // Heap_h | 1275 #endif // Heap_h |
OLD | NEW |