Chromium Code Reviews

Side by Side Diff: Source/heap/Heap.h

Issue 131803005: Add more oilpan collections support (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 11 months ago
  1 /*
  2  * Copyright (C) 2013 Google Inc. All rights reserved.
  3  *
  4  * Redistribution and use in source and binary forms, with or without
  5  * modification, are permitted provided that the following conditions are
  6  * met:
  7  *
  8  *     * Redistributions of source code must retain the above copyright
  9  * notice, this list of conditions and the following disclaimer.
 10  *     * Redistributions in binary form must reproduce the above
(...skipping 703 matching lines...)
714     virtual void makeConsistentForGC();
715     virtual bool isConsistentForGC();
716
717     ThreadState* threadState() { return m_threadState; }
718     HeapStats& stats() { return m_threadState->stats(); }
719     HeapContainsCache* heapContainsCache() { return m_threadState->heapContainsCache(); }
720
721     inline Address allocate(size_t, const GCInfo*);
722     void addToFreeList(Address, size_t);
723     void addPageToPool(HeapPage<Header>*);
724     inline size_t roundedAllocationSize(size_t size)
724     inline static size_t roundedAllocationSize(size_t size)
725     {
726         return allocationSizeFromSize(size) - sizeof(Header);
727     }
728
729 private:
730     // Once pages have been used for one thread heap they will never
731     // be reused for another thread heap. Instead of unmapping, we add
732     // the pages to a pool of pages to be reused later by this thread
733     // heap. This is done as a security feature to avoid type
734     // confusion. The heap is type segregated by having separate
(...skipping 10 matching lines...)
745
746         PageMemory* storage() { return m_storage; }
747         PagePoolEntry* next() { return m_next; }
748
749     private:
750         PageMemory* m_storage;
751         PagePoolEntry* m_next;
752     };
753
754     HEAP_EXPORT Address outOfLineAllocate(size_t, const GCInfo*);
755     size_t allocationSizeFromSize(size_t);
755     static size_t allocationSizeFromSize(size_t);
756     void addPageToHeap(const GCInfo*);
757     HEAP_EXPORT Address allocateLargeObject(size_t, const GCInfo*);
758     Address currentAllocationPoint() const { return m_currentAllocationPoint; }
759     size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
760     bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); }
761     void setAllocationPoint(Address point, size_t size)
762     {
763         ASSERT(!point || heapPageFromAddress(point));
764         ASSERT(size <= HeapPage<Header>::payloadSize());
765         m_currentAllocationPoint = point;
(...skipping 388 matching lines...)
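As an aside on the page pool described in the comments above: PagePoolEntry is a node in a singly linked free list of reusable PageMemory blocks that stays private to one ThreadHeap. The sketch below shows only the general shape of how addPageToPool could push a retired page onto that list; the m_pagePool head, the PagePoolEntry constructor taking two arguments, and HeapPage::storage() are assumptions made for illustration, not code from this patch.

    // Illustrative sketch only -- m_pagePool, storage() and this PagePoolEntry
    // constructor are assumed names, not taken from the patch.
    template<typename Header>
    void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* page)
    {
        PageMemory* memory = page->storage();               // backing memory of the retired page
        m_pagePool = new PagePoolEntry(memory, m_pagePool);  // push onto this heap's private free list
    }

Because a page only ever re-enters the pool of the heap it came from, memory that once backed one heap's object type is never handed to a different heap, which is the type-confusion defence the comment refers to.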
1154     memcpy(address, previous, copySize);
1155     return address;
1156 }
1157
1158 class HeapAllocatorQuantizer {
1159 public:
1160     template<typename T>
1161     static size_t quantizedSize(size_t count)
1162     {
1163         RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T));
1164         return typename HeapTrait<T>::HeapType::roundedAllocationSize(count * sizeof(T));
1164         return HeapTrait<T>::HeapType::roundedAllocationSize(count * sizeof(T));
1165     }
1166     static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize;
1167 };
1168
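For orientation, the quantizer above lets a collection ask the heap how much usable space an allocation request will really get, so a backing store can grow straight to the rounded-up capacity instead of wasting the slack. A hedged example of what a call site could look like (the variable names are illustrative only):

    // quantizedSize<T>(count) equals allocationSizeFromSize(count * sizeof(T)) - sizeof(Header),
    // i.e. the payload the heap would actually hand back for that request.
    size_t usableBytes = HeapAllocator::Quantizer::quantizedSize<int>(100);
    size_t usableCapacity = usableBytes / sizeof(int); // grow to this, not just to 100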
1169 class HeapAllocator {
1170 public:
1171     typedef HeapAllocatorQuantizer Quantizer;
1172     typedef WebCore::Visitor Visitor;
1173     static const bool isGarbageCollected = true;
1174
(...skipping 19 matching lines...)
1194     {
1195         ASSERT_NOT_REACHED();
1196         return nullptr;
1197     }
1198
1199     static void deleteArray(void* ptr)
1200     {
1201         ASSERT_NOT_REACHED();
1202     }
1203
1204     static void markUsingGCInfo(Visitor* visitor, const void* buffer)
haraken 2014/01/15 04:48:35 How about just calling this method "mark"? Or prob…
1205     {
1206         visitor->mark(buffer, FinalizedHeapObjectHeader::fromPayload(buffer)->traceCallback());
1207     }
1208
1209     static void markNoTracing(Visitor* visitor, const void* t)
haraken 2014/01/15 04:48:35 markNoTracing => markWithoutTracing ?
Erik Corry 2014/01/15 09:27:32 I think we have a little too much naming churn. Y…
1210     {
1211         visitor->mark(t);
1212     }
1213
1214     template<typename T, typename Traits>
1215     static void mark(Visitor* visitor, T& t)
1215     static void trace(Visitor* visitor, T& t)
1216     {
1217         CollectionBackingTraceTrait<Traits::needsTracing, Traits::isWeak, false, T, Traits>::mark(visitor, t);
1218     }
1219
1220     template<typename T>
1221     static bool hasDeadMember(Visitor*, const T&)
1222     {
1223         return false;
1224     }
1225
(...skipping 55 matching lines...)
1281     {
1282         return *(other);
1283     }
1284
1285 private:
1286     template<typename T, size_t u, typename V> friend class WTF::Vector;
1287     template<typename T, typename U, typename V, typename W> friend class WTF::HashSet;
1288     template<typename T, typename U, typename V, typename W, typename X, typename Y> friend class WTF::HashMap;
1289 };
1290
1291 // FIXME: These should just be template aliases:
1292 //
1293 // template<typename T, size_t inlineCapacity = 0>
1294 // using HeapVector = Vector<T, inlineCapacity, HeapAllocator>;
1295 //
1296 // as soon as all the compilers we care about support that.
1297 // MSVC supports it only in MSVC 2013.
1298 template<
1299 typename KeyArg,
1300 typename MappedArg,
1301 typename HashArg = typename DefaultHash<KeyArg>::Hash,
1302 typename KeyTraitsArg = HashTraits<KeyArg>,
1303 typename MappedTraitsArg = HashTraits<MappedArg> >
1304 class HeapHashMap : public HashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg, HeapAllocator> { };
1305
1306 template<
1307 typename ValueArg,
1308 typename HashArg = typename DefaultHash<ValueArg>::Hash,
1309 typename TraitsArg = HashTraits<ValueArg> >
1310 class HeapHashSet : public HashSet<ValueArg, HashArg, TraitsArg, HeapAllocator> { };
1311
1312 template<typename T, size_t inlineCapacity = 0>
1313 class HeapVector : public Vector<T, inlineCapacity, HeapAllocator> {
1314 public:
1315 HeapVector() { }
1316
1317     explicit HeapVector(size_t size) : Vector<T, inlineCapacity, HeapAllocator>(size)
1318 {
1319 }
1320
1321 template<size_t otherCapacity>
1322 HeapVector(const HeapVector<T, otherCapacity>& other)
1323 : Vector<T, inlineCapacity, HeapAllocator>(other)
1324 {
1325 }
1326
1327 template<typename U>
1328 void append(const U& other)
1329 {
1330 Vector<T, inlineCapacity, HeapAllocator>::append(other);
1331 }
1332
1333 template<typename U, size_t otherCapacity>
1334 void append(const HeapVector<U, otherCapacity>& other)
1335 {
1336 const Vector<U, otherCapacity, HeapAllocator>& otherVector = other;
1337 Vector<T, inlineCapacity, HeapAllocator>::append(otherVector);
1338 }
1339 };
1340
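To show how the heap collections above are meant to be consumed: they are used as fields of garbage-collected objects, with HeapAllocator supplying the backing-store allocation and the tracing glue. The sketch below assumes the usual Oilpan GarbageCollected<> base and Member<> handle defined elsewhere, and the trace() wiring shown is the general Oilpan pattern rather than an exact API guaranteed by this patch.

    // Illustrative only: Node, NodeRegistry and the trace() calls are assumptions.
    class NodeRegistry : public GarbageCollected<NodeRegistry> {
    public:
        void add(int id, Node* node)
        {
            m_allNodes.append(node);
            m_nodesById.add(id, node);
        }

        void trace(Visitor* visitor)
        {
            // The Heap* collections trace their backing stores and Member<> contents
            // through HeapAllocator, so the owner only has to mention its fields.
            visitor->trace(m_allNodes);
            visitor->trace(m_nodesById);
        }

    private:
        HeapVector<Member<Node> > m_allNodes;
        HeapHashMap<int, Member<Node> > m_nodesById;
    };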
1341 template<typename SetType>
1342 inline void clearWeakSet(Visitor* visitor, SetType& set)
1343 {
1344 typedef Vector<typename SetType::ValueType, 10> DeadSet;
1345 DeadSet dead;
1346
1347     for (typename SetType::iterator it = set.begin(), end = set.end(); it != end; ++it) {
1348         if (!ObjectAliveTrait<typename SetType::ValueType>::isAlive(visitor, *it))
1349 dead.append(*it);
1350 }
1351
1352     for (typename DeadSet::iterator it = dead.begin(), end = dead.end(); it != end; ++it) {
1353 set.remove(*it);
1354 }
1355 }
1356
1357 template<typename SetType>
1358 inline void clearWeakCountedSet(Visitor* visitor, SetType& set)
1359 {
1360 typedef Vector<typename SetType::ValueType, 10> DeadSet;
1361 DeadSet dead;
1362
1363     for (typename SetType::iterator it = set.begin(), end = set.end(); it != end; ++it) {
1364         if (!ObjectAliveTrait<typename SetType::ValueType>::isAlive(visitor, it->key))
1365 dead.append(it->key);
1366 }
1367
1368     for (typename DeadSet::iterator it = dead.begin(), end = dead.end(); it != end; ++it) {
1369 set.removeAll(*it);
1370 }
1371 }
1372
1373 template<typename MapType>
1374 inline void clearWeakMap(Visitor* visitor, MapType& map)
1375 {
1376 typedef Vector<typename MapType::KeyType, 10> DeadSet;
1377 DeadSet dead;
1378
1379     for (typename MapType::iterator it = map.begin(), end = map.end(); it != end; ++it) {
1380         if (!ObjectAliveTrait<typename MapType::KeyType>::isAlive(visitor, it->key))
1381 dead.append(it->key);
1382 }
1383
1384     for (typename DeadSet::iterator it = dead.begin(), end = dead.end(); it != end; ++it) {
1385 map.remove(*it);
1386 }
1387 }
1388
1389 template<typename MapType, typename T>
1390 inline void clearWeakMap(Visitor* visitor, MapType& map, void (*callback)(T, const typename MapType::KeyType&, typename MapType::MappedType), T data)
haraken 2014/01/15 04:48:35 Can we make |callback| and |data| default parameters…
Erik Corry 2014/01/15 09:27:32 Actually these routines are there for handling off-heap…
1391 {
1392 typedef Vector<typename MapType::KeyType, 10> DeadSet;
1393 DeadSet dead;
1394
1395     for (typename MapType::iterator it = map.begin(), end = map.end(); it != end; ++it) {
1396         if (!ObjectAliveTrait<typename MapType::KeyType>::isAlive(visitor, it->key)) {
1397 dead.append(it->key);
1398 callback(data, it->key, it->value);
1399 }
1400 }
1401
1402     for (typename DeadSet::iterator it = dead.begin(), end = dead.end(); it != end; ++it) {
1403 map.remove(*it);
1404 }
1405 }
1406
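Following up on the reviewer exchange above: the extra callback/data arguments let the owner of an off-heap map react to each entry whose weakly referenced key has died before the entry is dropped. A sketch of a hypothetical caller, with Registry, Node, onNodeDied and m_observers all invented for illustration:

    // Hypothetical owner of an off-heap map whose keys weakly reference heap objects.
    class Registry {
    public:
        static void onNodeDied(Registry* self, const WeakMember<Node>& key, int token)
        {
            self->releaseToken(token); // called once per entry whose Node is no longer alive
        }

        void processWeakEntries(Visitor* visitor)
        {
            // Entries with dead keys trigger onNodeDied and are then removed from the map.
            clearWeakMap(visitor, m_observers, &Registry::onNodeDied, this);
        }

    private:
        void releaseToken(int);
        HashMap<WeakMember<Node>, int> m_observers; // off-heap (DefaultAllocator) map
    };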
1407 template<typename MapType>
1408 inline void clearWeakMapValues(Visitor* visitor, MapType& map)
haraken 2014/01/15 04:48:35 When is this method used? It looks a bit strange t…
Erik Corry 2014/01/15 09:27:32 This code is gone now, but we have the same semantics…
1409 {
1410 typedef Vector<typename MapType::KeyType, 10> DeadSet;
1411 DeadSet dead;
1412
1413     for (typename MapType::iterator it = map.begin(), end = map.end(); it != end; ++it) {
1414         if (!ObjectAliveTrait<typename MapType::MappedType>::isAlive(visitor, it->value))
1415 dead.append(it->key);
1416 }
1417
1418     for (typename DeadSet::iterator it = dead.begin(), end = dead.end(); it != end; ++it) {
1419 map.remove(*it);
1420 }
1421 }
1422
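Relatedly, clearWeakMapValues keys the liveness check on the mapped value rather than the key, which fits maps from a strong or non-heap key to a weakly held object (the case the reviewer asks about above). A minimal hypothetical fragment:

    // Hypothetical: NodeCache and m_nodesByName are illustrative names only.
    class NodeCache {
    public:
        void processWeakEntries(Visitor* visitor)
        {
            // Removes every entry whose Node value has died; entries with live values stay.
            clearWeakMapValues(visitor, m_nodesByName);
        }

    private:
        HashMap<String, WeakMember<Node> > m_nodesByName; // strong key, weak value
    };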
1423 template<typename Key, typename Value>
1424 struct ThreadingTrait<HeapHashMap<Key, Value> > : public ThreadingTrait<HashMap<Key, Value, HeapAllocator> > { };
1425 template<typename Value>
1426 struct ThreadingTrait<HeapHashSet<Value> > : public ThreadingTrait<HashSet<Value, HeapAllocator> > { };
1427 template<typename T, size_t inlineCapacity>
1428 struct ThreadingTrait<HeapVector<T, inlineCapacity> > : public ThreadingTrait<Vector<T, inlineCapacity, HeapAllocator> > { };
1429
1430 // The standard implementation of GCInfoTrait<T>::get() just returns a static
1431 // from the class T, but we can't do that for HashMap, HashSet and Vector
1432 // because they are in WTF and know nothing of GCInfos. Instead we have a
1433 // specialization of GCInfoTrait for these three classes here.
1434
1435 template<typename Key, typename Value, typename T, typename U, typename V>
1436 struct GCInfoTrait<HashMap<Key, Value, T, U, V, HeapAllocator> > {
1437     static const GCInfo* get() { return &info; }
1438     static const GCInfo info;
1439 };
(...skipping 254 matching lines...)
1694         visitor->mark(backing, 0);
1695     }
1696     static void checkTypeMarker(Visitor* visitor, const Backing* backing)
1697     {
1698 #ifndef NDEBUG
1699         visitor->checkTypeMarker(const_cast<Backing*>(backing), getTypeMarker<Backing>());
1700 #endif
1701     }
1702 };
1703
1704 template<typename T, typename U, typename V, typename W, typename X>
1705 struct GCInfoTrait<HeapHashMap<T, U, V, W, X> > : public GCInfoTrait<HashMap<T, U, V, W, X, HeapAllocator> > { };
1706 template<typename T, typename U, typename V>
1707 struct GCInfoTrait<HeapHashSet<T, U, V> > : public GCInfoTrait<HashSet<T, U, V, HeapAllocator> > { };
1708 template<typename T, size_t inlineCapacity>
1709 struct GCInfoTrait<HeapVector<T, inlineCapacity> > : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> > { };
1710
1711 template<typename T>
1712 struct IfWeakMember;
1713
1714 template<typename T>
1715 struct IfWeakMember {
1716     template<typename U>
1717     static bool isDead(Visitor*, const U&) { return false; }
1718 };
1719
1720 template<typename T>
1721 struct IfWeakMember<WeakMember<T> > {
1722     static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.raw()); }
1723 };
1724
1579 template<typename K, typename V, typename HashFunctions, typename KeyTraits, typename ValueTraits>
1580 void processWeakOffHeapHashMap(Visitor* visitor, void* self)
1581 {
1582     typedef HashMap<K, V, WTF::DefaultAllocator, HashFunctions, KeyTraits, ValueTraits> Map;
1583 Map* map = reinterpret_cast<Map*>(self);
1584 // Collect up keys here because we can't modify a hash map while iterating
1585 // over it.
1586 Vector<K> deletionKeys;
1587 ASSERT(KeyTraits::isWeak || ValueTraits::isWeak);
1588 typedef typename Map::iterator Iterator;
1589 Iterator endIterator(map->end());
1590 for (Iterator it = map->begin(); it != endIterator; ++it) {
1591 if (IfWeakMember<K>::isDead(visitor, it->key))
1592 deletionKeys.append(it->key);
1593 else if (IfWeakMember<V>::isDead(visitor, it->value))
1594 deletionKeys.append(it->key);
1595 }
1596 size_t size = deletionKeys.size();
1597 if (size == map->size()) {
1598 map->clear();
1599 return;
1600 }
1601 for (size_t i = 0; i < size; i++)
1602 map->remove(deletionKeys[i]);
1603 }
1604
1605 template<typename T, typename HashFunctions, typename Traits>
1606 void processWeakOffHeapHashSet(Visitor* visitor, void* self)
1607 {
1608 typedef HashSet<T, WTF::DefaultAllocator, HashFunctions, Traits> Set;
1609 Set* set = reinterpret_cast<Set*>(self);
1610 ASSERT(Traits::isWeak);
1611 // Collect up keys here because we can't modify a hash set while iterating
1612 // over it.
1613 Vector<T> deletionKeys;
1614 typedef typename Set::iterator Iterator;
1615 Iterator endIterator(set->end());
1616 for (Iterator it = set->begin(); it != endIterator; ++it) {
1617 if (IfWeakMember<T>::isDead(visitor, *it))
1618 deletionKeys.append(*it);
1619 }
1620 size_t size = deletionKeys.size();
1621 if (size == set->size()) {
1622 set->clear();
1623 return;
1624 }
1625 for (size_t i = 0; i < size; i++)
1626 set->remove(deletionKeys[i]);
1627 }
1628
1725 #if COMPILER(CLANG)
1726 // Clang does not export the symbols that we have explicitly asked it
1727 // to export. This forces it to export all the methods from ThreadHeap.
1728 template<> void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo*);
1729 template<> void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo*);
1730 extern template class HEAP_EXPORT ThreadHeap<FinalizedHeapObjectHeader>;
1731 extern template class HEAP_EXPORT ThreadHeap<HeapObjectHeader>;
1732 #endif
1733
1734 }
1735
1736 #endif // Heap_h