Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(11)

Side by Side Diff: Source/platform/heap/Heap.h

Issue 875503003: Allow Oilpan heap objects account for their external allocations. (Closed) Base URL: https://chromium.googlesource.com/chromium/blink.git@master
Patch Set: minor optimizations and tuning Created 5 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 980 matching lines...) Expand 10 before | Expand all | Expand 10 after
991 } 991 }
992 992
993 static void increaseAllocatedObjectSize(size_t delta) { atomicAdd(&s_allocatedObjectSize, static_cast<long>(delta)); } 993 static void increaseAllocatedObjectSize(size_t delta) { atomicAdd(&s_allocatedObjectSize, static_cast<long>(delta)); }
994 static void decreaseAllocatedObjectSize(size_t delta) { atomicSubtract(&s_allocatedObjectSize, static_cast<long>(delta)); } 994 static void decreaseAllocatedObjectSize(size_t delta) { atomicSubtract(&s_allocatedObjectSize, static_cast<long>(delta)); }
995 static size_t allocatedObjectSize() { return acquireLoad(&s_allocatedObjectSize); } 995 static size_t allocatedObjectSize() { return acquireLoad(&s_allocatedObjectSize); }
996 static void increaseMarkedObjectSize(size_t delta) { atomicAdd(&s_markedObjectSize, static_cast<long>(delta)); } 996 static void increaseMarkedObjectSize(size_t delta) { atomicAdd(&s_markedObjectSize, static_cast<long>(delta)); }
997 static size_t markedObjectSize() { return acquireLoad(&s_markedObjectSize); } 997 static size_t markedObjectSize() { return acquireLoad(&s_markedObjectSize); }
998 static void increaseAllocatedSpace(size_t delta) { atomicAdd(&s_allocatedSpace, static_cast<long>(delta)); } 998 static void increaseAllocatedSpace(size_t delta) { atomicAdd(&s_allocatedSpace, static_cast<long>(delta)); }
999 static void decreaseAllocatedSpace(size_t delta) { atomicSubtract(&s_allocatedSpace, static_cast<long>(delta)); } 999 static void decreaseAllocatedSpace(size_t delta) { atomicSubtract(&s_allocatedSpace, static_cast<long>(delta)); }
1000 static size_t allocatedSpace() { return acquireLoad(&s_allocatedSpace); } 1000 static size_t allocatedSpace() { return acquireLoad(&s_allocatedSpace); }
1001
1001 static double estimatedMarkingTime(); 1002 static double estimatedMarkingTime();
1002 1003
1004 // On object allocation, register the object's externally allocated memory.
1005 static inline void increaseExternallyAllocatedBytes(size_t);
1006 static size_t externallyAllocatedBytes() { return acquireLoad(&s_externallyAllocatedBytes); }
1007
1008 // On object tracing, register the object's externally allocated memory (as still live).
1009 static void increaseExternallyAllocatedBytesAlive(size_t delta)
1010 {
1011 ASSERT(ThreadState::current()->isInGC());
1012 s_externallyAllocatedBytesAlive += delta;
1013 }
1014 static size_t externallyAllocatedBytesAlive() { return s_externallyAllocatedBytesAlive; }
1015
1016 static void requestUrgentGC();
1017 static void clearUrgentGC() { releaseStore(&s_requestedUrgentGC, 0); }
1018 static bool isUrgentGCRequested() { return acquireLoad(&s_requestedUrgentGC); }
1019
1003 private: 1020 private:
1004 // A RegionTree is a simple binary search tree of PageMemoryRegions sorted 1021 // A RegionTree is a simple binary search tree of PageMemoryRegions sorted
1005 // by base addresses. 1022 // by base addresses.
1006 class RegionTree { 1023 class RegionTree {
1007 public: 1024 public:
1008 explicit RegionTree(PageMemoryRegion* region) : m_region(region), m_left(nullptr), m_right(nullptr) { } 1025 explicit RegionTree(PageMemoryRegion* region) : m_region(region), m_left(nullptr), m_right(nullptr) { }
1009 ~RegionTree() 1026 ~RegionTree()
1010 { 1027 {
1011 delete m_left; 1028 delete m_left;
1012 delete m_right; 1029 delete m_right;
1013 } 1030 }
1014 PageMemoryRegion* lookup(Address); 1031 PageMemoryRegion* lookup(Address);
1015 static void add(RegionTree*, RegionTree**); 1032 static void add(RegionTree*, RegionTree**);
1016 static void remove(PageMemoryRegion*, RegionTree**); 1033 static void remove(PageMemoryRegion*, RegionTree**);
1017 private: 1034 private:
1018 PageMemoryRegion* m_region; 1035 PageMemoryRegion* m_region;
1019 RegionTree* m_left; 1036 RegionTree* m_left;
1020 RegionTree* m_right; 1037 RegionTree* m_right;
1021 }; 1038 };
1022 1039
1023 static void resetAllocatedObjectSize() { ASSERT(ThreadState::current()->isInGC()); s_allocatedObjectSize = 0; } 1040 // Reset counters that track live and allocated-since-last-GC sizes.
1024 static void resetMarkedObjectSize() { ASSERT(ThreadState::current()->isInGC()); s_markedObjectSize = 0; } 1041 static void resetHeapCounters();
1025 1042
1026 static Visitor* s_markingVisitor; 1043 static Visitor* s_markingVisitor;
1027 static CallbackStack* s_markingStack; 1044 static CallbackStack* s_markingStack;
1028 static CallbackStack* s_postMarkingCallbackStack; 1045 static CallbackStack* s_postMarkingCallbackStack;
1029 static CallbackStack* s_weakCallbackStack; 1046 static CallbackStack* s_weakCallbackStack;
1030 static CallbackStack* s_ephemeronStack; 1047 static CallbackStack* s_ephemeronStack;
1031 static HeapDoesNotContainCache* s_heapDoesNotContainCache; 1048 static HeapDoesNotContainCache* s_heapDoesNotContainCache;
1032 static bool s_shutdownCalled; 1049 static bool s_shutdownCalled;
1033 static bool s_lastGCWasConservative; 1050 static bool s_lastGCWasConservative;
1034 static FreePagePool* s_freePagePool; 1051 static FreePagePool* s_freePagePool;
1035 static OrphanedPagePool* s_orphanedPagePool; 1052 static OrphanedPagePool* s_orphanedPagePool;
1036 static RegionTree* s_regionTree; 1053 static RegionTree* s_regionTree;
1037 static size_t s_allocatedSpace; 1054 static size_t s_allocatedSpace;
1038 static size_t s_allocatedObjectSize; 1055 static size_t s_allocatedObjectSize;
1039 static size_t s_markedObjectSize; 1056 static size_t s_markedObjectSize;
1057 static size_t s_externallyAllocatedBytes;
1058 static size_t s_externallyAllocatedBytesAlive;
1059 static unsigned s_requestedUrgentGC;
1060
1040 friend class ThreadState; 1061 friend class ThreadState;
1041 }; 1062 };
1042 1063
1043 // Base class for objects allocated in the Blink garbage-collected heap. 1064 // Base class for objects allocated in the Blink garbage-collected heap.
1044 // 1065 //
1045 // Defines a 'new' operator that allocates the memory in the heap. 'delete' 1066 // Defines a 'new' operator that allocates the memory in the heap. 'delete'
1046 // should not be called on objects that inherit from GarbageCollected. 1067 // should not be called on objects that inherit from GarbageCollected.
1047 // 1068 //
1048 // Instances of GarbageCollected will *NOT* get finalized. Their destructor 1069 // Instances of GarbageCollected will *NOT* get finalized. Their destructor
1049 // will not be called. Therefore, only classes that have trivial destructors 1070 // will not be called. Therefore, only classes that have trivial destructors
(...skipping 379 matching lines...) Expand 10 before | Expand all | Expand 10 after
1429 // FIXME: We don't support reallocate() for finalizable objects. 1450 // FIXME: We don't support reallocate() for finalizable objects.
1430 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); 1451 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer());
1431 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); 1452 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index());
1432 size_t copySize = previousHeader->payloadSize(); 1453 size_t copySize = previousHeader->payloadSize();
1433 if (copySize > size) 1454 if (copySize > size)
1434 copySize = size; 1455 copySize = size;
1435 memcpy(address, previous, copySize); 1456 memcpy(address, previous, copySize);
1436 return address; 1457 return address;
1437 } 1458 }
1438 1459
1460 void Heap::increaseExternallyAllocatedBytes(size_t delta)
1461 {
1462 // Flag GC urgency on a 50% increase in external allocation
1463 // since the last GC, but not for less than 100M.
1464 //
1465 // FIXME: consider other, 'better' policies (e.g., have the count of
1466 // heap objects with external allocations be taken into
1467 // account, ...) The overall goal here is to trigger a
1468 // GC such that it considerably lessens memory pressure
1469 // for a renderer process, when absolutely needed.
1470 size_t externalBytesAllocatedSinceLastGC = atomicAdd(&s_externallyAllocatedBytes, static_cast<long>(delta));
1471 if (LIKELY(externalBytesAllocatedSinceLastGC < 100 * 1024 * 1024))
1472 return;
1473
1474 if (UNLIKELY(isUrgentGCRequested()))
1475 return;
1476
1477 size_t externalBytesAliveAtLastGC = externallyAllocatedBytesAlive();
1478 if (UNLIKELY(externalBytesAllocatedSinceLastGC > externalBytesAliveAtLastGC / 2))
1479 Heap::requestUrgentGC();
1480 }
1481
1439 class HeapAllocatorQuantizer { 1482 class HeapAllocatorQuantizer {
1440 public: 1483 public:
1441 template<typename T> 1484 template<typename T>
1442 static size_t quantizedSize(size_t count) 1485 static size_t quantizedSize(size_t count)
1443 { 1486 {
1444 RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T)); 1487 RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T));
1445 return BaseHeap::roundedAllocationSize(count * sizeof(T)); 1488 return BaseHeap::roundedAllocationSize(count * sizeof(T));
1446 } 1489 }
1447 static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize; 1490 static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize;
1448 }; 1491 };
(...skipping 1007 matching lines...) Expand 10 before | Expand all | Expand 10 after
2456 template<typename T, size_t inlineCapacity> 2499 template<typename T, size_t inlineCapacity>
2457 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; 2500 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { };
2458 template<typename T, size_t inlineCapacity> 2501 template<typename T, size_t inlineCapacity>
2459 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; 2502 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { };
2460 template<typename T, typename U, typename V> 2503 template<typename T, typename U, typename V>
2461 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; 2504 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { };
2462 2505
2463 } // namespace blink 2506 } // namespace blink
2464 2507
2465 #endif // Heap_h 2508 #endif // Heap_h
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698