OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 972 matching lines...)
983 } | 983 } |
984 | 984 |
985 static void increaseAllocatedObjectSize(size_t delta) { atomicAdd(&s_allocatedObjectSize, static_cast<long>(delta)); } | 985 static void increaseAllocatedObjectSize(size_t delta) { atomicAdd(&s_allocatedObjectSize, static_cast<long>(delta)); } |
986 static void decreaseAllocatedObjectSize(size_t delta) { atomicSubtract(&s_allocatedObjectSize, static_cast<long>(delta)); } | 986 static void decreaseAllocatedObjectSize(size_t delta) { atomicSubtract(&s_allocatedObjectSize, static_cast<long>(delta)); } |
987 static size_t allocatedObjectSize() { return acquireLoad(&s_allocatedObjectSize); } | 987 static size_t allocatedObjectSize() { return acquireLoad(&s_allocatedObjectSize); } |
988 static void increaseMarkedObjectSize(size_t delta) { atomicAdd(&s_markedObjectSize, static_cast<long>(delta)); } | 988 static void increaseMarkedObjectSize(size_t delta) { atomicAdd(&s_markedObjectSize, static_cast<long>(delta)); } |
989 static size_t markedObjectSize() { return acquireLoad(&s_markedObjectSize); } | 989 static size_t markedObjectSize() { return acquireLoad(&s_markedObjectSize); } |
990 static void increaseAllocatedSpace(size_t delta) { atomicAdd(&s_allocatedSpace, static_cast<long>(delta)); } | 990 static void increaseAllocatedSpace(size_t delta) { atomicAdd(&s_allocatedSpace, static_cast<long>(delta)); } |
991 static void decreaseAllocatedSpace(size_t delta) { atomicSubtract(&s_allocatedSpace, static_cast<long>(delta)); } | 991 static void decreaseAllocatedSpace(size_t delta) { atomicSubtract(&s_allocatedSpace, static_cast<long>(delta)); } |
992 static size_t allocatedSpace() { return acquireLoad(&s_allocatedSpace); } | 992 static size_t allocatedSpace() { return acquireLoad(&s_allocatedSpace); } |
993 | |
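These counters follow a simple lock-free pattern: any thread bumps a shared counter with an atomic add, and readers use an acquire load so they observe a value no staler than the last completed update. A minimal self-contained sketch of the same pattern, using std::atomic in place of the WTF atomicAdd/atomicSubtract/acquireLoad wrappers (all names below are illustrative, not Blink's):

#include <atomic>
#include <cstddef>
#include <cstdio>

// Illustrative stand-in for a Heap statistics counter such as s_allocatedObjectSize.
static std::atomic<long> counter{0};

// Relaxed ordering suffices for the writers of a statistics counter; only the sum matters.
static void increase(size_t delta) { counter.fetch_add(static_cast<long>(delta), std::memory_order_relaxed); }
static void decrease(size_t delta) { counter.fetch_sub(static_cast<long>(delta), std::memory_order_relaxed); }

// Acquire load mirrors acquireLoad(): readers see a value no staler than the last completed update.
static size_t current() { return static_cast<size_t>(counter.load(std::memory_order_acquire)); }

int main()
{
    increase(4096);
    decrease(1024);
    std::printf("allocated: %zu\n", current()); // prints 3072
    return 0;
}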
993 static double estimatedMarkingTime(); | 994 static double estimatedMarkingTime(); |
994 | 995 |
996 // On object allocation, register the object's externally allocated memory. | |
997 static inline void increaseExternallyAllocatedBytes(size_t); | |
998 static size_t externallyAllocatedBytes() { return acquireLoad(&s_externallyAllocatedBytes); } | |
999 | |
1000 // On object tracing, register the object's externally allocated memory (as still live). | |
1001 static void increaseExternallyAllocatedBytesAlive(size_t delta) | |
1002 { | |
1003 ASSERT(ThreadState::current()->isInGC()); | |
1004 s_externallyAllocatedBytesAlive += delta; | |
1005 } | |
1006 static size_t externallyAllocatedBytesAlive() { return s_externallyAllocatedBytesAlive; } | |
1007 | |
1008 static void requestUrgentGC(); | |
1009 static void clearUrgentGC() { releaseStore(&s_requestedUrgentGC, 0); } | |
1010 static bool isUrgentGCRequested() { return acquireLoad(&s_requestedUrgentGC); } | |
1011 | |
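The urgent-GC request added here is a release/acquire flag: releaseStore publishes the request, acquireLoad lets allocation paths poll it cheaply, and the GC clears it once serviced. A hedged sketch of that flag protocol with std::atomic (requestUrgentGC in Heap.cpp presumably also schedules a collection; that side effect is omitted):

#include <atomic>

static std::atomic<unsigned> requestedUrgentGC{0};

// Release store: publishes the request to any thread that acquire-loads the flag.
void requestUrgentGC() { requestedUrgentGC.store(1, std::memory_order_release); }
void clearUrgentGC() { requestedUrgentGC.store(0, std::memory_order_release); }

// Acquire load: cheap enough to poll from hot allocation paths.
bool isUrgentGCRequested() { return requestedUrgentGC.load(std::memory_order_acquire) != 0; }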
995 private: | 1012 private: |
996 // A RegionTree is a simple binary search tree of PageMemoryRegions sorted | 1013 // A RegionTree is a simple binary search tree of PageMemoryRegions sorted |
997 // by base addresses. | 1014 // by base addresses. |
998 class RegionTree { | 1015 class RegionTree { |
999 public: | 1016 public: |
1000 explicit RegionTree(PageMemoryRegion* region) : m_region(region), m_left(nullptr), m_right(nullptr) { } | 1017 explicit RegionTree(PageMemoryRegion* region) : m_region(region), m_left(nullptr), m_right(nullptr) { } |
1001 ~RegionTree() | 1018 ~RegionTree() |
1002 { | 1019 { |
1003 delete m_left; | 1020 delete m_left; |
1004 delete m_right; | 1021 delete m_right; |
1005 } | 1022 } |
1006 PageMemoryRegion* lookup(Address); | 1023 PageMemoryRegion* lookup(Address); |
1007 static void add(RegionTree*, RegionTree**); | 1024 static void add(RegionTree*, RegionTree**); |
1008 static void remove(PageMemoryRegion*, RegionTree**); | 1025 static void remove(PageMemoryRegion*, RegionTree**); |
1009 private: | 1026 private: |
1010 PageMemoryRegion* m_region; | 1027 PageMemoryRegion* m_region; |
1011 RegionTree* m_left; | 1028 RegionTree* m_left; |
1012 RegionTree* m_right; | 1029 RegionTree* m_right; |
1013 }; | 1030 }; |
1014 | 1031 |
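For orientation, a hedged sketch of what RegionTree::lookup plausibly does; the real implementation lives in Heap.cpp, outside this diff, and PageMemoryRegion is reduced here to a bare base/size span:

#include <cstddef>

typedef unsigned char* Address;

// Reduced stand-in for PageMemoryRegion: a contiguous [base, base + size) span.
struct PageMemoryRegion {
    Address base;
    size_t size;
    bool contains(Address address) const { return base <= address && address < base + size; }
};

struct RegionTree {
    PageMemoryRegion* m_region;
    RegionTree* m_left;
    RegionTree* m_right;

    // Standard binary-search-tree walk keyed on base address: go left for
    // lower addresses, right for higher, stop when a region spans the address.
    PageMemoryRegion* lookup(Address address)
    {
        for (RegionTree* current = this; current; ) {
            if (address < current->m_region->base)
                current = current->m_left;
            else if (current->m_region->contains(address))
                return current->m_region;
            else
                current = current->m_right;
        }
        return nullptr;
    }
};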
1015 static void resetAllocatedObjectSize() { ASSERT(ThreadState::current()->isInGC()); s_allocatedObjectSize = 0; } | 1032 // Reset counters that track live and allocated-since-last-GC sizes. |
1016 static void resetMarkedObjectSize() { ASSERT(ThreadState::current()->isInGC()); s_markedObjectSize = 0; } | 1033 static void resetHeapCounters(); |
1017 | 1034 |
1018 static Visitor* s_markingVisitor; | 1035 static Visitor* s_markingVisitor; |
1019 static CallbackStack* s_markingStack; | 1036 static CallbackStack* s_markingStack; |
1020 static CallbackStack* s_postMarkingCallbackStack; | 1037 static CallbackStack* s_postMarkingCallbackStack; |
1021 static CallbackStack* s_weakCallbackStack; | 1038 static CallbackStack* s_weakCallbackStack; |
1022 static CallbackStack* s_ephemeronStack; | 1039 static CallbackStack* s_ephemeronStack; |
1023 static HeapDoesNotContainCache* s_heapDoesNotContainCache; | 1040 static HeapDoesNotContainCache* s_heapDoesNotContainCache; |
1024 static bool s_shutdownCalled; | 1041 static bool s_shutdownCalled; |
1025 static bool s_lastGCWasConservative; | 1042 static bool s_lastGCWasConservative; |
1026 static FreePagePool* s_freePagePool; | 1043 static FreePagePool* s_freePagePool; |
1027 static OrphanedPagePool* s_orphanedPagePool; | 1044 static OrphanedPagePool* s_orphanedPagePool; |
1028 static RegionTree* s_regionTree; | 1045 static RegionTree* s_regionTree; |
1029 static size_t s_allocatedSpace; | 1046 static size_t s_allocatedSpace; |
1030 static size_t s_allocatedObjectSize; | 1047 static size_t s_allocatedObjectSize; |
1031 static size_t s_markedObjectSize; | 1048 static size_t s_markedObjectSize; |
1049 static size_t s_externallyAllocatedBytes; | |
1050 static size_t s_externallyAllocatedBytesAlive; | |
1051 static unsigned s_requestedUrgentGC; | |
1052 | |
1032 friend class ThreadState; | 1053 friend class ThreadState; |
1033 }; | 1054 }; |
1034 | 1055 |
1035 // Base class for objects allocated in the Blink garbage-collected heap. | 1056 // Base class for objects allocated in the Blink garbage-collected heap. |
1036 // | 1057 // |
1037 // Defines a 'new' operator that allocates the memory in the heap. 'delete' | 1058 // Defines a 'new' operator that allocates the memory in the heap. 'delete' |
1038 // should not be called on objects that inherit from GarbageCollected. | 1059 // should not be called on objects that inherit from GarbageCollected. |
1039 // | 1060 // |
1040 // Instances of GarbageCollected will *NOT* get finalized. Their destructor | 1061 // Instances of GarbageCollected will *NOT* get finalized. Their destructor |
1041 // will not be called. Therefore, only classes that have trivial destructors | 1062 // will not be called. Therefore, only classes that have trivial destructors |
(...skipping 379 matching lines...)
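To make the GarbageCollected contract above concrete, a usage sketch in the Blink idiom (the class is invented, and this will not compile outside Blink, since GarbageCollected, Member, and Visitor come from this header and its neighbors):

class IntrusiveNode : public GarbageCollected<IntrusiveNode> {
public:
    static IntrusiveNode* create() { return new IntrusiveNode; } // allocates in the GC heap

    // Marking entry point: registers m_next so the marker keeps it alive.
    void trace(Visitor* visitor) { visitor->trace(m_next); }

private:
    IntrusiveNode() : m_next(nullptr) { }

    Member<IntrusiveNode> m_next; // traced pointer; never freed explicitly
    // Note: no destructor. GarbageCollected objects are not finalized, so the
    // class must be trivially destructible (otherwise derive from
    // GarbageCollectedFinalized instead).
};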
1421 // FIXME: We don't support reallocate() for finalizable objects. | 1442 // FIXME: We don't support reallocate() for finalizable objects. |
1422 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); | 1443 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); |
1423 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); | 1444 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); |
1424 size_t copySize = previousHeader->payloadSize(); | 1445 size_t copySize = previousHeader->payloadSize(); |
1425 if (copySize > size) | 1446 if (copySize > size) |
1426 copySize = size; | 1447 copySize = size; |
1427 memcpy(address, previous, copySize); | 1448 memcpy(address, previous, copySize); |
1428 return address; | 1449 return address; |
1429 } | 1450 } |
1430 | 1451 |
1452 void Heap::increaseExternallyAllocatedBytes(size_t delta) | |
1453 { | |
1454 size_t externalBytesAllocatedSinceLastGC = atomicAdd(&s_externallyAllocatedBytes, static_cast<long>(delta)); | |
1455 | |
1456 if (UNLIKELY(isUrgentGCRequested())) | |
haraken 2015/02/23 08:42:27: Can we move this check down to line 1469? For perf
sof 2015/02/23 12:26:13: Done.
1457 return; | |
1458 | |
1459 // Flag GC urgency on a 50% increase in external allocation | |
1460 // since the last GC, but not for less than 100M. | |
1461 // | |
1462 // FIXME: consider other, 'better' policies (e.g., have the count of | |
1463 // heap objects with external allocations be taken into | |
1464 // account, ...) The overall goal here is to trigger a | |
1465 // GC such that it considerably lessens memory pressure | |
1466 // for a renderer process, when absolutely needed. | |
1467 if (LIKELY(externalBytesAllocatedSinceLastGC < 100 * 1024 * 1024)) | |
1468 return; | |
1469 | |
1470 size_t externalBytesAliveAtLastGC = externallyAllocatedBytesAlive(); | |
1471 if (UNLIKELY(externalBytesAllocatedSinceLastGC > externalBytesAliveAtLastGC / 2)) | |
1472 Heap::requestUrgentGC(); | |
1473 } | |
1474 | |
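Worked through with the numbers above: if 400MB of external memory was registered as alive at the last GC, half of that is 200MB, so an urgent GC is requested once external allocations since that GC exceed 200MB; if only 150MB was alive, the 100MB floor dominates instead. A small self-contained restatement of the policy arithmetic (function name hypothetical):

#include <cstddef>
#include <cstdio>

bool wouldRequestUrgentGC(size_t allocatedSinceLastGC, size_t aliveAtLastGC)
{
    // Never trigger below the 100MB floor, mirroring the LIKELY early return.
    if (allocatedSinceLastGC < 100 * 1024 * 1024)
        return false;
    // Trigger on a >50% increase relative to what was alive at the last GC.
    return allocatedSinceLastGC > aliveAtLastGC / 2;
}

int main()
{
    const size_t MB = 1024 * 1024;
    std::printf("%d\n", wouldRequestUrgentGC(90 * MB, 150 * MB));  // 0: under the floor
    std::printf("%d\n", wouldRequestUrgentGC(120 * MB, 150 * MB)); // 1: >100MB and >75MB
    std::printf("%d\n", wouldRequestUrgentGC(150 * MB, 400 * MB)); // 0: not >200MB
    return 0;
}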
1431 class HeapAllocatorQuantizer { | 1475 class HeapAllocatorQuantizer { |
1432 public: | 1476 public: |
1433 template<typename T> | 1477 template<typename T> |
1434 static size_t quantizedSize(size_t count) | 1478 static size_t quantizedSize(size_t count) |
1435 { | 1479 { |
1436 RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T)); | 1480 RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T)); |
1437 return BaseHeap::roundedAllocationSize(count * sizeof(T)); | 1481 return BaseHeap::roundedAllocationSize(count * sizeof(T)); |
1438 } | 1482 } |
1439 static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize; | 1483 static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize; |
1440 }; | 1484 }; |
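The quantizer's RELEASE_ASSERT checks count against kMaxUnquantizedAllocation / sizeof(T) so that the multiplication count * sizeof(T) cannot overflow size_t; the result is then rounded up to the heap's allocation granularity. A hedged standalone sketch of that overflow-guard-plus-rounding shape (the cap and the 8-byte granularity are invented; Blink's real values come from maxHeapObjectSize and BaseHeap):

#include <cassert>
#include <cstddef>

const size_t kMaxUnquantizedAllocation = 1 << 27; // illustrative cap only
const size_t kGranularity = 8;                    // illustrative bucket granularity

// Round a byte size up to the next granularity boundary (kGranularity is a power of two).
size_t roundedAllocationSize(size_t size)
{
    return (size + kGranularity - 1) & ~(kGranularity - 1);
}

template<typename T>
size_t quantizedSize(size_t count)
{
    // Dividing the cap by sizeof(T) rejects counts whose byte size would
    // overflow, before the multiplication is performed.
    assert(count <= kMaxUnquantizedAllocation / sizeof(T));
    return roundedAllocationSize(count * sizeof(T));
}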
(...skipping 1001 matching lines...)
2442 template<typename T, size_t inlineCapacity> | 2486 template<typename T, size_t inlineCapacity> |
2443 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; | 2487 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; |
2444 template<typename T, size_t inlineCapacity> | 2488 template<typename T, size_t inlineCapacity> |
2445 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; | 2489 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; |
2446 template<typename T, typename U, typename V> | 2490 template<typename T, typename U, typename V> |
2447 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; | 2491 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; |
2448 | 2492 |
2449 } // namespace blink | 2493 } // namespace blink |
2450 | 2494 |
2451 #endif // Heap_h | 2495 #endif // Heap_h |