Chromium Code Reviews

Unified Diff: third_party/WebKit/Source/platform/heap/HeapCompact.h

Issue 2531973002: Simple BlinkGC heap compaction. (Closed)
Patch Set: synchronize on compaction finish Created 4 years ago
Index: third_party/WebKit/Source/platform/heap/HeapCompact.h
diff --git a/third_party/WebKit/Source/platform/heap/HeapCompact.h b/third_party/WebKit/Source/platform/heap/HeapCompact.h
new file mode 100644
index 0000000000000000000000000000000000000000..c7badc6a49f3a22e4e0ea1d779d90db46b382c8a
--- /dev/null
+++ b/third_party/WebKit/Source/platform/heap/HeapCompact.h
@@ -0,0 +1,226 @@
+// Copyright 2016 Opera Software AS. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef HeapCompact_h
+#define HeapCompact_h
+
+#include "platform/PlatformExport.h"
+#include "platform/heap/BlinkGC.h"
+#include "wtf/PtrUtil.h"
+#include "wtf/ThreadingPrimitives.h"
+#include "wtf/Vector.h"
+
+#include <bitset>
+#include <utility>
+
+// Global dev/debug switches:
+
+// Set to 0 to prevent compaction GCs, disabling the heap compaction feature.
+#define ENABLE_HEAP_COMPACTION 1
+
+// Emit debug info during compaction.
+#define DEBUG_HEAP_COMPACTION 0
+
+// Emit stats on freelist occupancy.
+// 0 - disabled, 1 - minimal, 2 - verbose.
+#define DEBUG_HEAP_FREELIST 0
+
+// Log the amount of time spent compacting.
+#define DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME 0
+
+// Compact during all idle + precise GCs; for debugging.
+#define STRESS_TEST_HEAP_COMPACTION 0
+
+#if OS(WIN)
+// TODO: use standard logging facilities.
+#define LOG_HEAP_COMPACTION_INTERNAL(msg, ...) \
+ do { \
+ char output[512]; \
+ snprintf(output, sizeof(output), msg, ##__VA_ARGS__); \
+ OutputDebugStringA(output); \
+ } while (0)
+#else
+#define LOG_HEAP_COMPACTION_INTERNAL(msg, ...) \
+ fprintf(stderr, msg, ##__VA_ARGS__)
+#endif
+
+#if DEBUG_HEAP_COMPACTION
+#define LOG_HEAP_COMPACTION(msg, ...) \
+ LOG_HEAP_COMPACTION_INTERNAL(msg, ##__VA_ARGS__)
+#else
+#define LOG_HEAP_COMPACTION(msg, ...) \
+ do { \
+ } while (0)
+#endif
+
+#if DEBUG_HEAP_FREELIST
+#define LOG_HEAP_FREELIST(msg, ...) \
+ LOG_HEAP_COMPACTION_INTERNAL(msg, ##__VA_ARGS__)
+#else
+#define LOG_HEAP_FREELIST(msg, ...) \
+ do { \
+ } while (0)
+#endif
+
+#if DEBUG_HEAP_FREELIST == 2
+#define LOG_HEAP_FREELIST_VERBOSE(msg, ...) \
+ LOG_HEAP_COMPACTION_INTERNAL(msg, ##__VA_ARGS__)
+#else
+#define LOG_HEAP_FREELIST_VERBOSE(msg, ...) \
+ do { \
+ } while (0)
+#endif
haraken 2016/12/02 12:43:20 Consider cleaning up these macros before landing this patch.
sof 2016/12/04 14:55:38 Added TODO as an additional reminder to do so.
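For reference, a minimal sketch of how these logging macros would be used at a call site; the function name, format strings, and values below are illustrative only (not part of the patch), and with the debug switches left at 0 both macros compile away to empty statements:

    #include <cstddef>

    // Hypothetical call site; assumes the macros above are in scope
    // via HeapCompact.h.
    void reportArenaCompaction(int arenaIndex, size_t freedSize) {
      LOG_HEAP_COMPACTION("Compacted arena %d: freed %zu bytes\n",
                          arenaIndex, freedSize);
      LOG_HEAP_FREELIST_VERBOSE("Arena %d freelist residue: %zu bytes\n",
                                arenaIndex, freedSize);
    }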
+
+namespace blink {
+
+class NormalPageArena;
+class BasePage;
+class ThreadHeap;
+class ThreadState;
+
+class PLATFORM_EXPORT HeapCompact final {
+ public:
+ static std::unique_ptr<HeapCompact> create() {
+ return std::unique_ptr<HeapCompact>(new HeapCompact);
haraken 2016/12/02 12:43:20 wrapUnique
sof 2016/12/04 14:55:37 Done, not sure I "get" the benefits of wrapUnique().
+ }
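For reference, the form suggested above (and adopted, per the reply) would read roughly as follows; WTF::wrapUnique() comes from the already-included wtf/PtrUtil.h and simply infers the pointee type:

    static std::unique_ptr<HeapCompact> create() {
      // Equivalent behavior; wrapUnique() avoids spelling out the
      // unique_ptr constructor, and the private HeapCompact constructor
      // remains accessible because the call site is inside the class.
      return WTF::wrapUnique(new HeapCompact);
    }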
+
+ ~HeapCompact();
+
+ // Check if a GC for the given type and reason should perform additional
+ // heap compaction once it has run.
+ //
+ // If deemed worthy, heap compaction is implicitly initialized and set up.
+ void checkIfCompacting(ThreadHeap*,
+ Visitor*,
+ BlinkGC::GCType,
+ BlinkGC::GCReason);
+
+ // Returns true if the ongoing GC will perform compaction.
+ bool isCompacting() const { return m_doCompact; }
+
+ // Returns true if the ongoing GC will compact the given arena.
haraken 2016/12/02 12:43:20 Update the comment.
sof 2016/12/04 14:55:37 Done.
+ bool isCompactingArena(int arenaIndex) const {
+ return m_doCompact && (m_compactableHeaps & (0x1u << arenaIndex));
haraken 2016/12/02 12:43:20 Avoid hard-coding 0x1u.
sof 2016/12/04 14:55:37 That one-liner & idiom is as clear as can be; I do
+ }
+
+ // Returns |true| if the ongoing GC may compact the given arena/sub-heap.
+ static bool isCompactableArena(int arenaIndex) {
+ return arenaIndex >= BlinkGC::Vector1ArenaIndex &&
+ arenaIndex <= BlinkGC::HashTableArenaIndex;
+ }
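The bit-per-arena encoding discussed above can be sketched standalone as follows; the arena index values are made up for illustration and are not Blink's real ones:

    #include <cassert>

    static const int kVector1ArenaIndex = 0;    // illustrative only
    static const int kHashTableArenaIndex = 3;  // illustrative only

    // Mirrors the |m_compactableHeaps & (0x1u << arenaIndex)| idiom:
    // one bit per arena index in a plain unsigned bitmask.
    bool isArenaBitSet(unsigned arenas, int arenaIndex) {
      return arenas & (0x1u << arenaIndex);
    }

    int main() {
      unsigned arenas = 0x1u << kVector1ArenaIndex;
      assert(isArenaBitSet(arenas, kVector1ArenaIndex));
      assert(!isArenaBitSet(arenas, kHashTableArenaIndex));
      return 0;
    }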
+
+ // See |Heap::registerMovingObjectReference()| documentation.
+ void registerMovingObjectReference(MovableReference* slot);
+
+ // See |Heap::registerMovingObjectCallback()| documentation.
+ void registerMovingObjectCallback(MovableReference,
+ MovingObjectCallback,
+ void* callbackData);
+
+ // Register |slot| as containing a reference to the interior of a movable
+ // object.
+ //
+ // |registerMovingObjectReference()| handles the common case of holding
+ // an external reference to a backing store object. |registerRelocation()|
+ // handles the relocation of external references into backing store
+ // objects -- something not currently done or needed by the Blink
+ // codebase, but kept open as a possibility until further notice.
+ void registerRelocation(MovableReference* slot);
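A minimal standalone sketch of the bookkeeping these registration hooks imply, assuming (as a simplification) a map from an object's current address to the slot that must be patched; the real MovableObjectFixups class is more involved:

    #include <map>

    using Address = char*;
    using MovableReferenceT = void*;  // stand-in for Blink's MovableReference

    std::map<MovableReferenceT, MovableReferenceT*> g_slots;

    // Remember where an external reference lives, keyed by what it
    // currently points at.
    void registerMovingObjectReferenceSketch(MovableReferenceT* slot) {
      g_slots[*slot] = slot;
    }

    // When the compactor later moves |from| to |to|, patch the slot.
    void movedObjectSketch(Address from, Address to) {
      auto it = g_slots.find(from);
      if (it != g_slots.end())
        *it->second = to;
    }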
+
+ // Signal that the compaction pass is being started, finished by some
+ // ThreadState.
+ void startCompacting(ThreadState*);
+ void finishedCompacting(ThreadState*);
+
+ // Perform any relocation post-processing after having completed compacting
+ // the given sub heap. Pass along the number of pages that were freed from
+ // the arena, along with their total size.
+ void finishedArenaCompaction(NormalPageArena*,
+ size_t freedPages,
+ size_t freedSize);
+
+ // Record the main thread's compactable freelist residency (in bytes),
+ // along with the overall size. Sizes are relative to the compactable
+ // sub-heaps, and not a total count. Along with the totals, per-heap
+ // numbers are also provided.
+ //
+ // The recording is done after the decision has been made on whether
+ // or not to compact during the _current_ GC. If compacting, the size
+ // sampling will be ignored and the internal counters are reset.
+ //
+ // However, if not compacting, the values will be consulted the next time
+ // a GC goes ahead and it decides whether to compact or not.
+ void setHeapResidency(
+ size_t liveSize,
+ size_t freeSize,
haraken 2016/12/02 12:43:20 totalArenaSize totalFreeListSize to be consistent
sof 2016/12/04 14:55:37 Done.
+ const Vector<std::pair<size_t, size_t>>& heapResidencies);
+
+ // Register the heap page as containing live objects that will all be
+ // compacted, assuming the ongoing GC is a compacting one.
+ void addCompactablePage(BasePage*);
+
+ // Notify heap compaction that the object at |from| has been moved to |to|.
+ // (Called by the sweep compaction pass.)
+ void movedObject(Address from, Address to);
+
+ // For unit testing only: arrange for a compaction GC to be triggered
+ // next time a non-conservative GC is run. Sets the compact-next flag
+ // to the new value, returning the old value.
+ static bool scheduleCompactionGCForTesting(bool);
haraken 2016/12/02 12:43:20 Shall we simply forbid the heap compaction on conservative GCs?
sof 2016/12/04 14:55:38 Yes, we absolutely must disable compaction during conservative GCs.
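A hypothetical unit-test usage of this hook, restoring the previous flag value afterwards (the GC-triggering call is elided):

    // Force the next non-conservative GC to compact, then restore the flag.
    bool previous = HeapCompact::scheduleCompactionGCForTesting(true);
    // ... run a precise GC here and assert on post-compaction state ...
    HeapCompact::scheduleCompactionGCForTesting(previous);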
+
+ private:
+ class MovableObjectFixups;
+
+ HeapCompact();
+
+ // Parameters controlling when compaction should be done:
+
+ // Number of GCs that must have passed since last compaction GC.
+ static const int kCompactIntervalThreshold = 10;
haraken 2016/12/02 12:43:20 kGCIntervalThresholdSinceLastCompaction (c.f., m_gcCountSinceLastCompaction)
sof 2016/12/04 14:55:37 kGCCountSinceLastCompactionThreshold
+
+ // Freelist size threshold that must be exceeded before compaction
+ // should be considered.
+ static const size_t kFreeThreshold = 512 * 1024;
haraken 2016/12/02 12:43:20 kFreeListSizeThreashold
sof 2016/12/04 14:55:37 Done.
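A sketch of how the two parameters plausibly combine into a go/no-go decision; the actual predicate lives in checkIfCompacting() and also weighs the GC type, the GC reason, and the stress-test switch above:

    #include <cstddef>

    // Illustrative only; the constants mirror kCompactIntervalThreshold
    // and kFreeThreshold from the header.
    bool shouldCompactSketch(size_t freeListSize,
                             size_t gcCountSinceLastCompaction) {
      const size_t kFreeListSizeThreshold = 512 * 1024;
      const size_t kGCCountThreshold = 10;
      return freeListSize > kFreeListSizeThreshold &&
             gcCountSinceLastCompaction > kGCCountThreshold;
    }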
+
+ MovableObjectFixups& fixups();
+
+ std::unique_ptr<MovableObjectFixups> m_fixups;
+
+ // Set to |true| when a compacting sweep will go ahead.
+ bool m_doCompact;
+ size_t m_gcCountSinceLastCompaction;
+
+ Mutex m_mutex;
+
+ // All threads performing a GC must synchronize on completion
+ // of all heap compactions. Not doing so risks one thread resuming
+ // the mutator, which could perform cross-thread access to a heap
+ // that's still in the process of being compacted.
+ ThreadCondition m_finished;
+
+ // Number of heap threads participating.
+ int m_threadCount;
haraken 2016/12/02 12:43:19 m_mutex => m_threadSyncronizationMutex, m_finished => m_threadSyncronizationCondition?
sof 2016/12/04 14:55:38 "threadSynchronization" is implied for these abstr
+
+ // Last reported freelist size, across all heaps.
+ size_t m_freeListAllocations;
+ // If compacting, the i'th sub-heap will be compacted
+ // if the corresponding bit is set.
+ unsigned m_compactableHeaps;
haraken 2016/12/02 12:43:20 m_compactableHeaps => m_compactingArenas ? (c.f.,
sof 2016/12/04 14:55:37 Done, replaced the use of (sub)heaps with arenas,
+
+ // Stats: number of (complete) pages freed/decommitted, plus
+ // bytes freed (which will include partial pages).
+ size_t m_freedPages;
+ size_t m_freedSize;
+
+#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
+ int m_startCompaction;
+ double m_startCompactionTimeMS;
+#endif
+
+ static bool s_forceCompactionGC;
+};
+
+} // namespace blink
+
+#endif // HeapCompact_h
