| Index: third_party/WebKit/Source/platform/heap/HeapCompact.h
|
| diff --git a/third_party/WebKit/Source/platform/heap/HeapCompact.h b/third_party/WebKit/Source/platform/heap/HeapCompact.h
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..c1b8254f59f3d5a471a276886302a6c62b4cf589
|
| --- /dev/null
|
| +++ b/third_party/WebKit/Source/platform/heap/HeapCompact.h
|
| @@ -0,0 +1,221 @@
|
| +// Copyright 2016 Opera Software AS. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#ifndef HeapCompact_h
|
| +#define HeapCompact_h
|
| +
|
| +#include "platform/PlatformExport.h"
|
| +#include "platform/heap/BlinkGC.h"
|
| +#include "wtf/PtrUtil.h"
|
| +#include "wtf/Vector.h"
|
| +
|
| +#include <bitset>
|
| +#include <utility>
|
| +
|
| +// Global dev/debug switches:
|
| +
|
| +// Set to 0 to prevent compaction GCs, disabling the heap compaction feature.
|
| +#define ENABLE_HEAP_COMPACTION 1
|
| +
|
| +// Emit debug info during compaction.
|
| +#define DEBUG_HEAP_COMPACTION 0
|
| +
|
| +// Emit stats on freelist occupancy.
|
| +// 0 - disabled, 1 - minimal, 2 - verbose.
|
| +#define DEBUG_HEAP_FREELIST 0
|
| +
|
| +// Log the amount of time spent compacting.
|
| +#define DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME 0
|
| +
|
| +// Compact during all idle + precise GCs; for debugging.
|
| +#define STRESS_TEST_HEAP_COMPACTION 0
|
| +
|
| +#if OS(WIN)
|
| +// TODO: use standard logging facilities.
|
| +#define LOG_HEAP_COMPACTION_INTERNAL(msg, ...) \
|
| + do { \
|
| + char output[512]; \
|
| +    snprintf(output, sizeof(output), msg, ##__VA_ARGS__);          \
|
| + OutputDebugStringA(output); \
|
| + } while (0)
|
| +#else
|
| +#define LOG_HEAP_COMPACTION_INTERNAL(msg, ...) \
|
| + fprintf(stderr, msg, ##__VA_ARGS__)
|
| +#endif
|
| +
|
| +#if DEBUG_HEAP_COMPACTION
|
| +#define LOG_HEAP_COMPACTION(msg, ...) \
|
| + LOG_HEAP_COMPACTION_INTERNAL(msg, ##__VA_ARGS__)
|
| +#else
|
| +#define LOG_HEAP_COMPACTION(msg, ...) \
|
| + do { \
|
| + } while (0)
|
| +#endif
|
| +
|
| +#if DEBUG_HEAP_FREELIST
|
| +#define LOG_HEAP_FREELIST(msg, ...) \
|
| + LOG_HEAP_COMPACTION_INTERNAL(msg, ##__VA_ARGS__)
|
| +#else
|
| +#define LOG_HEAP_FREELIST(msg, ...) \
|
| + do { \
|
| + } while (0)
|
| +#endif
|
| +
|
| +#if DEBUG_HEAP_FREELIST == 2
|
| +#define LOG_HEAP_FREELIST_VERBOSE(msg, ...) \
|
| + LOG_HEAP_COMPACTION_INTERNAL(msg, ##__VA_ARGS__)
|
| +#else
|
| +#define LOG_HEAP_FREELIST_VERBOSE(msg, ...) \
|
| + do { \
|
| + } while (0)
|
| +#endif
|
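| +
|
| +// Illustrative use only: the printf-style logging macros above are used as
|
| +//
|
| +//   LOG_HEAP_COMPACTION("Compaction freed %zu pages (%zu bytes)\n",
|
| +//                       freedPages, freedSize);
|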
| +
|
| +namespace blink {
|
| +
|
| +class NormalPageArena;
|
| +class BasePage;
|
| +class ThreadHeap;
|
| +class ThreadState;
|
| +class Visitor;
|
| +
|
| +class PLATFORM_EXPORT HeapCompact final {
|
| + public:
|
| + static std::unique_ptr<HeapCompact> create() {
|
| +    return WTF::wrapUnique(new HeapCompact);
|
| + }
|
| +
|
| + ~HeapCompact();
|
| +
|
| + // Check if a GC for the given type and reason should perform additional
|
| + // heap compaction once it has run.
|
| + //
|
| + // If deemed worthy, heap compaction is implicitly initialized and set up.
|
| + void checkIfCompacting(ThreadHeap*,
|
| + Visitor*,
|
| + BlinkGC::GCType,
|
| + BlinkGC::GCReason);
|
| +
|
| + // Returns true if the ongoing GC will perform compaction.
|
| + bool isCompacting() const { return m_doCompact; }
|
| +
|
| +  // Returns true if the ongoing GC will compact the given arena/sub-heap.
|
| + bool isCompactingArena(int arenaIndex) const {
|
| + return m_doCompact && (m_compactableHeaps & (0x1u << arenaIndex));
|
| + }
|
| +
|
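| +  // Illustrative use only, not part of this patch; |compaction()| and
|
| +  // |sweepAndCompact()| stand in for whatever the embedding heap and
|
| +  // arena provide. A sweep pass might consult the predicate as
|
| +  //
|
| +  //   if (heap->compaction()->isCompactingArena(arenaIndex))
|
| +  //     arena->sweepAndCompact();
|
| +  //   else
|
| +  //     arena->sweep();
|
| +
|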
| +  // Returns |true| if the given arena/sub-heap is one that may be compacted.
|
| + static bool isCompactableArena(int arenaIndex) {
|
| + return arenaIndex >= BlinkGC::Vector1ArenaIndex &&
|
| + arenaIndex <= BlinkGC::HashTableArenaIndex;
|
| + }
|
| +
|
| + // See |Heap::registerMovingObjectReference()| documentation.
|
| + void registerMovingObjectReference(MovableReference* slot);
|
| +
|
| + // See |Heap::registerMovingObjectCallback()| documentation.
|
| + void registerMovingObjectCallback(MovableReference,
|
| + MovingObjectCallback,
|
| + void* callbackData);
|
| +
|
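| +  // Illustrative use only, not part of this patch; |compact|, |m_buffer|,
|
| +  // |myFixupCallback| and |myCallbackData| are hypothetical names. A
|
| +  // collection backing store might be registered as movable with
|
| +  //
|
| +  //   compact->registerMovingObjectReference(
|
| +  //       reinterpret_cast<MovableReference*>(&m_buffer));
|
| +  //
|
| +  // and, if it must adjust internal state after being moved, also with
|
| +  //
|
| +  //   compact->registerMovingObjectCallback(
|
| +  //       reinterpret_cast<MovableReference>(m_buffer),
|
| +  //       myFixupCallback, myCallbackData);
|
| +
|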
| +  // Register |slot| as containing a reference to the interior of a movable
|
| +  // object.
|
| +  //
|
| +  // |registerMovingObjectReference()| handles the common case of holding an
|
| +  // external reference to a backing store object. |registerRelocation()|
|
| +  // handles the relocation of external references into backing store
|
| +  // objects - something very rarely done by the Blink codebase, but a
|
| +  // possibility.
|
| + void registerRelocation(MovableReference* slot);
|
| +
|
| +  // Signal that the compaction pass is being started or has been finished
|
| +  // by the given ThreadState.
|
| + void startCompacting(ThreadState*);
|
| + void finishedCompacting(ThreadState*);
|
| +
|
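| +  // Illustrative calling protocol only, not mandated by this header; the
|
| +  // sweep steps are placeholders:
|
| +  //
|
| +  //   compact->startCompacting(threadState);
|
| +  //   ... sweep and compact this thread's compactable arenas ...
|
| +  //   compact->finishedCompacting(threadState);
|
| +
|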
| +  // Perform any relocation post-processing after having completed compacting
|
| +  // the given sub heap. Pass along the number of pages that were freed from
|
| +  // the arena, along with their total size.
|
| + void finishedArenaCompaction(NormalPageArena*,
|
| + size_t freedPages,
|
| + size_t freedSize);
|
| +
|
| +  // Record the main thread's compactable freelist residency (in bytes),
|
| +  // along with the overall size. The sizes only cover the compactable
|
| +  // sub-heaps; they are not totals for the entire heap. In addition to
|
| +  // the overall numbers, per-heap figures are also provided.
|
| + //
|
| + // The recording is done after the decision has been made on whether
|
| + // or not to compact during the _current_ GC. If compacting, the size
|
| + // sampling will be ignored and the internal counters are reset.
|
| + //
|
| +  // However, if not compacting, the values will be consulted the next time
|
| +  // a GC runs and decides whether or not to compact.
|
| + void setHeapResidency(
|
| + size_t liveSize,
|
| + size_t freeSize,
|
| + const Vector<std::pair<size_t, size_t>>& heapResidencies);
|
| +
|
| +  // Register the heap page as containing live objects that will all be
|
| +  // compacted, should the ongoing GC turn out to be a compacting one.
|
| + void addCompactablePage(BasePage*);
|
| +
|
| +  // Notify heap compaction that the object at |from| has been moved to |to|.
|
| + // (Called by the sweep compaction pass.)
|
| + void movedObject(Address from, Address to);
|
| +
|
| +  // For unit testing only: arrange for a compaction GC to be triggered
|
| +  // the next time a non-conservative GC is run. Sets the compact-next flag
|
| +  // to the new value, returning the old one.
|
| + static bool scheduleCompactionGCForTesting(bool);
|
| +
|
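| +  // Illustrative test usage only (hypothetical unit test):
|
| +  //
|
| +  //   bool wasScheduled = HeapCompact::scheduleCompactionGCForTesting(true);
|
| +  //   ... run a precise, non-conservative GC ...
|
| +  //   HeapCompact::scheduleCompactionGCForTesting(wasScheduled);
|
| +
|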
| + private:
|
| + class MovableObjectFixups;
|
| +
|
| + HeapCompact();
|
| +
|
| + // Parameters controlling when compaction should be done:
|
| +
|
| + // Number of GCs that must have passed since last compaction GC.
|
| + static const int kCompactIntervalThreshold = 10;
|
| +
|
| + // Freelist size threshold that must be exceeded before compaction
|
| + // should be considered.
|
| + static const size_t kFreeThreshold = 512 * 1024;
|
| +
|
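| +  // Taken together, the above suggests the rough shape of the heuristic
|
| +  // applied by checkIfCompacting(); illustrative only:
|
| +  //
|
| +  //   m_gcCountSinceLastCompaction > kCompactIntervalThreshold &&
|
| +  //       m_freeListAllocations > kFreeThreshold
|
| +  //
|
| +  // i.e. enough GCs have passed since the last compaction and the
|
| +  // compactable freelists have grown large enough to make it worthwhile.
|
| +
|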
| + MovableObjectFixups& fixups();
|
| +
|
| + std::unique_ptr<MovableObjectFixups> m_fixups;
|
| +
|
| + // Set to |true| when a compacting sweep will go ahead.
|
| + bool m_doCompact;
|
| + size_t m_gcCountSinceLastCompaction;
|
| +
|
| + // Number of heap threads participating in the compaction.
|
| + int m_threadCount;
|
| +
|
| + // Last reported freelist size, across all heaps.
|
| + size_t m_freeListAllocations;
|
| +  // If compacting, the i'th sub-heap will be compacted if its
|
| +  // corresponding bit is set.
|
| + unsigned m_compactableHeaps;
|
| +
|
| +  // Stats: the number of (complete) pages freed/decommitted, plus the
|
| +  // number of bytes freed (which will include partial pages).
|
| + size_t m_freedPages;
|
| + size_t m_freedSize;
|
| +
|
| +#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
|
| + int m_startCompaction;
|
| + double m_startCompactionTimeMS;
|
| +#endif
|
| +
|
| + static bool s_forceCompactionGC;
|
| +};
|
| +
|
| +} // namespace blink
|
| +
|
| +#endif // HeapCompact_h
|
|
|