Index: Source/platform/heap/Handle.h |
diff --git a/Source/platform/heap/Handle.h b/Source/platform/heap/Handle.h |
index 62c7734053274e02f73723c59bfffb6f39cc9841..277b9f7b6bea525d10879b3a9ec41fab7e1ee64e 100644 |
--- a/Source/platform/heap/Handle.h |
+++ b/Source/platform/heap/Handle.h |
@@ -150,6 +150,199 @@ private: |
friend class ThreadState; |
}; |
+ |
+// Number of WrapperPersistentNode slots embedded in each WrapperPersistentRegion. |
+const int wrapperPersistentsPerRegion = 256; |
+// A node's m_regionOffset packs the byte offset back to its region's base in |
+// the high bits and reserves the low 2 bits for flags; the offset mask strips |
+// the flag bits, and the live-bit mask tests bit 0 (slot currently in use). |
+const size_t wrapperPersistentOffsetMask = ~static_cast<size_t>(3); |
+const size_t wrapperPersistentLiveBitMask = 1; |
+ |
+// A GC root slot that keeps a heap object alive on behalf of a wrapper. |
+// Nodes live inline inside a WrapperPersistentRegion; a dead/unused node |
+// doubles as a link in the region's intrusive free list (see m_raw below). |
+class WrapperPersistentNode { |
+    ALLOW_ONLY_INLINE_ALLOCATION(); |
+    WTF_MAKE_NONCOPYABLE(WrapperPersistentNode); |
+public: |
+    // True when this slot is allocated/in use (live bit set in m_regionOffset). |
+    bool isAlive() { return m_regionOffset & wrapperPersistentLiveBitMask; } |
+ |
+    // Recover the containing region by walking back the byte offset encoded |
+    // in m_regionOffset. |
+    WrapperPersistentRegion* region() |
+    { |
+        return reinterpret_cast<WrapperPersistentRegion*>( |
+            reinterpret_cast<Address>(this) - regionOffset()); |
+    } |
+ |
+    // Base implementation does nothing; WrapperPersistent<T> overrides this to |
+    // mark the held object. |
+    virtual void trace(Visitor* visitor) { } |
+ |
+    static inline void destroy(const WrapperPersistentNode*); |
+ |
+protected: |
+    WrapperPersistentNode() : m_raw(0), m_regionOffset(0) { } |
+    WrapperPersistentNode(void *raw, size_t regionOffset) : m_raw(raw), m_regionOffset(regionOffset) { } |
+ |
+private: |
+    // Offset from this node back to the base of its region (flag bits cleared). |
+    size_t regionOffset() { return m_regionOffset & wrapperPersistentOffsetMask; } |
+ |
+    // Claim this free slot: return the next free node (stashed in m_raw while |
+    // the slot is dead) and clear m_raw for the caller to install the object. |
+    WrapperPersistentNode* takeSlot() |
+    { |
+        // The slot should not be alive at the point where it is allocated. |
+        ASSERT(!isAlive()); |
+        WrapperPersistentNode* nextFree = reinterpret_cast<WrapperPersistentNode*>(m_raw); |
+        m_raw = 0; |
+        return nextFree; |
+    } |
+ |
+    // Release this slot: clear the live bit, chain the previous free-list head |
+    // into m_raw, and return this node as the new free-list head. |
+    WrapperPersistentNode* freeSlot(WrapperPersistentNode* nextFree) |
+    { |
+        m_regionOffset &= ~wrapperPersistentLiveBitMask; |
+        m_raw = nextFree; |
+        return this; |
+    } |
+ |
+    // Don't allow delete being called on wrapper persistent nodes. We |
+    // do use placement new to initialize the slot with the right vtable. See |
+    // WrapperPersistent<T> below. |
+    void operator delete(void*); |
+ |
+protected: |
+    // m_raw is used both to point to the object when the WrapperPersistentNode is used/alive |
+    // and to point to the next free WrapperPersistentNode in the region when the node is |
+    // unused/dead. |
+    void* m_raw; |
+ |
+    // The m_regionOffset field encodes liveness of the slot as well as being an |
+    // offset from this node to the base of the containing WrapperPersistentRegion. |
+    size_t m_regionOffset; |
+ |
+    friend class WrapperPersistentRegion; |
+}; |
+ |
+// The typed persistent slot for an object of type T. The class exists so |
+// each slot's vtable dispatches trace() to the Visitor::mark overload for T. |
+template<typename T> |
+class WrapperPersistent FINAL : public WrapperPersistentNode { |
+    ALLOW_ONLY_INLINE_ALLOCATION(); |
+public: |
+    // Allocates a live slot from the current thread's region list and |
+    // constructs a WrapperPersistent<T> in it; defined out of line below. |
+    static WrapperPersistent<T>* create(T* raw); |
+ |
+    virtual void trace(Visitor* visitor) OVERRIDE |
+    { |
+        ASSERT(isAlive()); |
+        visitor->mark(static_cast<T*>(m_raw)); |
+    } |
+ |
+private: |
+    WrapperPersistent() { } |
+ |
+    // We need to use a constructor to initialize the allocated slot since it |
+    // has a vtable which must be set to the WrapperPersistent<T> type. |
+    WrapperPersistent(T* raw, size_t regionOffset) : WrapperPersistentNode(raw, regionOffset) { } |
+ |
+    // Don't allow delete being called on wrapper persistents. |
+    void operator delete(void*); |
+}; |
+ |
+// A fixed-size block of WrapperPersistentNode slots threaded together with an |
+// intrusive free list. Regions are chained into a doubly-linked list |
+// (m_prev/m_next) managed via the static insertHead/removeHead helpers. |
+class PLATFORM_EXPORT WrapperPersistentRegion { |
+    WTF_MAKE_NONCOPYABLE(WrapperPersistentRegion); |
+public: |
+    WrapperPersistentRegion() |
+    { |
+        // Build the free list back to front so m_freeHead ends up at entry 0. |
+        WrapperPersistentNode* nextFree = 0; |
+        for (int i = wrapperPersistentsPerRegion - 1; i >= 0; --i) { |
+            size_t regionOffset = reinterpret_cast<Address>(&m_entries[i]) - reinterpret_cast<Address>(this); |
+            // Setup the free slot with an offset to the containing region's base and a pointer to the next |
+            // free slot in the region. |
+            ASSERT(!(regionOffset & ~wrapperPersistentOffsetMask)); |
+            new (&m_entries[i]) WrapperPersistentNode(nextFree, regionOffset); |
+            nextFree = &m_entries[i]; |
+        } |
+        m_prev = 0; |
+        m_next = 0; |
+        m_freeHead = nextFree; |
+        m_count = 0; |
+    } |
+ |
+    // Returns the address of a free slot in this region, or 0 when full. |
+    Address allocate() |
+    { |
+        if (!m_freeHead) { |
+            ASSERT(m_count == wrapperPersistentsPerRegion); |
+            return 0; |
+        } |
+        // We have a free persistent slot in this region. |
+        WrapperPersistentNode* freeSlot = m_freeHead; |
+        // Take the slot and advance m_freeHead to the next free slot. |
+        m_freeHead = freeSlot->takeSlot(); |
+        ASSERT(m_count < wrapperPersistentsPerRegion); |
+        m_count++; |
+        return reinterpret_cast<Address>(freeSlot); |
+    } |
+ |
+    // Returns the slot to this region's free list; when the region becomes |
+    // empty, hands the whole region back to the current ThreadState. |
+    // NOTE(review): uses ThreadState::current(), so freeing appears to be |
+    // required on the owning thread — confirm against callers. |
+    void free(WrapperPersistentNode* object) |
+    { |
+        ASSERT(object); |
+        m_freeHead = object->freeSlot(m_freeHead); |
+        ASSERT(m_count > 0); |
+        m_count--; |
+        if (!m_count) |
+            ThreadState::current()->freeWrapperPersistentRegion(this); |
+    } |
+ |
+    bool removeIfNotLast(WrapperPersistentRegion** headPtr); |
+    static void insertHead(WrapperPersistentRegion** headPtr, WrapperPersistentRegion* newHead); |
+    static WrapperPersistentRegion* removeHead(WrapperPersistentRegion** headPtr); |
+    static Address outOfLineAllocate(ThreadState*, WrapperPersistentRegion**); |
+    // Traces every live slot in every region of the list starting at |head|. |
+    static void trace(WrapperPersistentRegion* head, Visitor* visitor) |
+    { |
+        for (WrapperPersistentRegion* current = head; current; current = current->m_next) |
+            current->traceRegion(visitor); |
+    } |
+ |
+private: |
+    // Visits all live entries of this region. In release (NDEBUG) builds the |
+    // scan stops early once m_count live entries have been traced. |
+    void traceRegion(Visitor* visitor) |
+    { |
+        size_t live = 0; |
+ |
+#ifdef NDEBUG |
+        for (int i = 0; i < wrapperPersistentsPerRegion && live < m_count; ++i) { |
+#else |
+        // In DEBUG mode we scan all entries to validate we only have m_count |
+        // live entries. |
+        for (int i = 0; i < wrapperPersistentsPerRegion; ++i) { |
+#endif |
+            if (m_entries[i].isAlive()) { |
+                m_entries[i].trace(visitor); |
+                live++; |
+            } |
+        } |
+        ASSERT(live == m_count); |
+    } |
+ |
+    // Doubly-linked-list links to the neighboring regions. |
+    WrapperPersistentRegion* m_prev; |
+    WrapperPersistentRegion* m_next; |
+    // Head of the intrusive free list of dead slots (0 when the region is full). |
+    WrapperPersistentNode* m_freeHead; |
+    // Number of live (allocated) slots in this region. |
+    size_t m_count; |
+    WrapperPersistentNode m_entries[wrapperPersistentsPerRegion]; |
+}; |
+ |
+// Allocates a slot from the current thread's wrapper persistent regions and |
+// placement-news a live WrapperPersistent<T> into it, rooting |raw| for GC. |
+// The returned node must be released via WrapperPersistentNode::destroy(). |
+template<typename T> |
+WrapperPersistent<T>* WrapperPersistent<T>::create(T* raw) |
+{ |
+    ThreadState* state = ThreadState::current(); |
+    WrapperPersistentRegion* region = state->wrapperRoots(); |
+    ASSERT(region); |
+    Address persistentSlot = region->allocate(); |
+    if (!persistentSlot) |
+        persistentSlot = WrapperPersistentRegion::outOfLineAllocate(state, &region); |
+    ASSERT(persistentSlot); |
+    ASSERT(!reinterpret_cast<WrapperPersistentNode*>(persistentSlot)->isAlive()); |
+ |
+    // Encode the slot's byte offset from the region base and set the live bit. |
+    size_t regionOffset = persistentSlot - reinterpret_cast<Address>(region); |
+    regionOffset |= wrapperPersistentLiveBitMask; |
+ |
+    // We use placement new to call the constructor to ensure that we setup the |
+    // vtable correctly. |
+    return new (persistentSlot) WrapperPersistent<T>(raw, regionOffset); |
+} |
+ |
+// Returns |node|'s slot to its region's free list. The parameter is const so |
+// callers holding const pointers can destroy; the const_cast is safe because |
+// the slot is mutable region storage being recycled, not a truly const object. |
+void WrapperPersistentNode::destroy(const WrapperPersistentNode* node) |
+{ |
+    WrapperPersistentNode* persistent = const_cast<WrapperPersistentNode*>(node); |
+    persistent->region()->free(persistent); |
+} |
+ |
// RootsAccessor for Persistent that provides access to thread-local list |
// of persistent handles. Can only be used to create handles that |
// are constructed and destructed on the same thread. |