Chromium Code Reviews

Side by Side Diff: Source/platform/heap/Handle.h

Issue 556823003: Revert "Revert of [oilpan]: optimize the way we allocate persistent handles in wrappers. (patchset … (Closed) Base URL: https://chromium.googlesource.com/chromium/blink.git@master
Patch Set: Created 6 years, 3 months ago
1 /*
2 * Copyright (C) 2014 Google Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 *
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
(...skipping 132 matching lines...)
143
144 private:
145 PersistentNode* m_next;
146 PersistentNode* m_prev;
147
148 template<typename RootsAccessor, typename Owner> friend class PersistentBase;
149 friend class PersistentAnchor;
150 friend class ThreadState;
151 };
152
153
154 const int wrapperPersistentsPerRegion = 256;
155 const size_t wrapperPersistentOffsetMask = ~static_cast<size_t>(3);
156 const size_t wrapperPersistentLiveBitMask = 1;
157
158 class WrapperPersistentNode {
159 ALLOW_ONLY_INLINE_ALLOCATION()
Mads Ager (chromium) 2014/09/09 11:29:23 Nit: Can we put a ';' at the end here. Makes it lo
wibling-chromium 2014/09/09 11:32:48 Disallow dynamic allocation. We only allow placement new.
wibling-chromium 2014/09/09 11:49:50 Done.
160 WTF_MAKE_NONCOPYABLE(WrapperPersistentNode);
161 public:
162 bool isAlive() { return m_regionOffset & wrapperPersistentLiveBitMask; }
zerny-chromium 2014/09/09 11:33:11 Nit: I'd add the corresponding accessor for: si
wibling-chromium 2014/09/09 11:49:50 Done.
163
164 WrapperPersistentRegion* region()
165 {
166 return reinterpret_cast<WrapperPersistentRegion*>(
167 reinterpret_cast<Address>(this) - (m_regionOffset & wrapperPersistentOffsetMask));
168 }
169
170 virtual void trace(Visitor* visitor) { }
171
172 static inline void destroy(const WrapperPersistentNode*);
wibling-chromium 2014/09/09 11:32:47 Use destroy(...) instead of calling delete on the
173
174 protected:
175 WrapperPersistentNode() : m_raw(0), m_regionOffset(0) { }
176 WrapperPersistentNode(void *raw, size_t regionOffset) : m_raw(raw), m_regionOffset(regionOffset) { }
wibling-chromium 2014/09/09 11:32:47 New constructor taking the regionOffset to ensure
177
178 private:
179 WrapperPersistentNode* takeSlot()
180 {
181 // The slot should not be alive at the point where it is allocated.
182 ASSERT(!isAlive());
183 WrapperPersistentNode* nextFree = reinterpret_cast<WrapperPersistentNode*>(m_raw);
184 m_raw = 0;
185 return nextFree;
186 }
187
188 WrapperPersistentNode* freeSlot(WrapperPersistentNode* nextFree)
189 {
190 m_regionOffset &= ~wrapperPersistentLiveBitMask;
191 m_raw = nextFree;
192 return this;
193 }
194
195 // Don't allow delete being called on wrapper persistent nodes. We
196 // do use placement new to initialize the slot with the right vtable. See
197 // WrapperPersistent<T> below.
198 void operator delete(void*);
wibling-chromium 2014/09/09 11:32:47 Ensure no one calls delete.
199
200 protected:
201 // m_raw is used both to point to the object when the WrapperPersistentNode is used/alive
202 // and to point to the next free wrapperPersistentNode in the region when the node is
203 // unused/dead.
204 void* m_raw;
205
206 // The m_regionOffset field is an offset from this node to the base of the containing
207 // WrapperPersistentRegion.
zerny-chromium 2014/09/09 11:33:11 The m_regionOffset field encodes the liveness of t
wibling-chromium 2014/09/09 11:49:50 Done.
208 size_t m_regionOffset;
209
210 friend class WrapperPersistentRegion;
211 };
212
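To make the m_regionOffset encoding above concrete, here is a small standalone sketch (not Blink code) of how the offset to the region base and the liveness bit share one size_t, and how region() recovers the base address by masking the bit away before subtracting. The addresses and the 64-byte offset are made up for illustration.

```cpp
#include <cassert>
#include <cstdint>

int main()
{
    const size_t offsetMask = ~static_cast<size_t>(3); // wrapperPersistentOffsetMask
    const size_t liveBit = 1;                          // wrapperPersistentLiveBitMask

    // Hypothetical node sitting 64 bytes past its region's base address.
    uintptr_t regionBase = 0x10000;
    uintptr_t nodeAddress = regionBase + 64;

    // A live node stores its offset with the low bit set (see create() below).
    size_t regionOffset = (nodeAddress - regionBase) | liveBit;
    assert(regionOffset & liveBit); // corresponds to isAlive()

    // region() masks the liveness bit away before subtracting the offset.
    uintptr_t recoveredBase = nodeAddress - (regionOffset & offsetMask);
    assert(recoveredBase == regionBase);
    return 0;
}
```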
213 template<typename T>
214 class WrapperPersistent FINAL : public WrapperPersistentNode {
215 ALLOW_ONLY_INLINE_ALLOCATION()
wibling-chromium 2014/09/09 11:32:47 Only allow placement new.
216 public:
217 static WrapperPersistent<T>* create(T* raw);
wibling-chromium 2014/09/09 11:32:48 Allocate WrapperPersistent<T> via the create method.
218
219 virtual void trace(Visitor* visitor)
zerny-chromium 2014/09/09 11:33:11 Nit: OVERRIDE
wibling-chromium 2014/09/09 11:49:50 Done.
220 {
221 ASSERT(isAlive());
222 visitor->mark(static_cast<T*>(m_raw));
223 }
224
225 private:
226 WrapperPersistent() { }
227
228 // We need to use a constructor to initialize the allocated slot since it
229 // has a vtable which must be set to the WrapperPersistent<T> type.
230 WrapperPersistent(T* raw, size_t regionOffset) : WrapperPersistentNode(raw, regionOffset) { }
wibling-chromium 2014/09/09 11:32:48 New constructor taking both the raw pointer and th
231
232 // Don't allow delete being called on wrapper persistents.
233 void operator delete(void*);
wibling-chromium 2014/09/09 11:32:48 Disallow delete.
234 };
235
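As the comments note, WrapperPersistent<T> objects are never created with new or released with delete; a slot is handed out by a region via create() and returned via destroy(). A hedged usage sketch, assuming some garbage-collected class Node; the class name and the surrounding function are illustrative, not part of this patch.

```cpp
// Hypothetical Oilpan class standing in for whatever the wrapper points to.
class Node : public GarbageCollected<Node> {
public:
    void trace(Visitor*) { }
};

void keepAliveForWrapper(Node* node)
{
    // Grab a slot from the current thread's wrapper persistent region;
    // the slot is placement-new'ed into a WrapperPersistent<Node>.
    WrapperPersistent<Node>* persistent = WrapperPersistent<Node>::create(node);

    // ... while the slot is alive, its trace() marks |node| on every GC ...

    // Return the slot to its region's free list; calling delete is disallowed.
    WrapperPersistentNode::destroy(persistent);
}
```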
236 class PLATFORM_EXPORT WrapperPersistentRegion {
237 WTF_MAKE_NONCOPYABLE(WrapperPersistentRegion);
238 public:
239 WrapperPersistentRegion()
240 {
241 WrapperPersistentNode* nextFree = 0;
242 for (int i = wrapperPersistentsPerRegion - 1; i >= 0; --i) {
243 size_t regionOffset = reinterpret_cast<Address>(&m_entries[i]) - reinterpret_cast<Address>(this);
244 // Setup the free slot with an offset to the containing region's base and a pointer to the next
245 // free slot in the region.
246 ASSERT(!(regionOffset & ~wrapperPersistentOffsetMask));
247 new (&m_entries[i]) WrapperPersistentNode(nextFree, regionOffset);
248 nextFree = &m_entries[i];
249 }
250 m_prev = 0;
251 m_next = 0;
252 m_freeHead = nextFree;
253 m_count = 0;
254 }
255
256 Address allocate()
257 {
258 if (!m_freeHead) {
259 ASSERT(m_count == wrapperPersistentsPerRegion);
260 return 0;
261 }
262 // We have a free persistent slot in this region.
263 WrapperPersistentNode* freeSlot = m_freeHead;
264 // Take the slot and advance m_freeHead to the next free slot.
265 m_freeHead = freeSlot->takeSlot();
266 ASSERT(m_count < wrapperPersistentsPerRegion);
267 m_count++;
268 return reinterpret_cast<Address>(freeSlot);
269 }
270
271 void free(WrapperPersistentNode* object)
272 {
273 ASSERT(object);
274 m_freeHead = object->freeSlot(m_freeHead);
275 ASSERT(m_count > 0);
276 m_count--;
277 if (!m_count)
278 ThreadState::current()->freeWrapperPersistentRegion(this);
279 }
280
281 bool removeIfNotLast(WrapperPersistentRegion** headPtr);
282 static void insertHead(WrapperPersistentRegion** headPtr, WrapperPersistentRegion* newHead);
283 static WrapperPersistentRegion* removeHead(WrapperPersistentRegion** headPtr);
284 static Address outOfLineAllocate(ThreadState*, WrapperPersistentRegion**);
285 static void trace(WrapperPersistentRegion* head, Visitor* visitor)
286 {
287 for (WrapperPersistentRegion* current = head; current; current = current->m_next)
288 current->traceRegion(visitor);
289 }
290
291 private:
292 void traceRegion(Visitor* visitor)
293 {
294 size_t live = 0;
295
296 #ifdef NDEBUG
297 for (int i = 0; i < wrapperPersistentsPerRegion && live < m_count; ++i) {
298 #else
299 // In DEBUG mode we scan all entries to validate we only have m_count
300 // live entries.
301 for (int i = 0; i < wrapperPersistentsPerRegion; ++i) {
302 #endif
303 if (m_entries[i].isAlive()) {
304 m_entries[i].trace(visitor);
305 live++;
306 }
307 }
308 ASSERT(live == m_count);
309 }
310
311 WrapperPersistentRegion* m_prev;
312 WrapperPersistentRegion* m_next;
313 WrapperPersistentNode* m_freeHead;
314 size_t m_count;
315 WrapperPersistentNode m_entries[wrapperPersistentsPerRegion];
316 };
317
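The region hands out its 256 slots through an intrusive free list: a dead slot reuses its m_raw field as the pointer to the next free slot, which is what takeSlot()/freeSlot() above implement. A minimal standalone model of that mechanism (made-up types, only four slots, no liveness bit packing) to show how the constructor threads the slots together and how allocate()/free() pop and push them:

```cpp
#include <cassert>
#include <cstddef>

// Simplified stand-in for WrapperPersistentNode: |raw| points at the traced
// object while the slot is alive, and at the next free slot while it is dead.
struct Slot {
    void* raw;
    bool live;
};

struct Region {
    enum { slotsPerRegion = 4 };
    Slot entries[slotsPerRegion];
    Slot* freeHead;
    size_t count;

    Region() : freeHead(0), count(0)
    {
        // Thread all slots onto the free list, last to first, so freeHead
        // ends up pointing at entries[0].
        for (int i = slotsPerRegion - 1; i >= 0; --i) {
            entries[i].raw = freeHead;
            entries[i].live = false;
            freeHead = &entries[i];
        }
    }

    Slot* allocate()
    {
        if (!freeHead)
            return 0; // region full; the real code falls back to outOfLineAllocate()
        Slot* slot = freeHead;
        freeHead = static_cast<Slot*>(slot->raw); // advance to the next free slot
        slot->live = true;
        ++count;
        return slot;
    }

    void free(Slot* slot)
    {
        slot->live = false;
        slot->raw = freeHead; // push the slot back onto the free list
        freeHead = slot;
        --count;
    }
};

int main()
{
    Region region;
    Slot* a = region.allocate();
    Slot* b = region.allocate();
    assert(a == &region.entries[0] && b == &region.entries[1]);
    region.free(a);
    assert(region.allocate() == a); // freed slots are reused first
    return 0;
}
```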
318 template<typename T>
319 WrapperPersistent<T>* WrapperPersistent<T>::create(T* raw)
wibling-chromium 2014/09/09 11:32:48 New create and destroy...
320 {
321 ThreadState* state = ThreadState::current();
322 WrapperPersistentRegion* region = state->wrapperRoots();
323 ASSERT(region);
324 Address persistentSlot = region->allocate();
325 if (!persistentSlot)
326 persistentSlot = WrapperPersistentRegion::outOfLineAllocate(state, &region);
327 ASSERT(persistentSlot);
328 ASSERT(!reinterpret_cast<WrapperPersistentNode*>(persistentSlot)->isAlive());
329
330 size_t regionOffset = persistentSlot - reinterpret_cast<Address>(region);
331 regionOffset |= wrapperPersistentLiveBitMask;
zerny-chromium 2014/09/09 11:33:11 Nit: Would it not be more natural to have this OR
wibling-chromium 2014/09/09 11:49:50 I can't do that as the code is now, since the cons
332
333 // We use placement new to call the constructor to ensure that we setup vtable correctly.
Mads Ager (chromium) 2014/09/09 11:29:23 vtable -> the vtable
wibling-chromium 2014/09/09 11:49:49 Done.
334 return new (persistentSlot) WrapperPersistent<T>(raw, regionOffset);
335 }
336
337 void WrapperPersistentNode::destroy(const WrapperPersistentNode* node)
338 {
339 WrapperPersistentNode* persistent = const_cast<WrapperPersistentNode*>(node);
340 persistent->region()->free(persistent);
341 }
342
343 // RootsAccessor for Persistent that provides access to thread-local list
344 // of persistent handles. Can only be used to create handles that
345 // are constructed and destructed on the same thread.
346 template<ThreadAffinity Affinity>
347 class ThreadLocalPersistents {
348 public:
349 static PersistentNode* roots() { return state()->roots(); }
350
351 // No locking required. Just check that we are at the right thread.
352 class Lock {
(...skipping 1038 matching lines...)
1391 struct ParamStorageTraits<T*> : public PointerParamStorageTraits<T*, blink::IsGarbageCollectedType<T>::value> {
1392 };
1393
1394 template<typename T>
1395 struct ParamStorageTraits<RawPtr<T> > : public PointerParamStorageTraits<T*, blink::IsGarbageCollectedType<T>::value> {
1396 };
1397
1398 } // namespace WTF
1399
1400 #endif