Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(78)

Side by Side Diff: third_party/WebKit/Source/platform/heap/Heap.h

Issue 1477023003: Refactor the Heap into ThreadHeap to prepare for per thread heaps Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 96 matching lines...) Expand 10 before | Expand all | Expand 10 after
107 STATIC_ONLY(ObjectAliveTrait); 107 STATIC_ONLY(ObjectAliveTrait);
108 public: 108 public:
109 NO_LAZY_SWEEP_SANITIZE_ADDRESS 109 NO_LAZY_SWEEP_SANITIZE_ADDRESS
110 static bool isHeapObjectAlive(T* object) 110 static bool isHeapObjectAlive(T* object)
111 { 111 {
112 static_assert(sizeof(T), "T must be fully defined"); 112 static_assert(sizeof(T), "T must be fully defined");
113 return object->isHeapObjectAlive(); 113 return object->isHeapObjectAlive();
114 } 114 }
115 }; 115 };
116 116
117 // Stats for the heap.
118 class GCHeapStats {
119 public:
120 GCHeapStats();
121 void setMarkedObjectSizeAtLastCompleteSweep(size_t size) { releaseStore(&m_markedObjectSizeAtLastCompleteSweep, size); }
122 size_t markedObjectSizeAtLastCompleteSweep() { return acquireLoad(&m_markedObjectSizeAtLastCompleteSweep); }
123 void increaseAllocatedObjectSize(size_t delta);
124 void decreaseAllocatedObjectSize(size_t delta);
125 size_t allocatedObjectSize() { return acquireLoad(&m_allocatedObjectSize); }
126 void increaseMarkedObjectSize(size_t delta);
127 size_t markedObjectSize() { return acquireLoad(&m_markedObjectSize); }
128 void increaseAllocatedSpace(size_t delta);
129 void decreaseAllocatedSpace(size_t delta);
130 size_t allocatedSpace() { return acquireLoad(&m_allocatedSpace); }
131 size_t objectSizeAtLastGC() { return acquireLoad(&m_objectSizeAtLastGC); }
132 void increaseWrapperCount(size_t delta) { atomicAdd(&m_wrapperCount, static_cast<long>(delta)); }
133 void decreaseWrapperCount(size_t delta) { atomicSubtract(&m_wrapperCount, static_cast<long>(delta)); }
134 size_t wrapperCount() { return acquireLoad(&m_wrapperCount); }
135 size_t wrapperCountAtLastGC() { return acquireLoad(&m_wrapperCountAtLastGC); }
136 void increaseCollectedWrapperCount(size_t delta) { atomicAdd(&m_collectedWrapperCount, static_cast<long>(delta)); }
137 size_t collectedWrapperCount() { return acquireLoad(&m_collectedWrapperCount); }
138 size_t partitionAllocSizeAtLastGC() { return acquireLoad(&m_partitionAllocSizeAtLastGC); }
139 void setEstimatedMarkingTimePerByte(double estimatedMarkingTimePerByte) { m_estimatedMarkingTimePerByte = estimatedMarkingTimePerByte; }
140 double estimatedMarkingTimePerByte() const { return m_estimatedMarkingTimePerByte; }
141 double estimatedMarkingTime();
142 void reset();
143
144 private:
145 size_t m_allocatedSpace;
146 size_t m_allocatedObjectSize;
147 size_t m_objectSizeAtLastGC;
148 size_t m_markedObjectSize;
149 size_t m_markedObjectSizeAtLastCompleteSweep;
150 size_t m_wrapperCount;
151 size_t m_wrapperCountAtLastGC;
152 size_t m_collectedWrapperCount;
153 size_t m_partitionAllocSizeAtLastGC;
154 double m_estimatedMarkingTimePerByte;
155 };
156
157 using ThreadStateSet = HashSet<ThreadState*>;
158
117 class PLATFORM_EXPORT Heap { 159 class PLATFORM_EXPORT Heap {
118 STATIC_ONLY(Heap);
119 public: 160 public:
120 static void init(); 161 Heap();
121 static void shutdown(); 162 ~Heap();
122 static void doShutdown();
123 163
124 static CrossThreadPersistentRegion& crossThreadPersistentRegion(); 164 RecursiveMutex& threadAttachMutex() { return m_threadAttachMutex; }
165 const ThreadStateSet& threads() const { return m_threads; }
166 GCHeapStats& heapStats() { return m_stats; }
167 SafePointBarrier* safePointBarrier() { return m_safePointBarrier.get(); }
168 FreePagePool* freePagePool() { return m_freePagePool.get(); }
169 OrphanedPagePool* orphanedPagePool() { return m_orphanedPagePool.get(); }
170 CallbackStack* markingStack() const { return m_markingStack.get(); }
171 CallbackStack* postMarkingCallbackStack() const { return m_postMarkingCallbackStack.get(); }
172 CallbackStack* globalWeakCallbackStack() const { return m_globalWeakCallbackStack.get(); }
173 CallbackStack* ephemeronStack() const { return m_ephemeronStack.get(); }
125 174
175 void attach(ThreadState*);
176 void detach(ThreadState*);
177 void lockThreadAttachMutex();
178 void unlockThreadAttachMutex();
179 bool park();
180 void resume();
126 #if ENABLE(ASSERT) 181 #if ENABLE(ASSERT)
127 static BasePage* findPageFromAddress(Address); 182 bool isAtSafePoint() const;
128 static BasePage* findPageFromAddress(const void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(const_cast<void*>(pointer))); } 183 BasePage* findPageFromAddress(Address);
129 #endif 184 #endif
185 void preGC();
186 void postGC(BlinkGC::GCType);
187 size_t objectPayloadSizeForTesting();
188 void visitPersistentRoots(Visitor*);
189 void visitStackRoots(Visitor*);
190 void checkAndPark(ThreadState*, SafePointAwareMutexLocker*);
191 void enterSafePoint(ThreadState*);
192 void leaveSafePoint(ThreadState*, SafePointAwareMutexLocker*);
193 void flushHeapDoesNotContainCache();
194
195 // This look-up uses the region search tree and a negative contains cache to
196 // provide an efficient mapping from arbitrary addresses to the containing
197 // heap-page if one exists.
198 BasePage* lookupPageForAddress(Address);
199 void addPageMemoryRegion(PageMemoryRegion*);
200 void removePageMemoryRegion(PageMemoryRegion*);
201
202 // Push a trace callback on the marking stack.
203 void pushTraceCallback(void* containerObject, TraceCallback);
204
205 // Push a trace callback on the post-marking callback stack. These
206 // callbacks are called after normal marking (including ephemeron
207 // iteration).
208 void pushPostMarkingCallback(void*, TraceCallback);
209
210 // Similar to the more general pushThreadLocalWeakCallback, but cell
211 // pointer callbacks are added to a static callback work list and the weak
212 // callback is performed on the thread performing garbage collection. This
213 // is OK because cells are just cleared and no deallocation can happen.
214 void pushGlobalWeakCallback(void** cell, WeakCallback);
215
216 // Pop the top of a marking stack and call the callback with the visitor
217 // and the object. Returns false when there is nothing more to do.
218 bool popAndInvokeTraceCallback(Visitor*);
219
220 // Remove an item from the post-marking callback stack and call
221 // the callback with the visitor and the object pointer. Returns
222 // false when there is nothing more to do.
223 bool popAndInvokePostMarkingCallback(Visitor*);
224
225 // Remove an item from the weak callback work list and call the callback
226 // with the visitor and the closure pointer. Returns false when there is
227 // nothing more to do.
228 bool popAndInvokeGlobalWeakCallback(Visitor*);
229
230 // Register an ephemeron table for fixed-point iteration.
231 void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback);
232 #if ENABLE(ASSERT)
233 bool weakTableRegistered(const void*);
234 #endif
235
236 void decommitCallbackStacks();
237
238 // Conservatively checks whether an address is a pointer in any of the
239 // thread heaps. If so marks the object pointed to as live.
240 Address checkAndMarkPointer(Visitor*, Address);
241
242 void processMarkingStack(Visitor*);
243 void postMarkingProcessing(Visitor*);
244 void globalWeakProcessing(Visitor*);
245
246 // Reset counters that track live and allocated-since-last-GC sizes.
247 void resetHeapCounters();
248
249 // Add a weak pointer callback to the weak callback work list. General
250 // object pointer callbacks are added to a thread local weak callback work
251 // list and the callback is called on the thread that owns the object, with
252 // the closure pointer as an argument. Most of the time, the closure and
253 // the containerObject can be the same thing, but the containerObject is
254 // constrained to be on the heap, since the heap is used to identify the
255 // correct thread.
256 void pushThreadLocalWeakCallback(void* closure, void* containerObject, WeakCallback);
130 257
131 template<typename T> 258 template<typename T>
132 static inline bool isHeapObjectAlive(T* object) 259 static inline bool isHeapObjectAlive(T* object)
133 { 260 {
134 static_assert(sizeof(T), "T must be fully defined"); 261 static_assert(sizeof(T), "T must be fully defined");
135 // The strongification of collections relies on the fact that once a 262 // The strongification of collections relies on the fact that once a
136 // collection has been strongified, there is no way that it can contain 263 // collection has been strongified, there is no way that it can contain
137 // non-live entries, so no entries will be removed. Since you can't set 264 // non-live entries, so no entries will be removed. Since you can't set
138 // the mark bit on a null pointer, that means that null pointers are 265 // the mark bit on a null pointer, that means that null pointers are
139 // always 'alive'. 266 // always 'alive'.
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
171 // to be in; willObjectBeLazilySwept() has undefined behavior if passed 298 // to be in; willObjectBeLazilySwept() has undefined behavior if passed
172 // such a reference. 299 // such a reference.
173 template<typename T> 300 template<typename T>
174 NO_LAZY_SWEEP_SANITIZE_ADDRESS 301 NO_LAZY_SWEEP_SANITIZE_ADDRESS
175 static bool willObjectBeLazilySwept(const T* objectPointer) 302 static bool willObjectBeLazilySwept(const T* objectPointer)
176 { 303 {
177 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving from GarbageCollected can be used."); 304 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving from GarbageCollected can be used.");
178 BasePage* page = pageFromObject(objectPointer); 305 BasePage* page = pageFromObject(objectPointer);
179 if (page->hasBeenSwept()) 306 if (page->hasBeenSwept())
180 return false; 307 return false;
181 ASSERT(page->heap()->threadState()->isSweepingInProgress()); 308 ASSERT(page->arena()->threadState()->isSweepingInProgress());
182 309
183 return !Heap::isHeapObjectAlive(const_cast<T*>(objectPointer)); 310 return !Heap::isHeapObjectAlive(const_cast<T*>(objectPointer));
184 } 311 }
185 312
186 // Push a trace callback on the marking stack.
187 static void pushTraceCallback(void* containerObject, TraceCallback);
188
189 // Push a trace callback on the post-marking callback stack. These
190 // callbacks are called after normal marking (including ephemeron
191 // iteration).
192 static void pushPostMarkingCallback(void*, TraceCallback);
193
194 // Add a weak pointer callback to the weak callback work list. General
195 // object pointer callbacks are added to a thread local weak callback work
196 // list and the callback is called on the thread that owns the object, with
197 // the closure pointer as an argument. Most of the time, the closure and
198 // the containerObject can be the same thing, but the containerObject is
199 // constrained to be on the heap, since the heap is used to identify the
200 // correct thread.
201 static void pushThreadLocalWeakCallback(void* closure, void* containerObject , WeakCallback);
202
203 // Similar to the more general pushThreadLocalWeakCallback, but cell
204 // pointer callbacks are added to a static callback work list and the weak
205 // callback is performed on the thread performing garbage collection. This
206 // is OK because cells are just cleared and no deallocation can happen.
207 static void pushGlobalWeakCallback(void** cell, WeakCallback);
208
209 // Pop the top of a marking stack and call the callback with the visitor
210 // and the object. Returns false when there is nothing more to do.
211 static bool popAndInvokeTraceCallback(Visitor*);
212
213 // Remove an item from the post-marking callback stack and call
214 // the callback with the visitor and the object pointer. Returns
215 // false when there is nothing more to do.
216 static bool popAndInvokePostMarkingCallback(Visitor*);
217
218 // Remove an item from the weak callback work list and call the callback
219 // with the visitor and the closure pointer. Returns false when there is
220 // nothing more to do.
221 static bool popAndInvokeGlobalWeakCallback(Visitor*);
222
223 // Register an ephemeron table for fixed-point iteration.
224 static void registerWeakTable(void* containerObject, EphemeronCallback, Ephe meronCallback);
225 #if ENABLE(ASSERT)
226 static bool weakTableRegistered(const void*);
227 #endif
228
229 static inline size_t allocationSizeFromSize(size_t size) 313 static inline size_t allocationSizeFromSize(size_t size)
230 { 314 {
231 // Check the size before computing the actual allocation size. The 315 // Check the size before computing the actual allocation size. The
232 // allocation size calculation can overflow for large sizes and the chec k 316 // allocation size calculation can overflow for large sizes and the chec k
233 // therefore has to happen before any calculation on the size. 317 // therefore has to happen before any calculation on the size.
234 RELEASE_ASSERT(size < maxHeapObjectSize); 318 RELEASE_ASSERT(size < maxHeapObjectSize);
235 319
236 // Add space for header. 320 // Add space for header.
237 size_t allocationSize = size + sizeof(HeapObjectHeader); 321 size_t allocationSize = size + sizeof(HeapObjectHeader);
238 // Align size with allocation granularity. 322 // Align size with allocation granularity.
239 allocationSize = (allocationSize + allocationMask) & ~allocationMask; 323 allocationSize = (allocationSize + allocationMask) & ~allocationMask;
240 return allocationSize; 324 return allocationSize;
241 } 325 }
242 static Address allocateOnHeapIndex(ThreadState*, size_t, int heapIndex, size_t gcInfoIndex); 326 static Address allocateOnArenaIndex(ThreadState*, size_t, int arenaIndex, size_t gcInfoIndex);
243 template<typename T> static Address allocate(size_t, bool eagerlySweep = fal se); 327 template<typename T> static Address allocate(size_t, bool eagerlySweep = fal se);
244 template<typename T> static Address reallocate(void* previous, size_t); 328 template<typename T> static Address reallocate(void* previous, size_t);
245 329
246 static const char* gcReasonString(BlinkGC::GCReason);
247 static void collectGarbage(BlinkGC::StackState, BlinkGC::GCType, BlinkGC::GC Reason); 330 static void collectGarbage(BlinkGC::StackState, BlinkGC::GCType, BlinkGC::GC Reason);
248 static void collectGarbageForTerminatingThread(ThreadState*); 331 static void collectGarbageForTerminatingThread(ThreadState*);
249 static void collectAllGarbage(); 332 static void collectAllGarbage();
250 333
251 static void processMarkingStack(Visitor*);
252 static void postMarkingProcessing(Visitor*);
253 static void globalWeakProcessing(Visitor*);
254 static void setForcePreciseGCForTesting();
255
256 static void preGC();
257 static void postGC(BlinkGC::GCType);
258
259 // Conservatively checks whether an address is a pointer in any of the
260 // thread heaps. If so marks the object pointed to as live.
261 static Address checkAndMarkPointer(Visitor*, Address);
262
263 static size_t objectPayloadSizeForTesting();
264
265 static void flushHeapDoesNotContainCache();
266
267 static FreePagePool* freePagePool() { return s_freePagePool; }
268 static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; }
269
270 // This look-up uses the region search tree and a negative contains cache to
271 // provide an efficient mapping from arbitrary addresses to the containing
272 // heap-page if one exists.
273 static BasePage* lookup(Address);
274 static void addPageMemoryRegion(PageMemoryRegion*);
275 static void removePageMemoryRegion(PageMemoryRegion*);
276
277 static const GCInfo* gcInfo(size_t gcInfoIndex) 334 static const GCInfo* gcInfo(size_t gcInfoIndex)
278 { 335 {
279 ASSERT(gcInfoIndex >= 1); 336 ASSERT(gcInfoIndex >= 1);
280 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); 337 ASSERT(gcInfoIndex < GCInfoTable::maxIndex);
281 ASSERT(s_gcInfoTable); 338 ASSERT(s_gcInfoTable);
282 const GCInfo* info = s_gcInfoTable[gcInfoIndex]; 339 const GCInfo* info = s_gcInfoTable[gcInfoIndex];
283 ASSERT(info); 340 ASSERT(info);
284 return info; 341 return info;
285 } 342 }
286 343
287 static void setMarkedObjectSizeAtLastCompleteSweep(size_t size) { releaseStore(&s_markedObjectSizeAtLastCompleteSweep, size); } 344 static CrossThreadPersistentRegion& crossThreadPersistentRegion();
288 static size_t markedObjectSizeAtLastCompleteSweep() { return acquireLoad(&s_markedObjectSizeAtLastCompleteSweep); }
289 static void increaseAllocatedObjectSize(size_t delta) { atomicAdd(&s_allocatedObjectSize, static_cast<long>(delta)); }
290 static void decreaseAllocatedObjectSize(size_t delta) { atomicSubtract(&s_allocatedObjectSize, static_cast<long>(delta)); }
291 static size_t allocatedObjectSize() { return acquireLoad(&s_allocatedObjectSize); }
292 static void increaseMarkedObjectSize(size_t delta) { atomicAdd(&s_markedObjectSize, static_cast<long>(delta)); }
293 static size_t markedObjectSize() { return acquireLoad(&s_markedObjectSize); }
294 static void increaseAllocatedSpace(size_t delta) { atomicAdd(&s_allocatedSpace, static_cast<long>(delta)); }
295 static void decreaseAllocatedSpace(size_t delta) { atomicSubtract(&s_allocatedSpace, static_cast<long>(delta)); }
296 static size_t allocatedSpace() { return acquireLoad(&s_allocatedSpace); }
297 static size_t objectSizeAtLastGC() { return acquireLoad(&s_objectSizeAtLastGC); }
298 static void increaseWrapperCount(size_t delta) { atomicAdd(&s_wrapperCount, static_cast<long>(delta)); }
299 static void decreaseWrapperCount(size_t delta) { atomicSubtract(&s_wrapperCount, static_cast<long>(delta)); }
300 static size_t wrapperCount() { return acquireLoad(&s_wrapperCount); }
301 static size_t wrapperCountAtLastGC() { return acquireLoad(&s_wrapperCountAtLastGC); }
302 static void increaseCollectedWrapperCount(size_t delta) { atomicAdd(&s_collectedWrapperCount, static_cast<long>(delta)); }
303 static size_t collectedWrapperCount() { return acquireLoad(&s_collectedWrapperCount); }
304 static size_t partitionAllocSizeAtLastGC() { return acquireLoad(&s_partitionAllocSizeAtLastGC); }
305 345
306 static double estimatedMarkingTime(); 346 static const char* gcReasonString(BlinkGC::GCReason);
347
307 static void reportMemoryUsageHistogram(); 348 static void reportMemoryUsageHistogram();
308 static void reportMemoryUsageForTracing(); 349 static void reportMemoryUsageForTracing();
350
351 static void increaseTotalAllocatedObjectSize(size_t delta) { atomicAdd(&s_totalAllocatedObjectSize, static_cast<long>(delta)); }
352 static void decreaseTotalAllocatedObjectSize(size_t delta) { atomicSubtract(&s_totalAllocatedObjectSize, static_cast<long>(delta)); }
353 static size_t totalAllocatedObjectSize() { return acquireLoad(&s_totalAllocatedObjectSize); }
354 static void increaseTotalMarkedObjectSize(size_t delta) { atomicAdd(&s_totalMarkedObjectSize, static_cast<long>(delta)); }
355 static size_t totalMarkedObjectSize() { return acquireLoad(&s_totalMarkedObjectSize); }
356 static void increaseTotalAllocatedSpace(size_t delta) { atomicAdd(&s_totalAllocatedSpace, static_cast<long>(delta)); }
357 static void decreaseTotalAllocatedSpace(size_t delta) { atomicSubtract(&s_totalAllocatedSpace, static_cast<long>(delta)); }
358 static size_t totalAllocatedSpace() { return acquireLoad(&s_totalAllocatedSpace); }
359
309 static bool isLowEndDevice() { return s_isLowEndDevice; } 360 static bool isLowEndDevice() { return s_isLowEndDevice; }
310 361
311 #if ENABLE(ASSERT) 362 static void init();
312 static uint16_t gcGeneration() { return s_gcGeneration; } 363 // Shutdown tears down the global objects. If threads are still attached the actual shutdown task is delayed.
313 #endif 364 static void shutdown();
365 // Run the shutdown task if necessary.
366 static void doShutdownIfNecessary();
367
368 static HashSet<Heap*>& all();
369 static RecursiveMutex& heapAttachMutex();
314 370
315 private: 371 private:
316 // Reset counters that track live and allocated-since-last-GC sizes. 372 static int arenaIndexForObjectSize(size_t);
317 static void resetHeapCounters(); 373 static bool isNormalArenaIndex(int);
318 374
319 static int heapIndexForObjectSize(size_t); 375 RecursiveMutex m_threadAttachMutex;
320 static bool isNormalHeapIndex(int); 376 ThreadStateSet m_threads;
377 GCHeapStats m_stats;
378 Mutex m_regionTreeMutex;
379 RegionTree* m_regionTree;
380 OwnPtr<HeapDoesNotContainCache> m_heapDoesNotContainCache;
381 OwnPtr<SafePointBarrier> m_safePointBarrier;
382 OwnPtr<FreePagePool> m_freePagePool;
383 OwnPtr<OrphanedPagePool> m_orphanedPagePool;
384 OwnPtr<CallbackStack> m_markingStack;
385 OwnPtr<CallbackStack> m_postMarkingCallbackStack;
386 OwnPtr<CallbackStack> m_globalWeakCallbackStack;
387 OwnPtr<CallbackStack> m_ephemeronStack;
321 388
322 static void decommitCallbackStacks(); 389 static bool s_shutdownCalled;
390 static bool s_shutdownComplete;
391 static bool s_isLowEndDevice;
323 392
324 static CallbackStack* s_markingStack; 393 // Stats for the entire Oilpan heap.
325 static CallbackStack* s_postMarkingCallbackStack; 394 static size_t s_totalAllocatedSpace;
326 static CallbackStack* s_globalWeakCallbackStack; 395 static size_t s_totalAllocatedObjectSize;
327 static CallbackStack* s_ephemeronStack; 396 static size_t s_totalMarkedObjectSize;
haraken 2016/02/29 11:17:45 Shall we factor out these static variables to anot
328 static HeapDoesNotContainCache* s_heapDoesNotContainCache;
329 static bool s_shutdownCalled;
330 static FreePagePool* s_freePagePool;
331 static OrphanedPagePool* s_orphanedPagePool;
332 static RegionTree* s_regionTree;
333 static size_t s_allocatedSpace;
334 static size_t s_allocatedObjectSize;
335 static size_t s_objectSizeAtLastGC;
336 static size_t s_markedObjectSize;
337 static size_t s_markedObjectSizeAtLastCompleteSweep;
338 static size_t s_wrapperCount;
339 static size_t s_wrapperCountAtLastGC;
340 static size_t s_collectedWrapperCount;
341 static size_t s_partitionAllocSizeAtLastGC;
342 static double s_estimatedMarkingTimePerByte;
343 static bool s_isLowEndDevice;
344 #if ENABLE(ASSERT)
345 static uint16_t s_gcGeneration;
346 #endif
347 397
348 friend class ThreadState; 398 friend class ThreadState;
349 }; 399 };
350 400
351 template<typename T> 401 template<typename T>
352 struct IsEagerlyFinalizedType { 402 struct IsEagerlyFinalizedType {
353 STATIC_ONLY(IsEagerlyFinalizedType); 403 STATIC_ONLY(IsEagerlyFinalizedType);
354 private: 404 private:
355 typedef char YesType; 405 typedef char YesType;
356 struct NoType { 406 struct NoType {
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after
420 // 470 //
421 // An exception to the use of sized heaps is made for class types that 471 // An exception to the use of sized heaps is made for class types that
422 // require prompt finalization after a garbage collection. That is, their 472 // require prompt finalization after a garbage collection. That is, their
423 // instances have to be finalized early and cannot be delayed until lazy 473 // instances have to be finalized early and cannot be delayed until lazy
424 // sweeping kicks in for their heap and page. The EAGERLY_FINALIZE() 474 // sweeping kicks in for their heap and page. The EAGERLY_FINALIZE()
425 // macro is used to declare a class (and its derived classes) as being 475 // macro is used to declare a class (and its derived classes) as being
426 // in need of eager finalization. Must be defined with 'public' visibility 476 // in need of eager finalization. Must be defined with 'public' visibility
427 // for a class. 477 // for a class.
428 // 478 //
429 479
430 inline int Heap::heapIndexForObjectSize(size_t size) 480 inline int Heap::arenaIndexForObjectSize(size_t size)
431 { 481 {
432 if (size < 64) { 482 if (size < 64) {
433 if (size < 32) 483 if (size < 32)
434 return BlinkGC::NormalPage1HeapIndex; 484 return BlinkGC::NormalPage1ArenaIndex;
435 return BlinkGC::NormalPage2HeapIndex; 485 return BlinkGC::NormalPage2ArenaIndex;
436 } 486 }
437 if (size < 128) 487 if (size < 128)
438 return BlinkGC::NormalPage3HeapIndex; 488 return BlinkGC::NormalPage3ArenaIndex;
439 return BlinkGC::NormalPage4HeapIndex; 489 return BlinkGC::NormalPage4ArenaIndex;
440 } 490 }
441 491
442 inline bool Heap::isNormalHeapIndex(int index) 492 inline bool Heap::isNormalArenaIndex(int index)
443 { 493 {
444 return index >= BlinkGC::NormalPage1HeapIndex && index <= BlinkGC::NormalPage4HeapIndex; 494 return index >= BlinkGC::NormalPage1ArenaIndex && index <= BlinkGC::NormalPage4ArenaIndex;
445 } 495 }
446 496
447 #define DECLARE_EAGER_FINALIZATION_OPERATOR_NEW() \ 497 #define DECLARE_EAGER_FINALIZATION_OPERATOR_NEW() \
448 public: \ 498 public: \
449 GC_PLUGIN_IGNORE("491488") \ 499 GC_PLUGIN_IGNORE("491488") \
450 void* operator new(size_t size) \ 500 void* operator new(size_t size) \
451 { \ 501 { \
452 return allocateObject(size, true); \ 502 return allocateObject(size, true); \
453 } 503 }
454 504
455 #define IS_EAGERLY_FINALIZED() (pageFromObject(this)->heap()->heapIndex() == BlinkGC::EagerSweepHeapIndex) 505 #define IS_EAGERLY_FINALIZED() (pageFromObject(this)->arena()->arenaIndex() == BlinkGC::EagerSweepArenaIndex)
456 #if ENABLE(ASSERT) && ENABLE(OILPAN) 506 #if ENABLE(ASSERT) && ENABLE(OILPAN)
457 class VerifyEagerFinalization { 507 class VerifyEagerFinalization {
458 DISALLOW_NEW(); 508 DISALLOW_NEW();
459 public: 509 public:
460 ~VerifyEagerFinalization() 510 ~VerifyEagerFinalization()
461 { 511 {
462 // If this assert triggers, the class annotated as eagerly 512 // If this assert triggers, the class annotated as eagerly
463 // finalized ended up not being allocated on the heap 513 // finalized ended up not being allocated on the heap
464 // set aside for eager finalization. The reason is most 514 // set aside for eager finalization. The reason is most
465 // likely that the effective 'operator new' overload for 515 // likely that the effective 'operator new' overload for
(...skipping 12 matching lines...) Expand all
478 #else 528 #else
479 #define EAGERLY_FINALIZE() typedef int IsEagerlyFinalizedMarker 529 #define EAGERLY_FINALIZE() typedef int IsEagerlyFinalizedMarker
480 #endif 530 #endif
481 531
482 #if !ENABLE(OILPAN) 532 #if !ENABLE(OILPAN)
483 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() EAGERLY_FINALIZE() 533 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() EAGERLY_FINALIZE()
484 #else 534 #else
485 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() 535 #define EAGERLY_FINALIZE_WILL_BE_REMOVED()
486 #endif 536 #endif
487 537
488 inline Address Heap::allocateOnHeapIndex(ThreadState* state, size_t size, int heapIndex, size_t gcInfoIndex) 538 inline Address Heap::allocateOnArenaIndex(ThreadState* state, size_t size, int arenaIndex, size_t gcInfoIndex)
489 { 539 {
490 ASSERT(state->isAllocationAllowed()); 540 ASSERT(state->isAllocationAllowed());
491 ASSERT(heapIndex != BlinkGC::LargeObjectHeapIndex); 541 ASSERT(arenaIndex != BlinkGC::LargeObjectArenaIndex);
492 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex)); 542 NormalPageArena* heap = static_cast<NormalPageArena*>(state->arena(arenaIndex));
493 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex); 543 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex);
494 } 544 }
495 545
496 template<typename T> 546 template<typename T>
497 Address Heap::allocate(size_t size, bool eagerlySweep) 547 Address Heap::allocate(size_t size, bool eagerlySweep)
498 { 548 {
499 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); 549 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
500 Address address = Heap::allocateOnHeapIndex(state, size, eagerlySweep ? BlinkGC::EagerSweepHeapIndex : Heap::heapIndexForObjectSize(size), GCInfoTrait<T>::index()); 550 Address address = Heap::allocateOnArenaIndex(state, size, eagerlySweep ? BlinkGC::EagerSweepArenaIndex : Heap::arenaIndexForObjectSize(size), GCInfoTrait<T>::index());
501 const char* typeName = WTF_HEAP_PROFILER_TYPE_NAME(T); 551 const char* typeName = WTF_HEAP_PROFILER_TYPE_NAME(T);
502 HeapAllocHooks::allocationHookIfEnabled(address, size, typeName); 552 HeapAllocHooks::allocationHookIfEnabled(address, size, typeName);
503 return address; 553 return address;
504 } 554 }
505 555
506 template<typename T> 556 template<typename T>
507 Address Heap::reallocate(void* previous, size_t size) 557 Address Heap::reallocate(void* previous, size_t size)
508 { 558 {
509 // Not intended to be a full C realloc() substitute; 559 // Not intended to be a full C realloc() substitute;
510 // realloc(nullptr, size) is not a supported alias for malloc(size). 560 // realloc(nullptr, size) is not a supported alias for malloc(size).
511 561
512 // TODO(sof): promptly free the previous object. 562 // TODO(sof): promptly free the previous object.
513 if (!size) { 563 if (!size) {
514 // If the new size is 0 this is considered equivalent to free(previous). 564 // If the new size is 0 this is considered equivalent to free(previous).
515 return nullptr; 565 return nullptr;
516 } 566 }
517 567
518 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); 568 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
519 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous); 569 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous);
520 BasePage* page = pageFromObject(previousHeader); 570 BasePage* page = pageFromObject(previousHeader);
521 ASSERT(page); 571 ASSERT(page);
522 int heapIndex = page->heap()->heapIndex(); 572 int arenaIndex = page->arena()->arenaIndex();
523 // Recompute the effective heap index if previous allocation 573 // Recompute the effective heap index if previous allocation
524 // was on the normal heaps or a large object. 574 // was on the normal heaps or a large object.
525 if (isNormalHeapIndex(heapIndex) || heapIndex == BlinkGC::LargeObjectHeapIndex) 575 if (isNormalArenaIndex(arenaIndex) || arenaIndex == BlinkGC::LargeObjectArenaIndex)
526 heapIndex = heapIndexForObjectSize(size); 576 arenaIndex = arenaIndexForObjectSize(size);
527 577
528 // TODO(haraken): We don't support reallocate() for finalizable objects. 578 // TODO(haraken): We don't support reallocate() for finalizable objects.
529 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); 579 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer());
530 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); 580 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index());
531 Address address = Heap::allocateOnHeapIndex(state, size, heapIndex, GCInfoTrait<T>::index()); 581 Address address = Heap::allocateOnArenaIndex(state, size, arenaIndex, GCInfoTrait<T>::index());
532 size_t copySize = previousHeader->payloadSize(); 582 size_t copySize = previousHeader->payloadSize();
533 if (copySize > size) 583 if (copySize > size)
534 copySize = size; 584 copySize = size;
535 memcpy(address, previous, copySize); 585 memcpy(address, previous, copySize);
536 const char* typeName = WTF_HEAP_PROFILER_TYPE_NAME(T); 586 const char* typeName = WTF_HEAP_PROFILER_TYPE_NAME(T);
537 HeapAllocHooks::reallocHookIfEnabled(static_cast<Address>(previous), address, size, typeName); 587 HeapAllocHooks::reallocHookIfEnabled(static_cast<Address>(previous), address, size, typeName);
538 return address; 588 return address;
539 } 589 }
540 590
541 template<typename Derived> 591 template<typename Derived>
542 template<typename T> 592 template<typename T>
543 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object) 593 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object)
544 { 594 {
545 T** cell = reinterpret_cast<T**>(object); 595 T** cell = reinterpret_cast<T**>(object);
546 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell)) 596 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell))
547 *cell = nullptr; 597 *cell = nullptr;
548 } 598 }
549 599
550 } // namespace blink 600 } // namespace blink
551 601
552 #endif // Heap_h 602 #endif // Heap_h
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698