OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 78 matching lines...)
89 // non-live entries, so no entries will be removed. Since you can't set | 89 // non-live entries, so no entries will be removed. Since you can't set |
90 // the mark bit on a null pointer, that means that null pointers are | 90 // the mark bit on a null pointer, that means that null pointers are |
91 // always 'alive'. | 91 // always 'alive'. |
92 if (!object) | 92 if (!object) |
93 return true; | 93 return true; |
94 return ObjectAliveTrait<T>::isHeapObjectAlive(object); | 94 return ObjectAliveTrait<T>::isHeapObjectAlive(object); |
95 } | 95 } |
96 template<typename T> | 96 template<typename T> |
97 static inline bool isHeapObjectAlive(const Member<T>& member) | 97 static inline bool isHeapObjectAlive(const Member<T>& member) |
98 { | 98 { |
99 return isHeapObjectAlive(member.get()); | 99 return isHeapObjectAlive(member.unsafeGet()); |
100 } | 100 } |
101 template<typename T> | 101 template<typename T> |
102 static inline bool isHeapObjectAlive(const WeakMember<T>& member) | 102 static inline bool isHeapObjectAlive(const WeakMember<T>& member) |
103 { | 103 { |
104 return isHeapObjectAlive(member.get()); | 104 return isHeapObjectAlive(member.unsafeGet()); |
105 } | 105 } |
106 template<typename T> | 106 template<typename T> |
107 static inline bool isHeapObjectAlive(const UntracedMember<T>& member) | 107 static inline bool isHeapObjectAlive(const UntracedMember<T>& member) |
108 { | 108 { |
109 return isHeapObjectAlive(member.get()); | 109 return isHeapObjectAlive(member.unsafeGet()); |
110 } | 110 } |
111 template<typename T> | 111 template<typename T> |
112 static inline bool isHeapObjectAlive(const RawPtr<T>& ptr) | 112 static inline bool isHeapObjectAlive(const RawPtr<T>& ptr) |
113 { | 113 { |
114 return isHeapObjectAlive(ptr.get()); | 114 return isHeapObjectAlive(ptr.get()); |
115 } | 115 } |
116 | 116 |
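
    [Editor's note: the overloads above exist so call sites can pass any of Blink's
    pointer wrappers directly. A minimal sketch of the typical consumer, a weak-member
    callback, follows. The `Example` class and its weak-callback registration are
    illustrative assumptions and not part of this change; `DEFINE_INLINE_TRACE` and
    `registerWeakMembers` are the usual Blink-era spellings, used here on that assumption.]

        // Sketch only: a hypothetical class whose weak member is cleared when
        // the referenced object was not marked in the last GC.
        class Example final : public GarbageCollected<Example> {
        public:
            DEFINE_INLINE_TRACE()
            {
                visitor->template registerWeakMembers<Example, &Example::clearWeakMembers>(this);
            }

            void clearWeakMembers(Visitor*)
            {
                // A null WeakMember reports 'alive' (see the comment at line 89
                // above), so this only clears pointers to dead, unswept objects.
                if (!Heap::isHeapObjectAlive(m_other))
                    m_other = nullptr;
            }

        private:
            WeakMember<Example> m_other;
        };
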
117 // Is the finalizable GC object still alive, but slated for lazy sweeping? | 117 // Is the finalizable GC object still alive, but slated for lazy sweeping? |
118 // If a lazy sweep is in progress, returns true if the object was found | 118 // If a lazy sweep is in progress, returns true if the object was found |
119 // to be not reachable during the marking phase, but it has yet to be swept | 119 // to be not reachable during the marking phase, but it has yet to be swept |
(...skipping 132 matching lines...)
252 static size_t wrapperCount() { return acquireLoad(&s_wrapperCount); } | 252 static size_t wrapperCount() { return acquireLoad(&s_wrapperCount); } |
253 static size_t wrapperCountAtLastGC() { return acquireLoad(&s_wrapperCountAtLastGC); } | 253 static size_t wrapperCountAtLastGC() { return acquireLoad(&s_wrapperCountAtLastGC); } |
254 static void increaseCollectedWrapperCount(size_t delta) { atomicAdd(&s_collectedWrapperCount, static_cast<long>(delta)); } | 254 static void increaseCollectedWrapperCount(size_t delta) { atomicAdd(&s_collectedWrapperCount, static_cast<long>(delta)); } |
255 static size_t collectedWrapperCount() { return acquireLoad(&s_collectedWrapperCount); } | 255 static size_t collectedWrapperCount() { return acquireLoad(&s_collectedWrapperCount); } |
256 static size_t partitionAllocSizeAtLastGC() { return acquireLoad(&s_partitionAllocSizeAtLastGC); } | 256 static size_t partitionAllocSizeAtLastGC() { return acquireLoad(&s_partitionAllocSizeAtLastGC); } |
257 | 257 |
258 static double estimatedMarkingTime(); | 258 static double estimatedMarkingTime(); |
259 static void reportMemoryUsageHistogram(); | 259 static void reportMemoryUsageHistogram(); |
260 static void reportMemoryUsageForTracing(); | 260 static void reportMemoryUsageForTracing(); |
261 | 261 |
262 #if ENABLE(ASSERT) | 262 static uint32_t gcGeneration() |
263 static uint16_t gcGeneration() { return s_gcGeneration; } | 263 { |
264 #endif | 264 ASSERT(s_gcGeneration != gcGenerationUnchecked); |
| 265 ASSERT(s_gcGeneration != gcGenerationForFreeListEntry); |
| 266 return s_gcGeneration; |
| 267 } |
265 | 268 |
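
    [Editor's note: the NEW side promotes gcGeneration from a debug-only uint16_t to
    an always-available uint32_t and asserts the counter has moved past its two
    reserved sentinels. A sketch of the invariant the ASSERTs encode; only the
    sentinel names come from this diff, the concrete values are illustrative guesses.]

        // Assumed layout of the generation space (values are assumptions):
        const uint32_t gcGenerationUnchecked = 0;        // header never stamped
        const uint32_t gcGenerationForFreeListEntry = 1; // payload reused as a free-list entry
        // s_gcGeneration is advanced per GC cycle starting above the sentinels,
        // so a generation read from a live object header can never collide with
        // either reserved value.
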
266 private: | 269 private: |
267 // A RegionTree is a simple binary search tree of PageMemoryRegions sorted | 270 // A RegionTree is a simple binary search tree of PageMemoryRegions sorted |
268 // by base addresses. | 271 // by base addresses. |
269 class RegionTree { | 272 class RegionTree { |
270 public: | 273 public: |
271 explicit RegionTree(PageMemoryRegion* region) : m_region(region), m_left(nullptr), m_right(nullptr) { } | 274 explicit RegionTree(PageMemoryRegion* region) : m_region(region), m_left(nullptr), m_right(nullptr) { } |
272 ~RegionTree() | 275 ~RegionTree() |
273 { | 276 { |
274 delete m_left; | 277 delete m_left; |
(...skipping 26 matching lines...)
301 static size_t s_allocatedSpace; | 304 static size_t s_allocatedSpace; |
302 static size_t s_allocatedObjectSize; | 305 static size_t s_allocatedObjectSize; |
303 static size_t s_objectSizeAtLastGC; | 306 static size_t s_objectSizeAtLastGC; |
304 static size_t s_markedObjectSize; | 307 static size_t s_markedObjectSize; |
305 static size_t s_markedObjectSizeAtLastCompleteSweep; | 308 static size_t s_markedObjectSizeAtLastCompleteSweep; |
306 static size_t s_wrapperCount; | 309 static size_t s_wrapperCount; |
307 static size_t s_wrapperCountAtLastGC; | 310 static size_t s_wrapperCountAtLastGC; |
308 static size_t s_collectedWrapperCount; | 311 static size_t s_collectedWrapperCount; |
309 static size_t s_partitionAllocSizeAtLastGC; | 312 static size_t s_partitionAllocSizeAtLastGC; |
310 static double s_estimatedMarkingTimePerByte; | 313 static double s_estimatedMarkingTimePerByte; |
311 #if ENABLE(ASSERT) | 314 static uint32_t s_gcGeneration; |
312 static uint16_t s_gcGeneration; | |
313 #endif | |
314 | 315 |
315 friend class ThreadState; | 316 friend class ThreadState; |
316 }; | 317 }; |
317 | 318 |
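
    [Editor's note on the RegionTree nested in the class above (line 269/272): since
    it is a binary search tree keyed on base addresses, address-to-region lookup is a
    standard descent. The sketch assumes hypothetical base()/contains() accessors on
    PageMemoryRegion and open access to the tree fields; the real class keeps them
    private and its lookup is elided from this hunk.]

        // Illustrative lookup over the RegionTree structure described above.
        PageMemoryRegion* lookupRegion(RegionTree* node, Address address)
        {
            while (node) {
                if (address < node->m_region->base())
                    node = node->m_left;      // address precedes this region
                else if (!node->m_region->contains(address))
                    node = node->m_right;     // address lies beyond this region
                else
                    return node->m_region;    // address falls inside this region
            }
            return nullptr; // address is not in any managed region
        }
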
318 template<typename T> | 319 template<typename T> |
319 struct IsEagerlyFinalizedType { | 320 struct IsEagerlyFinalizedType { |
320 private: | 321 private: |
321 typedef char YesType; | 322 typedef char YesType; |
322 struct NoType { | 323 struct NoType { |
323 char padding[8]; | 324 char padding[8]; |
(...skipping 124 matching lines...)
448 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() EAGERLY_FINALIZE() | 449 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() EAGERLY_FINALIZE() |
449 #else | 450 #else |
450 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() | 451 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() |
451 #endif | 452 #endif |
452 | 453 |
453 inline Address Heap::allocateOnHeapIndex(ThreadState* state, size_t size, int heapIndex, size_t gcInfoIndex) | 454 inline Address Heap::allocateOnHeapIndex(ThreadState* state, size_t size, int heapIndex, size_t gcInfoIndex) |
454 { | 455 { |
455 ASSERT(state->isAllocationAllowed()); | 456 ASSERT(state->isAllocationAllowed()); |
456 ASSERT(heapIndex != BlinkGC::LargeObjectHeapIndex); | 457 ASSERT(heapIndex != BlinkGC::LargeObjectHeapIndex); |
457 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex)); | 458 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex)); |
458 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex); | 459 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex, gcGeneration()); |
459 } | 460 } |
460 | 461 |
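
    [Editor's note: allocateOnHeapIndex is the single funnel for normal-page
    allocation: it checks that allocation is allowed, excludes the large-object heap
    (which has its own path), pads the request, and, on the NEW side, stamps the
    current gcGeneration into the object. The padding helper is elided from this
    hunk; a sketch of what it is expected to compute, with assumed constants.]

        // Sketch of allocationSizeFromSize: reserve space for the object header
        // and round up to the heap's allocation granularity. The constant is an
        // assumption for illustration.
        inline size_t allocationSizeFromSizeSketch(size_t size)
        {
            const size_t allocationGranularity = 8; // assumed granularity
            size_t allocationSize = size + sizeof(HeapObjectHeader);
            // Round up to a granularity multiple (granularity is a power of two).
            allocationSize = (allocationSize + allocationGranularity - 1) & ~(allocationGranularity - 1);
            return allocationSize;
        }
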
461 template<typename T> | 462 template<typename T> |
462 Address Heap::allocate(size_t size, bool eagerlySweep) | 463 Address Heap::allocate(size_t size, bool eagerlySweep) |
463 { | 464 { |
464 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 465 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
465 return Heap::allocateOnHeapIndex(state, size, eagerlySweep ? BlinkGC::EagerSweepHeapIndex : Heap::heapIndexForObjectSize(size), GCInfoTrait<T>::index()); | 466 return Heap::allocateOnHeapIndex(state, size, eagerlySweep ? BlinkGC::EagerSweepHeapIndex : Heap::heapIndexForObjectSize(size), GCInfoTrait<T>::index()); |
466 } | 467 } |
467 | 468 |
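
    [Editor's note: Heap::allocate<T> resolves the owning ThreadState from T's
    threading affinity and routes eagerly finalized types to the dedicated
    eager-sweep heap, everything else by size class. A sketch of how a
    GarbageCollected type's operator new would typically reach it; the actual
    macro plumbing is not in this hunk, so the wrapper name is hypothetical.]

        // Illustrative only: the real entry point is generated by Blink's
        // allocation macros, but the shape of the call is this.
        template<typename T>
        void* allocateGarbageCollectedSketch(size_t size)
        {
            return Heap::allocate<T>(size, IsEagerlyFinalizedType<T>::value);
        }
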
468 template<typename T> | 469 template<typename T> |
(...skipping 34 matching lines...)
503 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object) | 504 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object) |
504 { | 505 { |
505 T** cell = reinterpret_cast<T**>(object); | 506 T** cell = reinterpret_cast<T**>(object); |
506 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell)) | 507 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell)) |
507 *cell = nullptr; | 508 *cell = nullptr; |
508 } | 509 } |
509 | 510 |
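
    [Editor's note: handleWeakCell is the primitive behind WeakMember clearing. Its
    contract is small enough to model outside the collector; the standalone sketch
    below replaces the mark bit with a plain bool to show the three cases (null,
    marked, unmarked) and is runnable as-is.]

        #include <cassert>

        struct CellTarget { bool marked; };

        // Same rule as handleWeakCell: clear the cell only when it points at an
        // object that the marking phase did not reach.
        void handleWeakCellModel(CellTarget** cell)
        {
            if (*cell && !(*cell)->marked)
                *cell = nullptr;
        }

        int main()
        {
            CellTarget live = { true };
            CellTarget dead = { false };
            CellTarget* a = &live;
            CellTarget* b = &dead;
            CellTarget* c = nullptr;
            handleWeakCellModel(&a); assert(a == &live);   // marked: kept
            handleWeakCellModel(&b); assert(b == nullptr); // unmarked: cleared
            handleWeakCellModel(&c); assert(c == nullptr); // null: treated as alive
            return 0;
        }
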
510 } // namespace blink | 511 } // namespace blink |
511 | 512 |
512 #endif // Heap_h | 513 #endif // Heap_h |