Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef MarkingVisitorImpl_h | 5 #ifndef MarkingVisitorImpl_h |
| 6 #define MarkingVisitorImpl_h | 6 #define MarkingVisitorImpl_h |
| 7 | 7 |
| 8 #include "platform/heap/Heap.h" | 8 #include "platform/heap/Heap.h" |
| 9 #include "platform/heap/ThreadState.h" | 9 #include "platform/heap/ThreadState.h" |
| 10 #include "platform/heap/Visitor.h" | 10 #include "platform/heap/Visitor.h" |
| (...skipping 20 matching lines...) Expand all Loading... | |
| 31 // from a live thread heap to a dead thread heap. We must eliminate | 31 // from a live thread heap to a dead thread heap. We must eliminate |
| 32 // the dangling pointer. | 32 // the dangling pointer. |
| 33 // Release builds don't have the ASSERT, but it is OK because | 33 // Release builds don't have the ASSERT, but it is OK because |
| 34 // release builds will crash in the following header->isMarked() | 34 // release builds will crash in the following header->isMarked() |
| 35 // because all the entries of the orphaned heaps are zapped. | 35 // because all the entries of the orphaned heaps are zapped. |
| 36 ASSERT(!pageFromObject(objectPointer)->orphaned()); | 36 ASSERT(!pageFromObject(objectPointer)->orphaned()); |
| 37 | 37 |
| 38 if (header->isMarked()) | 38 if (header->isMarked()) |
| 39 return; | 39 return; |
| 40 | 40 |
| 41 ASSERT(ThreadState::current()->isInGC()); | 41 ASSERT(toDerived()->threadState()->isInGC()); |
| 42 if (toDerived()->threadState()->perThreadHeapEnabled()) | |
| 43 RELEASE_ASSERT(ThreadState::forObject(objectPointer) == toDerived()->threadState()); | |
|
haraken
2016/01/07 08:06:22
forObject => fromObject
haraken
2016/01/07 08:06:22
Hmm. This RELEASE_ASSERT would be a bit problematic
| |
| 42 #if !defined(NDEBUG) | 44 #if !defined(NDEBUG) |
| 43 ASSERT(Heap::findPageFromAddress(header)); | 45 else |
| 46 ASSERT(Heap::findPageFromAddress(header)); | |
| 44 #endif | 47 #endif |
| 45 ASSERT(toDerived()->markingMode() != Visitor::WeakProcessing); | 48 ASSERT(toDerived()->markingMode() != Visitor::WeakProcessing); |
| 46 | 49 |
| 47 header->mark(); | 50 header->mark(); |
| 48 | 51 |
| 49 if (callback) | 52 if (callback) |
| 50 Heap::pushTraceCallback(const_cast<void*>(objectPointer), callback); | 53 Heap::pushTraceCallback(const_cast<void*>(objectPointer), callback, toDerived()->threadState()); |
| 51 } | 54 } |
| 52 | 55 |
| 53 inline void mark(const void* objectPointer, TraceCallback callback) | 56 inline void mark(const void* objectPointer, TraceCallback callback) |
| 54 { | 57 { |
| 55 if (!objectPointer) | 58 if (!objectPointer) |
| 56 return; | 59 return; |
| 57 HeapObjectHeader* header = HeapObjectHeader::fromPayload(objectPointer); | 60 HeapObjectHeader* header = HeapObjectHeader::fromPayload(objectPointer); |
| 58 markHeader(header, header->payload(), callback); | 61 markHeader(header, header->payload(), callback); |
| 59 } | 62 } |
| 60 | 63 |
| 61 inline void registerDelayedMarkNoTracing(const void* objectPointer) | 64 inline void registerDelayedMarkNoTracing(const void* objectPointer) |
| 62 { | 65 { |
| 63 ASSERT(toDerived()->markingMode() != Visitor::WeakProcessing); | 66 ASSERT(toDerived()->markingMode() != Visitor::WeakProcessing); |
| 64 Heap::pushPostMarkingCallback(const_cast<void*>(objectPointer), &markNoTracingCallback); | 67 Heap::pushPostMarkingCallback(const_cast<void*>(objectPointer), &markNoTracingCallback); |
| 65 } | 68 } |
| 66 | 69 |
| 67 inline void registerWeakMembers(const void* closure, const void* objectPointer, WeakCallback callback) | 70 inline void registerWeakMembers(const void* closure, const void* objectPointer, WeakCallback callback) |
| 68 { | 71 { |
| 69 ASSERT(toDerived()->markingMode() != Visitor::WeakProcessing); | 72 ASSERT(toDerived()->markingMode() != Visitor::WeakProcessing); |
| 70 // We don't want to run weak processings when taking a snapshot. | 73 // We don't want to run weak processings when taking a snapshot. |
| 71 if (toDerived()->markingMode() == Visitor::SnapshotMarking) | 74 if (toDerived()->markingMode() == Visitor::SnapshotMarking) |
| 72 return; | 75 return; |
| 73 Heap::pushThreadLocalWeakCallback(const_cast<void*>(closure), const_cast<void*>(objectPointer), callback); | 76 Heap::pushThreadLocalWeakCallback(const_cast<void*>(closure), const_cast<void*>(objectPointer), callback); |
| 74 } | 77 } |
| 75 | 78 |
| 76 inline void registerWeakTable(const void* closure, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) | 79 inline void registerWeakTable(const void* closure, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) |
| 77 { | 80 { |
| 78 ASSERT(toDerived()->markingMode() != Visitor::WeakProcessing); | 81 ASSERT(toDerived()->markingMode() != Visitor::WeakProcessing); |
| 79 Heap::registerWeakTable(const_cast<void*>(closure), iterationCallback, i terationDoneCallback); | 82 Heap::registerWeakTable(const_cast<void*>(closure), iterationCallback, i terationDoneCallback, toDerived()->threadState()); |
| 80 } | 83 } |
| 81 | 84 |
| 82 #if ENABLE(ASSERT) | 85 #if ENABLE(ASSERT) |
| 83 inline bool weakTableRegistered(const void* closure) | 86 inline bool weakTableRegistered(const void* closure) |
| 84 { | 87 { |
| 85 return Heap::weakTableRegistered(closure); | 88 return Heap::weakTableRegistered(closure, toDerived()->threadState()); |
| 86 } | 89 } |
| 87 #endif | 90 #endif |
| 88 | 91 |
| 89 inline bool ensureMarked(const void* objectPointer) | 92 inline bool ensureMarked(const void* objectPointer) |
| 90 { | 93 { |
| 91 if (!objectPointer) | 94 if (!objectPointer) |
| 92 return false; | 95 return false; |
| 93 if (!toDerived()->shouldMarkObject(objectPointer)) | 96 if (!toDerived()->shouldMarkObject(objectPointer)) |
| 94 return false; | 97 return false; |
| 95 #if ENABLE(ASSERT) | 98 #if ENABLE(ASSERT) |
| (...skipping 17 matching lines...) Expand all Loading... | |
| 113 return static_cast<Derived*>(this); | 116 return static_cast<Derived*>(this); |
| 114 } | 117 } |
| 115 | 118 |
| 116 protected: | 119 protected: |
| 117 inline void registerWeakCellWithCallback(void** cell, WeakCallback callback) | 120 inline void registerWeakCellWithCallback(void** cell, WeakCallback callback) |
| 118 { | 121 { |
| 119 ASSERT(toDerived()->markingMode() != Visitor::WeakProcessing); | 122 ASSERT(toDerived()->markingMode() != Visitor::WeakProcessing); |
| 120 // We don't want to run weak processings when taking a snapshot. | 123 // We don't want to run weak processings when taking a snapshot. |
| 121 if (toDerived()->markingMode() == Visitor::SnapshotMarking) | 124 if (toDerived()->markingMode() == Visitor::SnapshotMarking) |
| 122 return; | 125 return; |
| 123 Heap::pushGlobalWeakCallback(cell, callback); | 126 Heap::pushGlobalWeakCallback(cell, callback, toDerived()->threadState()); |
| 124 } | 127 } |
| 125 | 128 |
| 126 private: | 129 private: |
| 127 static void markNoTracingCallback(Visitor* visitor, void* object) | 130 static void markNoTracingCallback(Visitor* visitor, void* object) |
| 128 { | 131 { |
| 129 visitor->markNoTracing(object); | 132 visitor->markNoTracing(object); |
| 130 } | 133 } |
| 131 }; | 134 }; |
| 132 | 135 |
| 133 } // namespace blink | 136 } // namespace blink |
| 134 | 137 |
| 135 #endif | 138 #endif |
| OLD | NEW |