| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 1096 matching lines...) |
| 1107 { | 1107 { |
| 1108 NoSweepScope scope(this); | 1108 NoSweepScope scope(this); |
| 1109 | 1109 |
| 1110 // Disallow allocation during weak processing. | 1110 // Disallow allocation during weak processing. |
| 1111 enterNoAllocationScope(); | 1111 enterNoAllocationScope(); |
| 1112 { | 1112 { |
| 1113 TRACE_EVENT0("blink_gc", "ThreadState::threadLocalWeakProcessing"); | 1113 TRACE_EVENT0("blink_gc", "ThreadState::threadLocalWeakProcessing"); |
| 1114 // Perform thread-specific weak processing. | 1114 // Perform thread-specific weak processing. |
| 1115 while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { } | 1115 while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { } |
| 1116 } | 1116 } |
| 1117 invokePreFinalizer(*Heap::s_markingVisitor); |
| 1117 leaveNoAllocationScope(); | 1118 leaveNoAllocationScope(); |
| 1118 | 1119 |
| 1119 // Perform sweeping and finalization. | 1120 // Perform sweeping and finalization. |
| 1120 | 1121 |
| 1121 // Sweeping will recalculate the stats | 1122 // Sweeping will recalculate the stats |
| 1122 m_stats.clear(); | 1123 m_stats.clear(); |
| 1123 | 1124 |
| 1124 // Sweep the non-finalized heap pages on multiple threads. | 1125 // Sweep the non-finalized heap pages on multiple threads. |
| 1125 // Attempt to load-balance by having the sweeper thread sweep as | 1126 // Attempt to load-balance by having the sweeper thread sweep as |
| 1126 // close to half of the pages as possible. | 1127 // close to half of the pages as possible. |
| (...skipping 113 matching lines...) |
| 1240 ASSERT(!state->isAtSafePoint()); | 1241 ASSERT(!state->isAtSafePoint()); |
| 1241 state->safePoint(HeapPointersOnStack); | 1242 state->safePoint(HeapPointersOnStack); |
| 1242 } | 1243 } |
| 1243 | 1244 |
| 1244 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads() | 1245 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads() |
| 1245 { | 1246 { |
| 1246 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ()); | 1247 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ()); |
| 1247 return threads; | 1248 return threads; |
| 1248 } | 1249 } |
| 1249 | 1250 |
| 1251 void ThreadState::unregisterObjectWithPreFinalizerInternal(void* target) |
| 1252 { |
| 1253 if (isSweepInProgress()) |
| 1254 return; |
| 1255 auto it = m_objectsWithPreFinalizer.find(target); |
| 1256 ASSERT(it != m_objectsWithPreFinalizer.end()); |
| 1257 m_objectsWithPreFinalizer.remove(it); |
| 1258 } |
| 1259 |
| 1260 void ThreadState::invokePreFinalizer(Visitor& visitor) |
| 1261 { |
| 1262 Vector<void*> deadObjects; |
| 1263 for (auto& entry : m_objectsWithPreFinalizer) { |
| 1264 if (entry.value(entry.key, visitor)) |
| 1265 deadObjects.append(entry.key); |
| 1266 } |
| 1267 m_objectsWithPreFinalizer.removeAll(deadObjects); |
| 1268 } |
| 1269 |
| 1250 #if ENABLE(GC_PROFILE_MARKING) | 1270 #if ENABLE(GC_PROFILE_MARKING) |
| 1251 const GCInfo* ThreadState::findGCInfoFromAllThreads(Address address) | 1271 const GCInfo* ThreadState::findGCInfoFromAllThreads(Address address) |
| 1252 { | 1272 { |
| 1253 bool needLockForIteration = !isAnyThreadInGC(); | 1273 bool needLockForIteration = !isAnyThreadInGC(); |
| 1254 if (needLockForIteration) | 1274 if (needLockForIteration) |
| 1255 threadAttachMutex().lock(); | 1275 threadAttachMutex().lock(); |
| 1256 | 1276 |
| 1257 ThreadState::AttachedThreadStateSet& threads = attachedThreads(); | 1277 ThreadState::AttachedThreadStateSet& threads = attachedThreads(); |
| 1258 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1278 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
| 1259 if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) { | 1279 if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) { |
| 1260 if (needLockForIteration) | 1280 if (needLockForIteration) |
| 1261 threadAttachMutex().unlock(); | 1281 threadAttachMutex().unlock(); |
| 1262 return gcInfo; | 1282 return gcInfo; |
| 1263 } | 1283 } |
| 1264 } | 1284 } |
| 1265 if (needLockForIteration) | 1285 if (needLockForIteration) |
| 1266 threadAttachMutex().unlock(); | 1286 threadAttachMutex().unlock(); |
| 1267 return 0; | 1287 return 0; |
| 1268 } | 1288 } |
| 1269 #endif | 1289 #endif |
| 1270 | 1290 |
| 1271 } | 1291 } |