Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 1096 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1107 { | 1107 { |
| 1108 NoSweepScope scope(this); | 1108 NoSweepScope scope(this); |
| 1109 | 1109 |
| 1110 // Disallow allocation during weak processing. | 1110 // Disallow allocation during weak processing. |
| 1111 enterNoAllocationScope(); | 1111 enterNoAllocationScope(); |
| 1112 { | 1112 { |
| 1113 TRACE_EVENT0("blink_gc", "ThreadState::threadLocalWeakProcessing"); | 1113 TRACE_EVENT0("blink_gc", "ThreadState::threadLocalWeakProcessing"); |
| 1114 // Perform thread-specific weak processing. | 1114 // Perform thread-specific weak processing. |
| 1115 while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { } | 1115 while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { } |
| 1116 } | 1116 } |
| 1117 { | |
| 1118 TRACE_EVENT0("blink_gc", "ThreadState::invokePreFinalizers"); | |
| 1119 invokePreFinalizers(*Heap::s_markingVisitor); | |
|
haraken
2014/10/06 01:55:27
Just to confirm: The main difference between weak
| |
| 1120 } | |
| 1117 leaveNoAllocationScope(); | 1121 leaveNoAllocationScope(); |
| 1118 | 1122 |
| 1119 // Perform sweeping and finalization. | 1123 // Perform sweeping and finalization. |
| 1120 | 1124 |
| 1121 // Sweeping will recalculate the stats | 1125 // Sweeping will recalculate the stats |
| 1122 m_stats.clear(); | 1126 m_stats.clear(); |
| 1123 | 1127 |
| 1124 // Sweep the non-finalized heap pages on multiple threads. | 1128 // Sweep the non-finalized heap pages on multiple threads. |
| 1125 // Attempt to load-balance by having the sweeper thread sweep as | 1129 // Attempt to load-balance by having the sweeper thread sweep as |
| 1126 // close to half of the pages as possible. | 1130 // close to half of the pages as possible. |
| (...skipping 113 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1240 ASSERT(!state->isAtSafePoint()); | 1244 ASSERT(!state->isAtSafePoint()); |
| 1241 state->safePoint(HeapPointersOnStack); | 1245 state->safePoint(HeapPointersOnStack); |
| 1242 } | 1246 } |
| 1243 | 1247 |
// Returns the process-wide set of ThreadStates for every attached thread.
// DEFINE_STATIC_LOCAL yields a lazily-initialized, never-destructed
// function-local singleton (standard Blink idiom for static containers).
// NOTE(review): callers appear responsible for holding threadAttachMutex()
// while iterating (see findGCInfoFromAllThreads below) — confirm.
ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads()
{
    DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ());
    return threads;
}
| 1249 | 1253 |
| 1254 void ThreadState::unregisterPreFinalizerInternal(void* target) | |
| 1255 { | |
| 1256 if (isSweepInProgress()) | |
| 1257 return; | |
| 1258 auto it = m_preFinalizers.find(target); | |
| 1259 ASSERT(it != m_preFinalizers.end()); | |
| 1260 m_preFinalizers.remove(it); | |
| 1261 } | |
| 1262 | |
| 1263 void ThreadState::invokePreFinalizers(Visitor& visitor) | |
| 1264 { | |
| 1265 Vector<void*> deadObjects; | |
| 1266 for (auto& entry : m_preFinalizers) { | |
| 1267 if (entry.value(entry.key, visitor)) | |
| 1268 deadObjects.append(entry.key); | |
| 1269 } | |
| 1270 m_preFinalizers.removeAll(deadObjects); | |
| 1271 } | |
| 1272 | |
| 1250 #if ENABLE(GC_PROFILE_MARKING) | 1273 #if ENABLE(GC_PROFILE_MARKING) |
| 1251 const GCInfo* ThreadState::findGCInfoFromAllThreads(Address address) | 1274 const GCInfo* ThreadState::findGCInfoFromAllThreads(Address address) |
| 1252 { | 1275 { |
| 1253 bool needLockForIteration = !isAnyThreadInGC(); | 1276 bool needLockForIteration = !isAnyThreadInGC(); |
| 1254 if (needLockForIteration) | 1277 if (needLockForIteration) |
| 1255 threadAttachMutex().lock(); | 1278 threadAttachMutex().lock(); |
| 1256 | 1279 |
| 1257 ThreadState::AttachedThreadStateSet& threads = attachedThreads(); | 1280 ThreadState::AttachedThreadStateSet& threads = attachedThreads(); |
| 1258 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1281 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
| 1259 if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) { | 1282 if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) { |
| 1260 if (needLockForIteration) | 1283 if (needLockForIteration) |
| 1261 threadAttachMutex().unlock(); | 1284 threadAttachMutex().unlock(); |
| 1262 return gcInfo; | 1285 return gcInfo; |
| 1263 } | 1286 } |
| 1264 } | 1287 } |
| 1265 if (needLockForIteration) | 1288 if (needLockForIteration) |
| 1266 threadAttachMutex().unlock(); | 1289 threadAttachMutex().unlock(); |
| 1267 return 0; | 1290 return 0; |
| 1268 } | 1291 } |
| 1269 #endif | 1292 #endif |
| 1270 | 1293 |
| 1271 } | 1294 } |
| OLD | NEW |