OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 403 matching lines...)
414 | 414 |
415 bool ThreadState::popAndInvokeThreadLocalWeakCallback(Visitor* visitor) | 415 bool ThreadState::popAndInvokeThreadLocalWeakCallback(Visitor* visitor) |
416 { | 416 { |
417 ASSERT(checkThread()); | 417 ASSERT(checkThread()); |
418 // For weak processing we should never reach orphaned pages, since | 418 // For weak processing we should never reach orphaned pages, since |
419 // orphaned pages are not traced and thus objects on those pages are | 419 // orphaned pages are not traced and thus objects on those pages are |
420 // never registered for weak processing. We cannot assert this here since | 420 // never registered for weak processing. We cannot assert this here since |
421 // we might have an off-heap collection. We assert it in | 421 // we might have an off-heap collection. We assert it in |
422 // ThreadHeap::pushThreadLocalWeakCallback. | 422 // ThreadHeap::pushThreadLocalWeakCallback. |
423 if (CallbackStack::Item* item = m_threadLocalWeakCallbackStack->pop()) { | 423 if (CallbackStack::Item* item = m_threadLocalWeakCallbackStack->pop()) { |
424 // Note that the thread-local weak processing can be called for | |
425 // an already dead object (for which isHeapObjectAlive(object) can | |
426 // return false). This can happen in the following scenario: | |
427 // | |
428 // 1) Marking runs. A weak callback for an object X is registered |
429 // with the thread that created X (say, thread P). |
430 // 2) Marking finishes. All other threads are resumed. |
431 // 3) Object X becomes unreachable. |
432 // 4) The next GC starts before thread P wakes up. |
433 // 5) Marking runs. Object X is not marked. |
434 // 6) Marking finishes. All other threads are resumed. |
435 // 7) Thread P wakes up and invokes its pending weak callbacks. |
436 // The weak callback for object X is called, even though X |
437 // is already dead. |
438 // | |
439 // Even in this case, it is safe to access object X in the weak |
440 // callback because it has not yet been swept. Invoking the weak |
441 // callback for a dead object is wasteful, but it is only |
442 // wasteful, not unsafe. |
443 // | |
444 // TODO(Oilpan): Avoid calling weak callbacks for dead objects. | |
445 // We can do that by checking isHeapObjectAlive(object) before | |
446 // calling the weak callback, but in that case CallbackStack::Item |
447 // needs to understand T*. | |
448 item->call(visitor); | 424 item->call(visitor); |
449 return true; | 425 return true; |
450 } | 426 } |
451 return false; | 427 return false; |
452 } | 428 } |
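A minimal sketch of the liveness check the deleted TODO describes, assuming a callback item that carries a typed object pointer. HeapObject, WeakItem, and this isHeapObjectAlive() are hypothetical stand-ins, not Blink's real types; the point is only that a liveness test before the call would skip the wasted (but safe) invocations for dead objects:

// Standalone sketch (not Blink code): pop registered weak callbacks and
// skip the ones whose object was not marked by the most recent GC.
#include <functional>
#include <vector>

struct HeapObject {
    bool marked = false;  // set by the marking phase of the latest GC
};

// Stand-in for Blink's isHeapObjectAlive(): alive means marked.
static bool isHeapObjectAlive(const HeapObject* object)
{
    return object->marked;
}

struct WeakItem {
    HeapObject* object;  // the typed pointer the TODO says Item would need
    std::function<void(HeapObject*)> callback;
};

// Pop-and-invoke loop that drops callbacks for dead objects. Calling them
// would still be safe (nothing is swept yet), merely wasteful.
static void invokePendingWeakCallbacks(std::vector<WeakItem>& stack)
{
    while (!stack.empty()) {
        WeakItem item = stack.back();
        stack.pop_back();
        if (isHeapObjectAlive(item.object))
            item.callback(item.object);
    }
}

int main()
{
    HeapObject live{true};
    HeapObject dead{false};
    std::vector<WeakItem> stack;
    stack.push_back({ &live, [](HeapObject*) { /* clear weak refs here */ } });
    stack.push_back({ &dead, [](HeapObject*) { /* skipped: object is dead */ } });
    invokePendingWeakCallbacks(stack);  // invokes only the callback for 'live'
    return 0;
}

The cost of the skip is that each item must know its T*, which is exactly what the TODO points at: CallbackStack::Item stores only an untyped callback today.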
453 | 429 |
454 void ThreadState::threadLocalWeakProcessing() | 430 void ThreadState::threadLocalWeakProcessing() |
455 { | 431 { |
456 ASSERT(checkThread()); | 432 ASSERT(checkThread()); |
457 ASSERT(!sweepForbidden()); | 433 ASSERT(!sweepForbidden()); |
(...skipping 499 matching lines...)
957 m_arenas[i]->makeConsistentForMutator(); | 933 m_arenas[i]->makeConsistentForMutator(); |
958 } | 934 } |
959 | 935 |
960 void ThreadState::preGC() | 936 void ThreadState::preGC() |
961 { | 937 { |
962 ASSERT(!isInGC()); | 938 ASSERT(!isInGC()); |
963 setGCState(GCRunning); | 939 setGCState(GCRunning); |
964 makeConsistentForGC(); | 940 makeConsistentForGC(); |
965 flushHeapDoesNotContainCacheIfNeeded(); | 941 flushHeapDoesNotContainCacheIfNeeded(); |
966 clearArenaAges(); | 942 clearArenaAges(); |
| 943 |
| 944 // It is possible, albeit rare, for a thread to be kept |
| 945 // at a safepoint across multiple GCs: resuming all attached |
| 946 // threads after the "global" GC phases contends for the shared |
| 947 // safepoint barrier mutexes etc., which can delay a thread |
| 948 // long enough for another thread to initiate a new GC |
| 949 // before this one has resumed. |
| 950 // |
| 951 // In that case, the parked thread's ThreadState will still have |
| 952 // unprocessed entries on its local weak callback stack when the |
| 953 // later GC goes ahead. Clear out and invalidate the stack now, so |
| 954 // that when the thread eventually performs thread-local weak |
| 955 // processing, it only processes callbacks registered for objects |
| 956 // that the latest GC found reachable. |
967 m_threadLocalWeakCallbackStack->clear(); | 957 m_threadLocalWeakCallbackStack->clear(); |
968 } | 958 } |
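A standalone sketch of the stale-stack hazard the new comment describes, under the assumption of a thread parked at a safepoint across two GCs; all names here are hypothetical stand-ins for Blink's types:

// Standalone sketch (not Blink code): entries pushed by GC #1's marking
// must not survive into the weak processing that follows GC #2.
#include <cassert>
#include <vector>

struct WeakEntry {
    int registeredByGC;  // which GC's marking phase pushed this entry
};

struct ParkedThreadState {
    std::vector<WeakEntry> threadLocalWeakCallbackStack;

    void preGC()
    {
        // Mirrors m_threadLocalWeakCallbackStack->clear() above: entries
        // from an earlier GC refer to objects the new marking phase has
        // not vetted, so they are discarded before marking repopulates
        // the stack.
        threadLocalWeakCallbackStack.clear();
    }
};

int main()
{
    ParkedThreadState parked;
    parked.threadLocalWeakCallbackStack.push_back({1});  // pushed by GC #1
    // The thread is still parked at its safepoint when GC #2 begins:
    parked.preGC();
    assert(parked.threadLocalWeakCallbackStack.empty());  // stale entry gone
    // GC #2's marking now pushes fresh entries for reachable objects only.
    return 0;
}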
969 | 959 |
970 void ThreadState::postGC(BlinkGC::GCType gcType) | 960 void ThreadState::postGC(BlinkGC::GCType gcType) |
971 { | 961 { |
972 ASSERT(isInGC()); | 962 ASSERT(isInGC()); |
973 for (int i = 0; i < BlinkGC::NumberOfArenas; i++) | 963 for (int i = 0; i < BlinkGC::NumberOfArenas; i++) |
974 m_arenas[i]->prepareForSweep(); | 964 m_arenas[i]->prepareForSweep(); |
975 | 965 |
976 if (gcType == BlinkGC::GCWithSweep) { | 966 if (gcType == BlinkGC::GCWithSweep) { |
(...skipping 552 matching lines...)
1529 threadDump->AddScalar("dead_count", "objects", totalDeadCount); | 1519 threadDump->AddScalar("dead_count", "objects", totalDeadCount); |
1530 threadDump->AddScalar("live_size", "bytes", totalLiveSize); | 1520 threadDump->AddScalar("live_size", "bytes", totalLiveSize); |
1531 threadDump->AddScalar("dead_size", "bytes", totalDeadSize); | 1521 threadDump->AddScalar("dead_size", "bytes", totalDeadSize); |
1532 | 1522 |
1533 base::trace_event::MemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(heapsDumpName); | 1523 base::trace_event::MemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(heapsDumpName); |
1534 base::trace_event::MemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(classesDumpName); | 1524 base::trace_event::MemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(classesDumpName); |
1535 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->AddOwnershipEdge(classesDump->guid(), heapsDump->guid()); | 1525 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->AddOwnershipEdge(classesDump->guid(), heapsDump->guid()); |
1536 } | 1526 } |
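The AddOwnershipEdge call above declares that the per-class dump and the per-heap dump describe the same underlying bytes, so a consumer can attribute them once rather than double counting the two breakdowns. A simplified standalone model of that relationship; these structs are illustrative stand-ins, not the base::trace_event API:

// Standalone sketch (not Chromium code) of an ownership edge between
// two allocator dumps that are different views of the same memory.
#include <cstdint>
#include <map>
#include <string>

struct AllocatorDump {
    uint64_t sizeBytes = 0;
};

struct ProcessMemoryDumpModel {
    std::map<std::string, AllocatorDump> dumps;
    // source -> target: the source dump's bytes are carved out of the
    // target dump's bytes, not additional memory.
    std::map<std::string, std::string> ownershipEdges;

    void addOwnershipEdge(const std::string& source, const std::string& target)
    {
        ownershipEdges[source] = target;
    }

    // Total size, counting overlapping (owning) dumps only once.
    uint64_t effectiveTotal() const
    {
        uint64_t total = 0;
        for (const auto& entry : dumps) {
            // Skip dumps whose bytes are already covered by an edge.
            if (ownershipEdges.count(entry.first))
                continue;
            total += entry.second.sizeBytes;
        }
        return total;
    }
};

int main()
{
    ProcessMemoryDumpModel pmd;
    pmd.dumps["blink_gc/heaps"] = {4096};
    pmd.dumps["blink_gc/classes"] = {4096};  // same bytes, per-class breakdown
    pmd.addOwnershipEdge("blink_gc/classes", "blink_gc/heaps");
    // effectiveTotal() reports 4096, not 8192.
    return pmd.effectiveTotal() == 4096 ? 0 : 1;
}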
1537 | 1527 |
1538 } // namespace blink | 1528 } // namespace blink |