OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 70 matching lines...)
81 AtomicallyInitializedStaticReference(RecursiveMutex, mutex, (new RecursiveMutex)); | 81 AtomicallyInitializedStaticReference(RecursiveMutex, mutex, (new RecursiveMutex)); |
82 return mutex; | 82 return mutex; |
83 } | 83 } |
84 | 84 |
85 ThreadState::ThreadState() | 85 ThreadState::ThreadState() |
86 : m_thread(currentThread()) | 86 : m_thread(currentThread()) |
87 , m_persistents(adoptPtr(new PersistentAnchor())) | 87 , m_persistents(adoptPtr(new PersistentAnchor())) |
88 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) | 88 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) |
89 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) | 89 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) |
90 , m_safePointScopeMarker(nullptr) | 90 , m_safePointScopeMarker(nullptr) |
91 , m_atSafePoint(false) | |
92 , m_interruptors() | 91 , m_interruptors() |
93 , m_sweepForbidden(false) | |
94 , m_noAllocationCount(0) | 92 , m_noAllocationCount(0) |
95 , m_gcForbiddenCount(0) | 93 , m_gcForbiddenCount(0) |
96 , m_vectorBackingHeapIndex(Vector1HeapIndex) | 94 , m_vectorBackingHeapIndex(Vector1HeapIndex) |
97 , m_currentHeapAges(0) | 95 , m_currentHeapAges(0) |
98 , m_isTerminating(false) | |
99 , m_gcMixinMarker(nullptr) | 96 , m_gcMixinMarker(nullptr) |
100 , m_shouldFlushHeapDoesNotContainCache(false) | 97 , m_shouldFlushHeapDoesNotContainCache(false) |
101 , m_gcState(NoGCScheduled) | 98 , m_gcState(NoGCScheduled) |
102 , m_traceDOMWrappers(nullptr) | 99 , m_traceDOMWrappers(nullptr) |
103 #if defined(ADDRESS_SANITIZER) | 100 #if defined(ADDRESS_SANITIZER) |
104 , m_asanFakeStack(__asan_get_current_fake_stack()) | 101 , m_asanFakeStack(__asan_get_current_fake_stack()) |
105 #endif | 102 #endif |
106 #if ENABLE(GC_PROFILING) | 103 #if ENABLE(GC_PROFILING) |
107 , m_nextFreeListSnapshotTime(-std::numeric_limits<double>::infinity()) | 104 , m_nextFreeListSnapshotTime(-std::numeric_limits<double>::infinity()) |
108 #endif | 105 #endif |
| 106 , m_atSafePoint(false) |
| 107 , m_isTerminating(false) |
| 108 , m_isSweepForbidden(false) |
| 109 #if ENABLE(ASSERT) |
| 110 , m_isEagerlySweeping(false) |
| 111 #endif |
109 { | 112 { |
110 checkThread(); | 113 checkThread(); |
111 ASSERT(!**s_threadSpecific); | 114 ASSERT(!**s_threadSpecific); |
112 **s_threadSpecific = this; | 115 **s_threadSpecific = this; |
113 | 116 |
114 if (isMainThread()) { | 117 if (isMainThread()) { |
115 s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - sizeof(void*); | 118 s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - sizeof(void*); |
116 size_t underestimatedStackSize = StackFrameDepth::getUnderestimatedStackSize(); | 119 size_t underestimatedStackSize = StackFrameDepth::getUnderestimatedStackSize(); |
117 if (underestimatedStackSize > sizeof(void*)) | 120 if (underestimatedStackSize > sizeof(void*)) |
118 s_mainThreadUnderestimatedStackSize = underestimatedStackSize - sizeof(void*); | 121 s_mainThreadUnderestimatedStackSize = underestimatedStackSize - sizeof(void*); |
(...skipping 525 matching lines...)
644 if (!isSweepingInProgress()) | 647 if (!isSweepingInProgress()) |
645 return; | 648 return; |
646 | 649 |
647 // This check is here to prevent performIdleLazySweep() from being called | 650 // This check is here to prevent performIdleLazySweep() from being called |
648 // recursively. I'm not sure if it can happen but it would be safer to have | 651 // recursively. I'm not sure if it can happen but it would be safer to have |
649 // the check just in case. | 652 // the check just in case. |
650 if (sweepForbidden()) | 653 if (sweepForbidden()) |
651 return; | 654 return; |
652 | 655 |
653 bool sweepCompleted = true; | 656 bool sweepCompleted = true; |
654 ThreadState::SweepForbiddenScope scope(this); | 657 SweepForbiddenScope scope(this); |
655 { | 658 { |
656 if (isMainThread()) | 659 if (isMainThread()) |
657 ScriptForbiddenScope::enter(); | 660 ScriptForbiddenScope::enter(); |
658 | 661 |
659 for (int i = 0; i < NumberOfHeaps; i++) { | 662 for (int i = 0; i < NumberOfHeaps; i++) { |
660 // lazySweepWithDeadline() won't check the deadline until it sweeps | 663 // lazySweepWithDeadline() won't check the deadline until it sweeps |
661 // 10 pages. So we give a small slack for safety. | 664 // 10 pages. So we give a small slack for safety. |
662 double slack = 0.001; | 665 double slack = 0.001; |
663 double remainingBudget = deadlineSeconds - slack - Platform::current()->monotonicallyIncreasingTime(); | 666 double remainingBudget = deadlineSeconds - slack - Platform::current()->monotonicallyIncreasingTime(); |
664 if (remainingBudget <= 0 || !m_heaps[i]->lazySweepWithDeadline(deadlineSeconds)) { | 667 if (remainingBudget <= 0 || !m_heaps[i]->lazySweepWithDeadline(deadlineSeconds)) { |
(...skipping 311 matching lines...)
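[Editor's note] The loop above budgets each heap's lazy sweep against the idle deadline, reserving a small slack because lazySweepWithDeadline() only re-checks the clock every ten pages. A minimal sketch of that pattern, using std::chrono in place of Blink's Platform clock; the names and the callback are illustrative, not Blink's actual API:

#include <chrono>
#include <functional>

using Clock = std::chrono::steady_clock;

// Sweep heaps one by one, bailing out once the remaining idle budget (minus a
// small slack) is gone; sweeping resumes on the next idle period. The
// sweepHeap callback stands in for the per-heap lazySweepWithDeadline() and
// returns true if that heap finished sweeping before the deadline.
bool sweepHeapsWithinDeadline(int numberOfHeaps, Clock::time_point deadline,
    const std::function<bool(int, Clock::time_point)>& sweepHeap)
{
    const auto slack = std::chrono::milliseconds(1); // mirrors the 0.001 s slack above
    for (int i = 0; i < numberOfHeaps; ++i) {
        const auto remainingBudget = (deadline - slack) - Clock::now();
        if (remainingBudget <= Clock::duration::zero() || !sweepHeap(i, deadline))
            return false; // budget exhausted; not all heaps are swept yet
    }
    return true; // every heap completed its sweep within the budget
}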
976 { | 979 { |
977 // Some objects need to be finalized promptly and cannot be handled | 980 // Some objects need to be finalized promptly and cannot be handled |
978 // by lazy sweeping. Keep those in a designated heap and sweep it | 981 // by lazy sweeping. Keep those in a designated heap and sweep it |
979 // eagerly. | 982 // eagerly. |
980 ASSERT(isSweepingInProgress()); | 983 ASSERT(isSweepingInProgress()); |
981 | 984 |
982 // Mirroring the completeSweep() condition; see its comment. | 985 // Mirroring the completeSweep() condition; see its comment. |
983 if (sweepForbidden()) | 986 if (sweepForbidden()) |
984 return; | 987 return; |
985 | 988 |
986 ThreadState::SweepForbiddenScope scope(this); | 989 SweepForbiddenScope scope(this); |
987 { | 990 { |
988 if (isMainThread()) | 991 if (isMainThread()) |
989 ScriptForbiddenScope::enter(); | 992 ScriptForbiddenScope::enter(); |
990 | 993 |
991 m_heaps[EagerSweepHeapIndex]->completeSweep(); | 994 m_heaps[EagerSweepHeapIndex]->completeSweep(); |
992 | 995 |
993 if (isMainThread()) | 996 if (isMainThread()) |
994 ScriptForbiddenScope::exit(); | 997 ScriptForbiddenScope::exit(); |
995 } | 998 } |
996 } | 999 } |
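[Editor's note] eagerSweep() above and completeSweep() below both install a SweepForbiddenScope before touching the heaps, so a finalizer that allocates (and could otherwise trigger sweeping again) hits the sweepForbidden() early return instead of re-entering. A minimal sketch of that RAII guard idea, with illustrative names rather than Blink's actual types:

// Sets a "sweep in progress" flag for the lifetime of the scope; the
// destructor clears it even when sweeping exits through an early return.
class SweepForbiddenGuard {
public:
    explicit SweepForbiddenGuard(bool& flag) : m_flag(flag) { m_flag = true; }
    ~SweepForbiddenGuard() { m_flag = false; }
    SweepForbiddenGuard(const SweepForbiddenGuard&) = delete;
    SweepForbiddenGuard& operator=(const SweepForbiddenGuard&) = delete;

private:
    bool& m_flag;
};

void sweepOnce(bool& sweepForbidden)
{
    if (sweepForbidden)
        return; // already sweeping further up the stack; do not recurse

    SweepForbiddenGuard guard(sweepForbidden);
    // ... sweep heaps here; an allocation made by a finalizer that would
    // normally kick off another sweep sees the flag and returns early ...
}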
997 | 1000 |
998 void ThreadState::completeSweep() | 1001 void ThreadState::completeSweep() |
999 { | 1002 { |
1000 // If we are not in a sweeping phase, there is nothing to do here. | 1003 // If we are not in a sweeping phase, there is nothing to do here. |
1001 if (!isSweepingInProgress()) | 1004 if (!isSweepingInProgress()) |
1002 return; | 1005 return; |
1003 | 1006 |
1004 // completeSweep() can be called recursively if finalizers can allocate | 1007 // completeSweep() can be called recursively if finalizers can allocate |
1005 // memory and the allocation triggers completeSweep(). This check prevents | 1008 // memory and the allocation triggers completeSweep(). This check prevents |
1006 // the sweeping from being executed recursively. | 1009 // the sweeping from being executed recursively. |
1007 if (sweepForbidden()) | 1010 if (sweepForbidden()) |
1008 return; | 1011 return; |
1009 | 1012 |
1010 ThreadState::SweepForbiddenScope scope(this); | 1013 SweepForbiddenScope scope(this); |
1011 { | 1014 { |
1012 if (isMainThread()) | 1015 if (isMainThread()) |
1013 ScriptForbiddenScope::enter(); | 1016 ScriptForbiddenScope::enter(); |
1014 | 1017 |
1015 TRACE_EVENT0("blink_gc", "ThreadState::completeSweep"); | 1018 TRACE_EVENT0("blink_gc", "ThreadState::completeSweep"); |
1016 double timeStamp = WTF::currentTimeMS(); | 1019 double timeStamp = WTF::currentTimeMS(); |
1017 | 1020 |
1018 static_assert(EagerSweepHeapIndex == 0, "Eagerly swept heaps must be processed first."); | 1021 static_assert(EagerSweepHeapIndex == 0, "Eagerly swept heaps must be processed first."); |
1019 for (int i = 0; i < NumberOfHeaps; i++) | 1022 for (int i = 0; i < NumberOfHeaps; i++) |
1020 m_heaps[i]->completeSweep(); | 1023 m_heaps[i]->completeSweep(); |
(...skipping 356 matching lines...)
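[Editor's note] The static_assert above pins the eagerly swept heap to index 0, so the plain ascending loop finalizes prompt-finalization objects before any lazily swept heap is touched. A compile-time check of that ordering invariant might look like the following; the enumerator list is illustrative, not Blink's full set of heap indices:

// Heap indices: objects that need prompt finalization live in the eager heap,
// which must come first so a 0..NumberOfHeaps sweep loop reaches it before
// the lazily swept heaps.
enum HeapIndex {
    EagerSweepHeapIndex = 0,
    Vector1HeapIndex,
    NormalPageHeapIndex,
    NumberOfHeaps,
};

static_assert(EagerSweepHeapIndex == 0,
    "Eagerly swept heaps must be processed first.");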
1377 json->beginArray(it->key.ascii().data()); | 1380 json->beginArray(it->key.ascii().data()); |
1378 for (size_t age = 0; age <= maxHeapObjectAge; ++age) | 1381 for (size_t age = 0; age <= maxHeapObjectAge; ++age) |
1379 json->pushInteger(it->value.ages[age]); | 1382 json->pushInteger(it->value.ages[age]); |
1380 json->endArray(); | 1383 json->endArray(); |
1381 } | 1384 } |
1382 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); | 1385 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); |
1383 } | 1386 } |
1384 #endif | 1387 #endif |
1385 | 1388 |
1386 } // namespace blink | 1389 } // namespace blink |