Chromium Code Reviews

Unified Diff: Source/platform/heap/Heap.cpp

Issue 1159773004: Oilpan: Implement a GC to take a heap snapshot (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 5 years, 6 months ago
 /*
  * Copyright (C) 2013 Google Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met:
  *
  *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above
(...skipping 93 matching lines...)
 static String classOf(const void* object)
 {
     if (const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object))))
         return gcInfo->m_className;
     return "unknown";
 }
 #endif

 class GCScope {
 public:
-    explicit GCScope(ThreadState::StackState stackState)
+    static GCScope* current()
+    {
+        ASSERT(ThreadState::current()->isInGC());
+        ASSERT(s_currentGCScope);
+        return s_currentGCScope;
+    }
+
+    GCScope(ThreadState::StackState stackState, ThreadState::GCType gcType)
         : m_state(ThreadState::current())
+        , m_gcType(gcType)
         , m_safePointScope(stackState)
         , m_parkedAllThreads(false)
     {
         TRACE_EVENT0("blink_gc", "Heap::GCScope");
         const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
         if (m_state->isMainThread())
             TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting");

         m_state->checkThread();

         // FIXME: in the unlikely coincidence that two threads decide
         // to collect garbage at the same time, avoid doing two GCs in
         // a row.
         if (LIKELY(ThreadState::stopThreads())) {
             m_parkedAllThreads = true;
         }
         if (m_state->isMainThread())
             TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
+
+        ASSERT(!s_currentGCScope);
+        s_currentGCScope = this;
     }

-    bool allThreadsParked() { return m_parkedAllThreads; }
+    bool allThreadsParked() const { return m_parkedAllThreads; }
+    ThreadState::GCType gcType() const { return m_gcType; }

     ~GCScope()
     {
+        s_currentGCScope = nullptr;
         // Only clean up if we parked all threads, in which case the GC happened
         // and we need to resume the other threads.
         if (LIKELY(m_parkedAllThreads)) {
             ThreadState::resumeThreads();
         }
     }

 private:
     ThreadState* m_state;
+    ThreadState::GCType m_gcType;
     SafePointScope m_safePointScope;
     bool m_parkedAllThreads; // False if we fail to park all threads
+
+    static GCScope* s_currentGCScope;
 };

+GCScope* GCScope::s_currentGCScope = nullptr;
+
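The type-tagged scope is consulted from deep inside marking (see Heap::pushGlobalWeakCallback below) instead of threading the GC type through every call signature: the constructor installs the scope, the destructor clears it, and current() asserts a GC is actually in progress. A standalone sketch of the pattern, with illustrative Scope/GCKind names rather than Blink's:

    #include <cassert>

    enum class GCKind { Normal, TakeSnapshot };

    class Scope {
    public:
        explicit Scope(GCKind kind) : m_kind(kind)
        {
            assert(!s_current); // at most one scope may be live at a time
            s_current = this;
        }
        ~Scope() { s_current = nullptr; }

        static Scope* current()
        {
            assert(s_current); // only valid while a scope object is alive
            return s_current;
        }

        GCKind kind() const { return m_kind; }

    private:
        GCKind m_kind;
        static Scope* s_current;
    };

    Scope* Scope::s_current = nullptr;

    int main()
    {
        Scope scope(GCKind::TakeSnapshot);
        // Deep inside the collection, callers consult the active scope:
        if (Scope::current()->kind() == GCKind::TakeSnapshot) {
            // skip work that must not run during a snapshot GC
        }
    }

Note that s_currentGCScope is a plain static, not thread-local; that is safe here only because stopThreads() has parked every other thread for the lifetime of the scope.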
 #if ENABLE(ASSERT)
 NO_SANITIZE_ADDRESS
 void HeapObjectHeader::zapMagic()
 {
     checkHeader();
     m_magic = zappedMagic;
 }
 #endif

 void HeapObjectHeader::finalize(Address object, size_t objectSize)
(...skipping 129 matching lines...)
     }
     if (previousPage) {
         ASSERT(m_firstUnsweptPage);
         previousPage->m_next = m_firstPage;
         m_firstPage = m_firstUnsweptPage;
         m_firstUnsweptPage = nullptr;
     }
     ASSERT(!m_firstUnsweptPage);
 }

+void BaseHeap::makeConsistentForMutator()
+{
+    clearFreeLists();
+    ASSERT(isConsistentForGC());
+    ASSERT(!m_firstPage);
+
+    // Drop marks from marked objects and rebuild free lists in preparation for
+    // resuming the execution of mutators.
+    BasePage* previousPage = nullptr;
+    for (BasePage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) {
+        page->makeConsistentForMutator();
+        page->markAsSwept();
+    }
+    if (previousPage) {
+        ASSERT(m_firstUnsweptPage);
+        previousPage->m_next = m_firstPage;
+        m_firstPage = m_firstUnsweptPage;
+        m_firstUnsweptPage = nullptr;
+    }
+    ASSERT(!m_firstUnsweptPage);
+}
+
 size_t BaseHeap::objectPayloadSizeForTesting()
 {
     ASSERT(isConsistentForGC());
     ASSERT(!m_firstUnsweptPage);

     size_t objectPayloadSize = 0;
     for (BasePage* page = m_firstPage; page; page = page->next())
         objectPayloadSize += page->objectPayloadSizeForTesting();
     return objectPayloadSize;
 }

 void BaseHeap::prepareHeapForTermination()
 {
     ASSERT(!m_firstUnsweptPage);
     for (BasePage* page = m_firstPage; page; page = page->next()) {
         page->setTerminating();
     }
 }

 void BaseHeap::prepareForSweep()
 {
-    ASSERT(!threadState()->isInGC());
+    ASSERT(threadState()->isInGC());
     ASSERT(!m_firstUnsweptPage);

     // Move all pages to a list of unswept pages.
     m_firstUnsweptPage = m_firstPage;
     m_firstPage = nullptr;
 }

 #if defined(ADDRESS_SANITIZER)
 void BaseHeap::poisonUnmarkedObjects()
 {
(...skipping 838 matching lines...)
             markedObjectSize += header->size();
         } else {
             header->markDead();
         }
         headerAddress += header->size();
     }
     if (markedObjectSize)
         Heap::increaseMarkedObjectSize(markedObjectSize);
 }

+void NormalPage::makeConsistentForMutator()
+{
+    size_t markedObjectSize = 0;
+    Address startOfGap = payload();
+    for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
+        HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
+        ASSERT(header->size() < blinkPagePayloadSize());
+        // Check if this is a free list entry first, since we cannot call
+        // isMarked on a free list entry.
+        if (header->isFree()) {
+            headerAddress += header->size();
+            continue;
+        }
+        header->checkHeader();
+
+        if (startOfGap != headerAddress)
+            heapForNormalPage()->addToFreeList(startOfGap, headerAddress - startOfGap);
+        if (header->isMarked()) {
+            header->unmark();
+            markedObjectSize += header->size();
+        }
+        headerAddress += header->size();
+        startOfGap = headerAddress;
+    }
+    if (startOfGap != payloadEnd())
+        heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap);
+
+    if (markedObjectSize)
+        Heap::increaseMarkedObjectSize(markedObjectSize);
+}
+
 #if defined(ADDRESS_SANITIZER)
 void NormalPage::poisonUnmarkedObjects()
 {
     for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
         HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
         ASSERT(header->size() < blinkPagePayloadSize());
         // Check if this is a free list entry first, since we cannot call
         // isMarked on a free list entry.
         if (header->isFree()) {
             headerAddress += header->size();
(...skipping 269 matching lines...)
 {
     HeapObjectHeader* header = heapObjectHeader();
     if (header->isMarked()) {
         header->unmark();
         Heap::increaseMarkedObjectSize(size());
     } else {
         header->markDead();
     }
 }

+void LargeObjectPage::makeConsistentForMutator()
+{
+    HeapObjectHeader* header = heapObjectHeader();
+    if (header->isMarked()) {
+        header->unmark();
+        Heap::increaseMarkedObjectSize(size());
+    }
+}
+
 #if defined(ADDRESS_SANITIZER)
 void LargeObjectPage::poisonUnmarkedObjects()
 {
     HeapObjectHeader* header = heapObjectHeader();
     if (!header->isMarked())
         ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
 }
 #endif

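poisonUnmarkedObjects quarantines the payload of each dead-but-not-yet-swept object so that AddressSanitizer reports any access that happens before the sweeper reclaims the memory. A minimal sketch of the sanitizer macros involved; the two macros are the standard ASan interface from <sanitizer/asan_interface.h> and expand to no-ops unless the build uses -fsanitize=address:

    // Quarantine a byte range the way poisonUnmarkedObjects does for
    // unmarked payloads. Build with: clang++ -fsanitize=address poison.cc
    #include <sanitizer/asan_interface.h>
    #include <cstdlib>

    int main()
    {
        char* payload = static_cast<char*>(std::malloc(64));
        ASAN_POISON_MEMORY_REGION(payload, 64);   // any access now aborts with a report
        // payload[0] = 1;                        // would be reported as use-after-poison
        ASAN_UNPOISON_MEMORY_REGION(payload, 64); // make the range accessible again
        std::free(payload);
    }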
 void LargeObjectPage::checkAndMarkPointer(Visitor* visitor, Address address)
(...skipping 272 matching lines...)
         builder.append("\n\t");
         builder.append(frameToName.nullableName());
         --framesToShow;
     }
     return builder.toString().replace("blink::", "");
 }
 #endif

 void Heap::pushTraceCallback(void* object, TraceCallback callback)
 {
+    ASSERT(ThreadState::current()->isInGC());
+
     // Trace should never reach an orphaned page.
     ASSERT(!Heap::orphanedPagePool()->contains(object));
     CallbackStack::Item* slot = s_markingStack->allocateEntry();
     *slot = CallbackStack::Item(object, callback);
 }

 bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
 {
     CallbackStack::Item* item = s_markingStack->pop();
     if (!item)
         return false;

 #if ENABLE(GC_PROFILING)
     visitor->setHostInfo(item->object(), classOf(item->object()));
 #endif
     item->call(visitor);
     return true;
 }
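The marking stack above follows a simple discipline: tracing pushes (object, callback) pairs, and the marking loop pops and invokes entries until the stack drains, with each invocation free to push more work. A sketch of that discipline, with a plain std::vector standing in for Blink's segmented CallbackStack (the function names mirror the ones above, but the implementation is only illustrative):

    #include <cstdio>
    #include <vector>

    class Visitor {}; // stand-in for Blink's marking visitor

    using TraceCallback = void (*)(Visitor*, void*);

    struct Item {
        void* object;
        TraceCallback callback;
        void call(Visitor* visitor) { callback(visitor, object); }
    };

    static std::vector<Item> markingStack;

    void pushTraceCallback(void* object, TraceCallback callback)
    {
        markingStack.push_back({object, callback});
    }

    bool popAndInvokeTraceCallback(Visitor* visitor)
    {
        if (markingStack.empty())
            return false; // stack drained; marking has reached a fixed point
        Item item = markingStack.back();
        markingStack.pop_back();
        item.call(visitor); // the callback may push further entries
        return true;
    }

    int main()
    {
        int object = 42;
        pushTraceCallback(&object, [](Visitor*, void* p) {
            std::printf("traced %d\n", *static_cast<int*>(p));
        });
        Visitor visitor;
        while (popAndInvokeTraceCallback(&visitor)) {} // drain the stack
    }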

 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback)
 {
+    ASSERT(ThreadState::current()->isInGC());
+
     // Trace should never reach an orphaned page.
     ASSERT(!Heap::orphanedPagePool()->contains(object));
     CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry();
     *slot = CallbackStack::Item(object, callback);
 }

 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor)
 {
     if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) {
         item->call(visitor);
         return true;
     }
     return false;
 }

 void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback)
 {
+    ASSERT(ThreadState::current()->isInGC());
+    // We don't want to run weak processing when taking a snapshot.
+    if (GCScope::current()->gcType() == ThreadState::TakeSnapshot)

        [sof 2015/05/30 20:50:06] Did you explore providing MarkingVisitor<GlobalMar

+        return;
+
     // Trace should never reach an orphaned page.
     ASSERT(!Heap::orphanedPagePool()->contains(cell));
     CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry();
     *slot = CallbackStack::Item(cell, callback);
 }

 void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback)
 {
+    ASSERT(ThreadState::current()->isInGC());
+    // We don't want to run weak processing when taking a snapshot.
+    if (GCScope::current()->gcType() == ThreadState::TakeSnapshot)
+        return;
+
     // Trace should never reach an orphaned page.
     ASSERT(!Heap::orphanedPagePool()->contains(object));
     ThreadState* state = pageFromObject(object)->heap()->threadState();
     state->pushThreadLocalWeakCallback(closure, callback);
 }

 bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor)
 {
     if (CallbackStack::Item* item = s_globalWeakCallbackStack->pop()) {
         item->call(visitor);
         return true;
     }
     return false;
 }

 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback)
 {
+    ASSERT(ThreadState::current()->isInGC());
+
     // Trace should never reach an orphaned page.
     ASSERT(!Heap::orphanedPagePool()->contains(table));
     CallbackStack::Item* slot = s_ephemeronStack->allocateEntry();
     *slot = CallbackStack::Item(table, iterationCallback);

     // Register a post-marking callback to tell the tables that
     // ephemeron iteration is complete.
     pushPostMarkingCallback(table, iterationDoneCallback);
 }
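registerWeakTable wires each ephemeron table into a two-phase protocol: the iteration callback re-runs during marking until no new objects get marked, and the post-marking callback then tells the table that iteration is complete. The fixed-point idea in miniature, with hypothetical types (Blink's real ephemeron iteration lives in the heap-collection backings):

    #include <cstdio>
    #include <map>
    #include <set>

    // Key -> value edges: a value becomes reachable only once its key is marked.
    using EphemeronTable = std::map<int, int>;

    // Re-iterate until no new values get marked (the ephemeron fixed point).
    void iterateToFixedPoint(const EphemeronTable& table, std::set<int>& marked)
    {
        bool changed = true;
        while (changed) {
            changed = false;
            for (const auto& entry : table) {
                if (marked.count(entry.first) && !marked.count(entry.second)) {
                    marked.insert(entry.second); // may unlock other keys
                    changed = true;
                }
            }
        }
    }

    int main()
    {
        EphemeronTable table{{1, 2}, {2, 3}, {5, 6}};
        std::set<int> marked{1}; // only key 1 is strongly reachable
        iterateToFixedPoint(table, marked);
        for (int v : marked)
            std::printf("%d ", v); // prints: 1 2 3 (6 stays unmarked)
    }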

(...skipping 34 matching lines...)
 }

 void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::GCType gcType, GCReason reason)
 {
     ThreadState* state = ThreadState::current();
     RELEASE_ASSERT(!state->isInGC());
     state->completeSweep();
     ThreadState::GCState originalGCState = state->gcState();
     state->setGCState(ThreadState::StoppingOtherThreads);

-    GCScope gcScope(stackState);
+    GCScope gcScope(stackState, gcType);
     // Check if we successfully parked the other threads. If not, we bail out
     // of the GC.
     if (!gcScope.allThreadsParked()) {
         // Restore the original GCState.
         if (LIKELY(state->gcState() == ThreadState::StoppingOtherThreads))
             state->setGCState(originalGCState);
         return;
     }

     if (state->isMainThread())
(...skipping 333 matching lines...)
 size_t Heap::s_allocatedObjectSize = 0;
 size_t Heap::s_allocatedSpace = 0;
 size_t Heap::s_markedObjectSize = 0;
 // We don't want to use 0 KB for the initial value because it may end up
 // triggering the first GC of some thread prematurely.
 size_t Heap::s_estimatedLiveObjectSize = 512 * 1024;
 size_t Heap::s_externalObjectSizeAtLastGC = 0;
 double Heap::s_estimatedMarkingTimePerByte = 0.0;

 } // namespace blink
