Chromium Code Reviews

Side by Side Diff: Source/platform/heap/Heap.cpp

Issue 383743002: Oilpan: GC profiling. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: TracedValue contexts Created 6 years, 5 months ago
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 21 matching lines...)
32 #include "platform/heap/Heap.h" 32 #include "platform/heap/Heap.h"
33 33
34 #include "platform/ScriptForbiddenScope.h" 34 #include "platform/ScriptForbiddenScope.h"
35 #include "platform/TraceEvent.h" 35 #include "platform/TraceEvent.h"
36 #include "platform/heap/ThreadState.h" 36 #include "platform/heap/ThreadState.h"
37 #include "public/platform/Platform.h" 37 #include "public/platform/Platform.h"
38 #include "wtf/AddressSpaceRandomization.h" 38 #include "wtf/AddressSpaceRandomization.h"
39 #include "wtf/Assertions.h" 39 #include "wtf/Assertions.h"
40 #include "wtf/LeakAnnotations.h" 40 #include "wtf/LeakAnnotations.h"
41 #include "wtf/PassOwnPtr.h" 41 #include "wtf/PassOwnPtr.h"
42 #if ENABLE(GC_TRACING) 42 #if GC_PROFILE_MARKING
43 #include "wtf/HashMap.h" 43 #include "wtf/HashMap.h"
44 #include "wtf/HashSet.h" 44 #include "wtf/HashSet.h"
45 #include "wtf/text/StringBuilder.h" 45 #include "wtf/text/StringBuilder.h"
46 #include "wtf/text/StringHash.h" 46 #include "wtf/text/StringHash.h"
47 #include <stdio.h> 47 #include <stdio.h>
48 #include <utility> 48 #include <utility>
49 #endif 49 #endif
50 #if GC_PROFILE_HEAP
51 #include "platform/TracedValue.h"
52 #endif
50 53
51 #if OS(POSIX) 54 #if OS(POSIX)
52 #include <sys/mman.h> 55 #include <sys/mman.h>
53 #include <unistd.h> 56 #include <unistd.h>
54 #elif OS(WIN) 57 #elif OS(WIN)
55 #include <windows.h> 58 #include <windows.h>
56 #endif 59 #endif
57 60
58 namespace WebCore { 61 namespace WebCore {
59 62
60 #if ENABLE(GC_TRACING) 63 #if GC_PROFILE_MARKING
61 static String classOf(const void* object) 64 static String classOf(const void* object)
62 { 65 {
63 const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object))); 66 const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)));
64 if (gcInfo) 67 if (gcInfo)
65 return gcInfo->m_className; 68 return gcInfo->m_className;
66 69
67 return "unknown"; 70 return "unknown";
68 } 71 }
69 #endif 72 #endif
70 73
(...skipping 291 matching lines...)
362 MemoryRegion m_writable; 365 MemoryRegion m_writable;
363 }; 366 };
364 367
365 class GCScope { 368 class GCScope {
366 public: 369 public:
367 explicit GCScope(ThreadState::StackState stackState) 370 explicit GCScope(ThreadState::StackState stackState)
368 : m_state(ThreadState::current()) 371 : m_state(ThreadState::current())
369 , m_safePointScope(stackState) 372 , m_safePointScope(stackState)
370 , m_parkedAllThreads(false) 373 , m_parkedAllThreads(false)
371 { 374 {
372 TRACE_EVENT0("blink", "Heap::GCScope"); 375 TRACE_EVENT0(GC_PROFILE_GROUP, "Heap::GCScope");
373 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); 376 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
374 if (m_state->isMainThread()) 377 if (m_state->isMainThread())
375 TRACE_EVENT_SET_SAMPLING_STATE("blink", "BlinkGCWaiting"); 378 TRACE_EVENT_SET_SAMPLING_STATE(GC_PROFILE_GROUP, "BlinkGCWaiting");
376 379
377 m_state->checkThread(); 380 m_state->checkThread();
378 381
379 // FIXME: in an unlikely coincidence that two threads decide 382 // FIXME: in an unlikely coincidence that two threads decide
380 // to collect garbage at the same time, avoid doing two GCs in 383 // to collect garbage at the same time, avoid doing two GCs in
381 // a row. 384 // a row.
382 RELEASE_ASSERT(!m_state->isInGC()); 385 RELEASE_ASSERT(!m_state->isInGC());
383 RELEASE_ASSERT(!m_state->isSweepInProgress()); 386 RELEASE_ASSERT(!m_state->isSweepInProgress());
384 if (LIKELY(ThreadState::stopThreads())) { 387 if (LIKELY(ThreadState::stopThreads())) {
385 m_parkedAllThreads = true; 388 m_parkedAllThreads = true;
(...skipping 112 matching lines...)
498 { 501 {
499 return heapObjectHeader()->isMarked(); 502 return heapObjectHeader()->isMarked();
500 } 503 }
501 504
502 template<typename Header> 505 template<typename Header>
503 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address) 506 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
504 { 507 {
505 ASSERT(contains(address)); 508 ASSERT(contains(address));
506 if (!objectContains(address)) 509 if (!objectContains(address))
507 return; 510 return;
508 #if ENABLE(GC_TRACING) 511 #if GC_PROFILE_MARKING
509 visitor->setHostInfo(&address, "stack"); 512 visitor->setHostInfo(&address, "stack");
510 #endif 513 #endif
511 mark(visitor); 514 mark(visitor);
512 } 515 }
513 516
514 template<> 517 template<>
515 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor) 518 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
516 { 519 {
517 if (heapObjectHeader()->hasVTable() && !vTableInitialized(payload())) 520 if (heapObjectHeader()->hasVTable() && !vTableInitialized(payload()))
518 visitor->markConservatively(heapObjectHeader()); 521 visitor->markConservatively(heapObjectHeader());
(...skipping 115 matching lines...)
634 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) { 637 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
635 // Check that large pages are blinkPageSize aligned (modulo the 638 // Check that large pages are blinkPageSize aligned (modulo the
636 // osPageSize for the guard page). 639 // osPageSize for the guard page).
637 ASSERT(reinterpret_cast<Address>(current) - osPageSize() == roundToBlinkPageStart(reinterpret_cast<Address>(current))); 640 ASSERT(reinterpret_cast<Address>(current) - osPageSize() == roundToBlinkPageStart(reinterpret_cast<Address>(current)));
638 if (current->contains(address)) 641 if (current->contains(address))
639 return current; 642 return current;
640 } 643 }
641 return 0; 644 return 0;
642 } 645 }
643 646
644 #if ENABLE(GC_TRACING) 647 #if GC_PROFILE_MARKING
645 template<typename Header> 648 template<typename Header>
646 const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeHeapObject(Address address) 649 const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeHeapObject(Address address)
647 { 650 {
648 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) { 651 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
649 if (current->contains(address)) 652 if (current->contains(address))
650 return current->gcInfo(); 653 return current->gcInfo();
651 } 654 }
652 return 0; 655 return 0;
653 } 656 }
654 #endif 657 #endif
655 658
659 #if GC_PROFILE_HEAP
660 template<typename Header>
661 void ThreadHeap<Header>::snapshot(TracedDictionaryBase* json, ThreadState::SnapshotInfo* info)
662 {
663 size_t previousPageCount = info->pageCount;
664
665 TracedArray<TracedDictionaryBase>& pages = json->beginArray("pages");
666 for (HeapPage<Header>* page = m_firstPage; page; page = page->next(), ++info->pageCount) {
667 // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots.
668 TracedArray<TracedArray<TracedDictionaryBase> >* jsonPage = 0;
669 if (info->pageCount < GC_PROFILE_HEAP_PAGE_DUMP_THRESHOLD) {
670 jsonPage = &pages.beginArray();
671 jsonPage->pushInteger(reinterpret_cast<intptr_t>(page));
672 }
673 page->snapshot(jsonPage, info);
674 if (jsonPage)
675 jsonPage->endArray();
676 }
677 pages.endArray();
678
679 TracedArray<TracedDictionaryBase>& largeObjects = json->beginArray("largeObjects");
680 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
681 TracedDictionary<TracedArray<TracedDictionaryBase> >& jsonCurrent = largeObjects.beginDictionary();
682 current->snapshot(&jsonCurrent, info);
683 jsonCurrent.endDictionary();
684 }
685 largeObjects.endArray();
686
687 size_t pagePoolSize = 0;
688 for (PagePoolEntry* page = m_pagePool; page; page = page->next())
689 ++pagePoolSize;
690
691 json->setInteger("pagePoolSize", pagePoolSize)
692 .setInteger("pageCount", info->pageCount - previousPageCount);
693 }
694 #endif
695
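For orientation, the per-heap data that ThreadHeap<Header>::snapshot assembles through the TracedValue helpers comes out roughly as follows. This is an informal sketch pieced together from the beginArray/setInteger calls above and the per-page and per-large-object snapshot methods later in this patch; the exact event framing produced by TracedValue may differ, and pages beyond GC_PROFILE_HEAP_PAGE_DUMP_THRESHOLD are counted but not dumped.

    pages: [
        [ <page address>, <encoded size>, <class tag>, <encoded size>, <class tag>, ... ],
        ...
    ]
    largeObjects: [
        { class: <class tag>, size: <bytes>, isMarked: <0 or 1> },
        ...
    ]
    pagePoolSize: <entries in this heap's page pool>
    pageCount: <pages visited for this heap>

Free chunks on a page contribute only their encoded size, with no class tag following it.
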
656 template<typename Header> 696 template<typename Header>
657 void ThreadHeap<Header>::addToFreeList(Address address, size_t size) 697 void ThreadHeap<Header>::addToFreeList(Address address, size_t size)
658 { 698 {
659 ASSERT(heapPageFromAddress(address)); 699 ASSERT(heapPageFromAddress(address));
660 ASSERT(heapPageFromAddress(address + size - 1)); 700 ASSERT(heapPageFromAddress(address + size - 1));
661 ASSERT(size < blinkPagePayloadSize()); 701 ASSERT(size < blinkPagePayloadSize());
662 // The free list entries are only pointer aligned (but when we allocate 702 // The free list entries are only pointer aligned (but when we allocate
663 // from them we are 8 byte aligned due to the header size). 703 // from them we are 8 byte aligned due to the header size).
664 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask)); 704 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask));
665 ASSERT(!(size & allocationMask)); 705 ASSERT(!(size & allocationMask));
(...skipping 528 matching lines...)
1194 } 1234 }
1195 1235
1196 template<typename Header> 1236 template<typename Header>
1197 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address) 1237 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
1198 { 1238 {
1199 ASSERT(contains(address)); 1239 ASSERT(contains(address));
1200 Header* header = findHeaderFromAddress(address); 1240 Header* header = findHeaderFromAddress(address);
1201 if (!header) 1241 if (!header)
1202 return; 1242 return;
1203 1243
1204 #if ENABLE(GC_TRACING) 1244 #if GC_PROFILE_MARKING
1205 visitor->setHostInfo(&address, "stack"); 1245 visitor->setHostInfo(&address, "stack");
1206 #endif 1246 #endif
1207 if (hasVTable(header) && !vTableInitialized(header->payload())) 1247 if (hasVTable(header) && !vTableInitialized(header->payload()))
1208 visitor->markConservatively(header); 1248 visitor->markConservatively(header);
1209 else 1249 else
1210 visitor->mark(header, traceCallback(header)); 1250 visitor->mark(header, traceCallback(header));
1211 } 1251 }
1212 1252
1213 #if ENABLE(GC_TRACING) 1253 #if GC_PROFILE_MARKING
1214 template<typename Header> 1254 template<typename Header>
1215 const GCInfo* HeapPage<Header>::findGCInfo(Address address) 1255 const GCInfo* HeapPage<Header>::findGCInfo(Address address)
1216 { 1256 {
1217 if (address < payload()) 1257 if (address < payload())
1218 return 0; 1258 return 0;
1219 1259
1220 if (gcInfo()) // for non FinalizedObjectHeader 1260 if (gcInfo()) // for non FinalizedObjectHeader
1221 return gcInfo(); 1261 return gcInfo();
1222 1262
1223 Header* header = findHeaderFromAddress(address); 1263 Header* header = findHeaderFromAddress(address);
1224 if (!header) 1264 if (!header)
1225 return 0; 1265 return 0;
1226 1266
1227 return header->gcInfo(); 1267 return header->gcInfo();
1228 } 1268 }
1229 #endif 1269 #endif
1230 1270
1271 #if GC_PROFILE_HEAP
1272 template<typename Header>
1273 void HeapPage<Header>::snapshot(TracedArrayBase* json, ThreadState::SnapshotInfo* info)
1274 {
1275 Header* header = 0;
1276 for (Address addr = payload(); addr < end(); addr += header->size()) {
1277 header = reinterpret_cast<Header*>(addr);
1278 if (json)
1279 json->pushInteger(header->encodedSize());
1280 if (header->isFree()) {
1281 info->freeSize += header->size();
1282 continue;
1283 }
1284
1285 size_t tag = info->getClassTag(header->gcInfo());
1286 size_t age = header->age();
1287 if (json)
1288 json->pushInteger(tag);
1289 if (header->isMarked()) {
1290 info->liveCount[tag] += 1;
1291 info->liveSize += header->size();
1292 // Count objects that are live when promoted to the final generation.
1293 if (age == maxHeapObjectAge - 1)
1294 info->generations[tag][maxHeapObjectAge] += 1;
1295 header->incAge();
haraken 2014/07/14 02:26:14 Do we want to increase the age when age == maxHeapObjectAge?
zerny-chromium 2014/07/28 11:54:41 No, the increment check against maxHeapObjectAge i
zerny-chromium 2014/07/28 11:57:59 (Too quick there) We do want to increment when it
1296 } else {
1297 info->deadCount[tag] += 1;
1298 info->deadSize += header->size();
1299 // Count objects that are dead before the final generation.
1300 if (age < maxHeapObjectAge)
1301 info->generations[tag][age] += 1;
1302 }
1303 }
1304 }
1305 #endif
1306
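The exchange above questions the header->incAge() call once an object has already reached maxHeapObjectAge, and a later comment on LargeHeapObject<Header>::snapshot suggests factoring the live/dead accounting out so both call sites share it. A minimal sketch of what such a shared helper could look like, using only the counters visible in this patch; the name and exact signature are hypothetical and not part of this change:

    // Hypothetical helper: record one object header in the snapshot counters.
    // Mirrors the accounting in HeapPage<Header>::snapshot above: a marked
    // object is counted as live (and as promoted if this GC moves it into the
    // final generation); a dead object is counted in the generation it died in
    // and its age is not advanced.
    template<typename Header>
    void snapshotObjectHeader(Header* header, size_t classTag, ThreadState::SnapshotInfo* info)
    {
        size_t age = header->age();
        if (header->isMarked()) {
            info->liveCount[classTag] += 1;
            info->liveSize += header->size();
            if (age == maxHeapObjectAge - 1)
                info->generations[classTag][maxHeapObjectAge] += 1;
            header->incAge();
        } else {
            info->deadCount[classTag] += 1;
            info->deadSize += header->size();
            if (age < maxHeapObjectAge)
                info->generations[classTag][age] += 1;
        }
    }

Whether incAge() should also fire once the age has already reached maxHeapObjectAge is exactly the open question in the thread above; the sketch keeps the behavior of the patch as written.
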
1231 #if defined(ADDRESS_SANITIZER) 1307 #if defined(ADDRESS_SANITIZER)
1232 template<typename Header> 1308 template<typename Header>
1233 void HeapPage<Header>::poisonUnmarkedObjects() 1309 void HeapPage<Header>::poisonUnmarkedObjects()
1234 { 1310 {
1235 for (Address headerAddress = payload(); headerAddress < end(); ) { 1311 for (Address headerAddress = payload(); headerAddress < end(); ) {
1236 Header* header = reinterpret_cast<Header*>(headerAddress); 1312 Header* header = reinterpret_cast<Header*>(headerAddress);
1237 ASSERT(header->size() < blinkPagePayloadSize()); 1313 ASSERT(header->size() < blinkPagePayloadSize());
1238 1314
1239 if (!header->isFree() && !header->isMarked()) 1315 if (!header->isFree() && !header->isMarked())
1240 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); 1316 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
(...skipping 41 matching lines...)
1282 return header->hasVTable(); 1358 return header->hasVTable();
1283 } 1359 }
1284 1360
1285 template<typename Header> 1361 template<typename Header>
1286 void LargeHeapObject<Header>::getStats(HeapStats& stats) 1362 void LargeHeapObject<Header>::getStats(HeapStats& stats)
1287 { 1363 {
1288 stats.increaseAllocatedSpace(size()); 1364 stats.increaseAllocatedSpace(size());
1289 stats.increaseObjectSpace(payloadSize()); 1365 stats.increaseObjectSpace(payloadSize());
1290 } 1366 }
1291 1367
1368 #if GC_PROFILE_HEAP
1369 template<typename Header>
1370 void LargeHeapObject<Header>::snapshot(TracedDictionaryBase* json, ThreadState::SnapshotInfo* info)
1371 {
1372 Header* header = heapObjectHeader();
1373 size_t tag = info->getClassTag(header->gcInfo());
1374 size_t age = header->age();
1375 if (isMarked()) {
1376 info->liveCount[tag] += 1;
1377 info->liveSize += header->size();
1378 // Count objects that are live when promoted to the final generation.
1379 if (age == maxHeapObjectAge - 1)
1380 info->generations[tag][maxHeapObjectAge] += 1;
1381 header->incAge();
haraken 2014/07/14 02:26:14 Do we want to increase the age when age == maxHeapObjectAge?
zerny-chromium 2014/07/28 11:54:41 Ditto.
1382 } else {
1383 info->deadCount[tag] += 1;
1384 info->deadSize += header->size();
1385 // Count objects that are live when promoted to the final generation.
1386 if (age == maxHeapObjectAge - 1)
1387 info->generations[tag][maxHeapObjectAge] += 1;
1388 header->incAge();
haraken 2014/07/14 02:26:14 Factor out the line 1378 - 1381 and the line 1385
zerny-chromium 2014/07/28 11:54:41 This is a copy-paste error. The dead-branch should
1389 }
1390
1391 if (json) {
1392 json->setInteger("class", tag)
1393 .setInteger("size", header->size())
1394 .setInteger("isMarked", isMarked());
1395 }
1396 }
1397 #endif
1398
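As the reply above acknowledges, the else-branch of this function was copied from the marked case: for a dead object it should presumably mirror HeapPage<Header>::snapshot, i.e. count the object in the generation it died in and leave its age alone. Combined with the factoring suggested in the comments, the function could end up looking roughly like this (a sketch only; snapshotObjectHeader is the hypothetical helper outlined earlier, not code from this patch):

    template<typename Header>
    void LargeHeapObject<Header>::snapshot(TracedDictionaryBase* json, ThreadState::SnapshotInfo* info)
    {
        Header* header = heapObjectHeader();
        size_t tag = info->getClassTag(header->gcInfo());
        // Shared live/dead accounting; replaces the copied live-branch logic
        // in the else case above.
        snapshotObjectHeader(header, tag, info);
        if (json) {
            json->setInteger("class", tag)
                .setInteger("size", header->size())
                .setInteger("isMarked", isMarked());
        }
    }
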
1292 template<typename Entry> 1399 template<typename Entry>
1293 void HeapExtentCache<Entry>::flush() 1400 void HeapExtentCache<Entry>::flush()
1294 { 1401 {
1295 if (m_hasEntries) { 1402 if (m_hasEntries) {
1296 for (int i = 0; i < numberOfEntries; i++) 1403 for (int i = 0; i < numberOfEntries; i++)
1297 m_entries[i] = Entry(); 1404 m_entries[i] = Entry();
1298 m_hasEntries = false; 1405 m_hasEntries = false;
1299 } 1406 }
1300 } 1407 }
1301 1408
(...skipping 94 matching lines...)
1396 return false; 1503 return false;
1397 } 1504 }
1398 CallbackStack* nextStack = m_next; 1505 CallbackStack* nextStack = m_next;
1399 *first = nextStack; 1506 *first = nextStack;
1400 delete this; 1507 delete this;
1401 return nextStack->popAndInvokeCallback(first, visitor); 1508 return nextStack->popAndInvokeCallback(first, visitor);
1402 } 1509 }
1403 Item* item = --m_current; 1510 Item* item = --m_current;
1404 1511
1405 VisitorCallback callback = item->callback(); 1512 VisitorCallback callback = item->callback();
1406 #if ENABLE(GC_TRACING) 1513 #if GC_PROFILE_MARKING
1407 if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndInvokeCallback 1514 if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndInvokeCallback
1408 visitor->setHostInfo(item->object(), classOf(item->object())); 1515 visitor->setHostInfo(item->object(), classOf(item->object()));
1409 #endif 1516 #endif
1410 callback(visitor, item->object()); 1517 callback(visitor, item->object());
1411 1518
1412 return true; 1519 return true;
1413 } 1520 }
1414 1521
1415 void CallbackStack::invokeCallbacks(CallbackStack** first, Visitor* visitor) 1522 void CallbackStack::invokeCallbacks(CallbackStack** first, Visitor* visitor)
1416 { 1523 {
(...skipping 37 matching lines...)
1454 } 1561 }
1455 if (m_next) 1562 if (m_next)
1456 return m_next->hasCallbackForObject(object); 1563 return m_next->hasCallbackForObject(object);
1457 1564
1458 return false; 1565 return false;
1459 } 1566 }
1460 #endif 1567 #endif
1461 1568
1462 class MarkingVisitor : public Visitor { 1569 class MarkingVisitor : public Visitor {
1463 public: 1570 public:
1464 #if ENABLE(GC_TRACING) 1571 #if GC_PROFILE_MARKING
1465 typedef HashSet<uintptr_t> LiveObjectSet; 1572 typedef HashSet<uintptr_t> LiveObjectSet;
1466 typedef HashMap<String, LiveObjectSet> LiveObjectMap; 1573 typedef HashMap<String, LiveObjectSet> LiveObjectMap;
1467 typedef HashMap<uintptr_t, std::pair<uintptr_t, String> > ObjectGraph; 1574 typedef HashMap<uintptr_t, std::pair<uintptr_t, String> > ObjectGraph;
1468 #endif 1575 #endif
1469 1576
1470 inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback) 1577 inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback)
1471 { 1578 {
1472 ASSERT(header); 1579 ASSERT(header);
1473 ASSERT(objectPointer); 1580 ASSERT(objectPointer);
1474 if (header->isMarked()) 1581 if (header->isMarked())
1475 return; 1582 return;
1476 header->mark(); 1583 header->mark();
1477 #if ENABLE(GC_TRACING) 1584 #if GC_PROFILE_MARKING
1478 MutexLocker locker(objectGraphMutex()); 1585 MutexLocker locker(objectGraphMutex());
1479 String className(classOf(objectPointer)); 1586 String className(classOf(objectPointer));
1480 { 1587 {
1481 LiveObjectMap::AddResult result = currentlyLive().add(className, LiveObjectSet()); 1588 LiveObjectMap::AddResult result = currentlyLive().add(className, LiveObjectSet());
1482 result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer)); 1589 result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer));
1483 } 1590 }
1484 ObjectGraph::AddResult result = objectGraph().add(reinterpret_cast<uintptr_t>(objectPointer), std::make_pair(reinterpret_cast<uintptr_t>(m_hostObject), m_hostName)); 1591 ObjectGraph::AddResult result = objectGraph().add(reinterpret_cast<uintptr_t>(objectPointer), std::make_pair(reinterpret_cast<uintptr_t>(m_hostObject), m_hostName));
1485 ASSERT(result.isNewEntry); 1592 ASSERT(result.isNewEntry);
1486 // fprintf(stderr, "%s[%p] -> %s[%p]\n", m_hostName.ascii().data(), m_hostObject, className.ascii().data(), objectPointer); 1593 // fprintf(stderr, "%s[%p] -> %s[%p]\n", m_hostName.ascii().data(), m_hostObject, className.ascii().data(), objectPointer);
1487 #endif 1594 #endif
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after
1573 visitHeader(header, header->payload(), callback); \ 1680 visitHeader(header, header->payload(), callback); \
1574 } \ 1681 } \
1575 virtual bool isMarked(const Type* objectPointer) OVERRIDE \ 1682 virtual bool isMarked(const Type* objectPointer) OVERRIDE \
1576 { \ 1683 { \
1577 return HeapObjectHeader::fromPayload(objectPointer)->isMarked(); \ 1684 return HeapObjectHeader::fromPayload(objectPointer)->isMarked(); \
1578 } 1685 }
1579 1686
1580 FOR_EACH_TYPED_HEAP(DEFINE_VISITOR_METHODS) 1687 FOR_EACH_TYPED_HEAP(DEFINE_VISITOR_METHODS)
1581 #undef DEFINE_VISITOR_METHODS 1688 #undef DEFINE_VISITOR_METHODS
1582 1689
1583 #if ENABLE(GC_TRACING) 1690 #if GC_PROFILE_MARKING
1584 void reportStats() 1691 void reportStats()
1585 { 1692 {
1586 fprintf(stderr, "\n---------- AFTER MARKING -------------------\n"); 1693 fprintf(stderr, "\n---------- AFTER MARKING -------------------\n");
1587 for (LiveObjectMap::iterator it = currentlyLive().begin(), end = currentlyLive().end(); it != end; ++it) { 1694 for (LiveObjectMap::iterator it = currentlyLive().begin(), end = currentlyLive().end(); it != end; ++it) {
1588 fprintf(stderr, "%s %u", it->key.ascii().data(), it->value.size()); 1695 fprintf(stderr, "%s %u", it->key.ascii().data(), it->value.size());
1589 1696
1590 if (it->key == "WebCore::Document") 1697 if (it->key == "WebCore::Document")
1591 reportStillAlive(it->value, previouslyLive().get(it->key)); 1698 reportStillAlive(it->value, previouslyLive().get(it->key));
1592 1699
1593 fprintf(stderr, "\n"); 1700 fprintf(stderr, "\n");
(...skipping 157 matching lines...)
1751 1858
1752 #ifdef NDEBUG 1859 #ifdef NDEBUG
1753 s_heapDoesNotContainCache->addEntry(address, true); 1860 s_heapDoesNotContainCache->addEntry(address, true);
1754 #else 1861 #else
1755 if (!s_heapDoesNotContainCache->lookup(address)) 1862 if (!s_heapDoesNotContainCache->lookup(address))
1756 s_heapDoesNotContainCache->addEntry(address, true); 1863 s_heapDoesNotContainCache->addEntry(address, true);
1757 #endif 1864 #endif
1758 return 0; 1865 return 0;
1759 } 1866 }
1760 1867
1761 #if ENABLE(GC_TRACING) 1868 #if GC_PROFILE_MARKING
1762 const GCInfo* Heap::findGCInfo(Address address) 1869 const GCInfo* Heap::findGCInfo(Address address)
1763 { 1870 {
1764 return ThreadState::findGCInfoFromAllThreads(address); 1871 return ThreadState::findGCInfoFromAllThreads(address);
1765 } 1872 }
1873 #endif
1766 1874
1875 #if GC_PROFILE_MARKING
1767 void Heap::dumpPathToObjectOnNextGC(void* p) 1876 void Heap::dumpPathToObjectOnNextGC(void* p)
1768 { 1877 {
1769 static_cast<MarkingVisitor*>(s_markingVisitor)->dumpPathToObjectOnNextGC(p); 1878 static_cast<MarkingVisitor*>(s_markingVisitor)->dumpPathToObjectOnNextGC(p);
1770 } 1879 }
1771 1880
1772 String Heap::createBacktraceString() 1881 String Heap::createBacktraceString()
1773 { 1882 {
1774 int framesToShow = 3; 1883 int framesToShow = 3;
1775 int stackFrameSize = 16; 1884 int stackFrameSize = 16;
1776 ASSERT(stackFrameSize >= framesToShow); 1885 ASSERT(stackFrameSize >= framesToShow);
(...skipping 96 matching lines...)
1873 GCScope gcScope(stackState); 1982 GCScope gcScope(stackState);
1874 // Check if we successfully parked the other threads. If not we bail out of the GC. 1983 // Check if we successfully parked the other threads. If not we bail out of the GC.
1875 if (!gcScope.allThreadsParked()) { 1984 if (!gcScope.allThreadsParked()) {
1876 ThreadState::current()->setGCRequested(); 1985 ThreadState::current()->setGCRequested();
1877 return; 1986 return;
1878 } 1987 }
1879 1988
1880 ScriptForbiddenScope forbiddenScope; 1989 ScriptForbiddenScope forbiddenScope;
1881 s_lastGCWasConservative = false; 1990 s_lastGCWasConservative = false;
1882 1991
1883 TRACE_EVENT0("blink", "Heap::collectGarbage"); 1992 TRACE_EVENT0(GC_PROFILE_GROUP, "Heap::collectGarbage");
1884 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink", "BlinkGC"); 1993 TRACE_EVENT_SCOPED_SAMPLING_STATE(GC_PROFILE_GROUP, "BlinkGC");
1885 double timeStamp = WTF::currentTimeMS(); 1994 double timeStamp = WTF::currentTimeMS();
1886 #if ENABLE(GC_TRACING) 1995 #if GC_PROFILE_MARKING
1887 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear(); 1996 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear();
1888 #endif 1997 #endif
1889 1998
1890 // Disallow allocation during garbage collection (but not 1999 // Disallow allocation during garbage collection (but not
1891 // during the finalization that happens when the gcScope is 2000 // during the finalization that happens when the gcScope is
1892 // torn down). 2001 // torn down).
1893 NoAllocationScope<AnyThread> noAllocationScope; 2002 NoAllocationScope<AnyThread> noAllocationScope;
1894 2003
1895 prepareForGC(); 2004 prepareForGC();
1896 2005
(...skipping 16 matching lines...)
1913 // to do cleanup (specifically clear the queued bits for weak hash 2022 // to do cleanup (specifically clear the queued bits for weak hash
1914 // tables). 2023 // tables).
1915 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { } 2024 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { }
1916 2025
1917 CallbackStack::clear(&s_ephemeronStack); 2026 CallbackStack::clear(&s_ephemeronStack);
1918 2027
1919 // It is not permitted to trace pointers of live objects in the weak 2028 // It is not permitted to trace pointers of live objects in the weak
1920 // callback phase, so the marking stack should still be empty here. 2029 // callback phase, so the marking stack should still be empty here.
1921 ASSERT(s_markingStack->isEmpty()); 2030 ASSERT(s_markingStack->isEmpty());
1922 2031
1923 #if ENABLE(GC_TRACING) 2032 #if GC_PROFILE_MARKING
1924 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats(); 2033 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
1925 #endif 2034 #endif
1926 2035
1927 if (blink::Platform::current()) { 2036 if (blink::Platform::current()) {
1928 uint64_t objectSpaceSize; 2037 uint64_t objectSpaceSize;
1929 uint64_t allocatedSpaceSize; 2038 uint64_t allocatedSpaceSize;
1930 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize); 2039 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
1931 blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); 2040 blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
1932 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); 2041 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
1933 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); 2042 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
(...skipping 68 matching lines...)
2002 template class ThreadHeap<HeapObjectHeader>; 2111 template class ThreadHeap<HeapObjectHeader>;
2003 2112
2004 Visitor* Heap::s_markingVisitor; 2113 Visitor* Heap::s_markingVisitor;
2005 CallbackStack* Heap::s_markingStack; 2114 CallbackStack* Heap::s_markingStack;
2006 CallbackStack* Heap::s_weakCallbackStack; 2115 CallbackStack* Heap::s_weakCallbackStack;
2007 CallbackStack* Heap::s_ephemeronStack; 2116 CallbackStack* Heap::s_ephemeronStack;
2008 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; 2117 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
2009 bool Heap::s_shutdownCalled = false; 2118 bool Heap::s_shutdownCalled = false;
2010 bool Heap::s_lastGCWasConservative = false; 2119 bool Heap::s_lastGCWasConservative = false;
2011 } 2120 }