OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 20 matching lines...) | |
31 #include "config.h" | 31 #include "config.h" |
32 #include "platform/heap/ThreadState.h" | 32 #include "platform/heap/ThreadState.h" |
33 | 33 |
34 #include "platform/ScriptForbiddenScope.h" | 34 #include "platform/ScriptForbiddenScope.h" |
35 #include "platform/TraceEvent.h" | 35 #include "platform/TraceEvent.h" |
36 #include "platform/heap/AddressSanitizer.h" | 36 #include "platform/heap/AddressSanitizer.h" |
37 #include "platform/heap/Handle.h" | 37 #include "platform/heap/Handle.h" |
38 #include "platform/heap/Heap.h" | 38 #include "platform/heap/Heap.h" |
39 #include "public/platform/Platform.h" | 39 #include "public/platform/Platform.h" |
40 #include "wtf/ThreadingPrimitives.h" | 40 #include "wtf/ThreadingPrimitives.h" |
41 #if ENABLE(GC_PROFILE_HEAP) | |
42 #include "platform/TracedValue.h" | |
43 #endif | |
41 | 44 |
42 #if OS(WIN) | 45 #if OS(WIN) |
43 #include <stddef.h> | 46 #include <stddef.h> |
44 #include <windows.h> | 47 #include <windows.h> |
45 #include <winnt.h> | 48 #include <winnt.h> |
46 #elif defined(__GLIBC__) | 49 #elif defined(__GLIBC__) |
47 extern "C" void* __libc_stack_end; // NOLINT | 50 extern "C" void* __libc_stack_end; // NOLINT |
48 #endif | 51 #endif |
49 | 52 |
50 #if defined(MEMORY_SANITIZER) | 53 #if defined(MEMORY_SANITIZER) |
(...skipping 509 matching lines...) | |
560 // Whether or not the pointer was within an object it was certainly | 563 // Whether or not the pointer was within an object it was certainly |
561 // within a page that is part of the heap, so we don't want to ask the | 564 // within a page that is part of the heap, so we don't want to ask the |
562 // other heaps or put this address in the | 565 // other heaps or put this address in the |
563 // HeapDoesNotContainCache. | 566 // HeapDoesNotContainCache. |
564 return true; | 567 return true; |
565 } | 568 } |
566 | 569 |
567 return false; | 570 return false; |
568 } | 571 } |
569 | 572 |
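The context lines above close a conservative pointer check: an address that lands anywhere inside a page owned by this thread's heaps is reported as possibly pointing at a live object, and only addresses outside every page get recorded in the negative HeapDoesNotContainCache. A minimal standalone sketch of that pattern is below; the container choices, the page size, and all names are invented for illustration and are not Blink's implementation.

```cpp
#include <cstdint>
#include <iterator>
#include <set>
#include <unordered_set>

// Illustrative sketch only (hypothetical names, not Blink's classes): a
// conservative "might this address point into the GC heap?" check with a
// negative cache, mirroring the reasoning in the comment above.
constexpr std::uintptr_t kPageSize = 1 << 17; // page size chosen for the example

struct ConservativeHeapLookup {
    std::set<std::uintptr_t> pageStarts;               // start address of every heap page
    std::unordered_set<std::uintptr_t> notInHeapCache; // page-aligned addresses known to be outside the heap

    bool mightBeHeapObject(std::uintptr_t address)
    {
        const std::uintptr_t pageAligned = address & ~(kPageSize - 1);
        if (notInHeapCache.count(pageAligned))
            return false;
        // Find the page whose range could contain this address.
        auto it = pageStarts.upper_bound(address);
        if (it != pageStarts.begin()) {
            const std::uintptr_t start = *std::prev(it);
            if (address - start < kPageSize) {
                // The address is inside a heap page. Whether or not it points at an
                // actual object, report "maybe live" and do NOT cache a negative
                // result: the page itself belongs to the heap.
                return true;
            }
        }
        // Outside every page of this heap: safe to remember the negative answer.
        notInHeapCache.insert(pageAligned);
        return false;
    }
};
```

Keeping in-page addresses out of the negative cache matters because the cache is keyed per page, and a page that is part of the heap must never be reported as "not in heap".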
570 #if ENABLE(GC_TRACING) | 573 #if ENABLE(GC_PROFILE_MARKING) |
571 const GCInfo* ThreadState::findGCInfo(Address address) | 574 const GCInfo* ThreadState::findGCInfo(Address address) |
572 { | 575 { |
573 BaseHeapPage* page = heapPageFromAddress(address); | 576 BaseHeapPage* page = heapPageFromAddress(address); |
574 if (page) { | 577 if (page) { |
575 return page->findGCInfo(address); | 578 return page->findGCInfo(address); |
576 } | 579 } |
577 return 0; | 580 return 0; |
578 } | 581 } |
579 #endif | 582 #endif |
580 | 583 |
584 #if ENABLE(GC_PROFILE_HEAP) | |
585 size_t ThreadState::SnapshotInfo::getClassTag(const GCInfo* gcinfo) | |
586 { | |
587 HashMap<const GCInfo*, size_t>::AddResult result = classTags.add(gcinfo, classTags.size()); | |
588 if (result.isNewEntry) { | |
589 liveCount.append(0); | |
590 deadCount.append(0); | |
591 generations.append(Vector<int, 8>()); | |
592 generations.last().fill(0, 8); | |
593 } | |
594 return result.storedValue->value; | |
595 } | |
596 | |
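getClassTag above interns a GCInfo pointer into a small integer tag and lazily grows the parallel per-class counters that the snapshot uses. A hedged, self-contained sketch of the same interning pattern with standard containers follows; all names are invented, and unordered_map::emplace stands in for WTF::HashMap::add.

```cpp
#include <array>
#include <cstddef>
#include <unordered_map>
#include <vector>

// Illustrative sketch of the tag-interning pattern used by getClassTag above:
// the first time a class is seen it receives the next free index, and the
// parallel per-class counter vectors grow by one slot.
struct ClassTagTable {
    static constexpr std::size_t kGenerations = 8; // plays the role of heapObjectGenerations

    std::unordered_map<const void*, std::size_t> classTags;
    std::vector<std::size_t> liveCount;
    std::vector<std::size_t> deadCount;
    std::vector<std::array<int, kGenerations>> generations;

    std::size_t getClassTag(const void* classInfo)
    {
        // classTags.size() is evaluated before the insertion happens, so a new
        // entry is tagged with the next unused index, just like HashMap::add.
        auto result = classTags.emplace(classInfo, classTags.size());
        if (result.second) {
            liveCount.push_back(0);
            deadCount.push_back(0);
            generations.push_back({}); // zero-initialized generation histogram
        }
        return result.first->second;
    }
};
```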
597 void ThreadState::snapshot() | |
598 { | |
599 SnapshotInfo info(this); | |
600 TracedValue json; | |
601 | |
602 #define SNAPSHOT_HEAP(HeapType) \ | |
603 { \ | |
604 TracedDictionary<TracedArray<TracedValue> >& jsonHeap = heaps.beginDictionary(); \ | |
605 jsonHeap.setString("name", #HeapType); \ | |
606 m_heaps[HeapType##Heap]->snapshot(&jsonHeap, &info); \ | |
607 jsonHeap.endDictionary(); \ | |
608 } | |
609 TracedArray<TracedValue>& heaps = json.beginArray("heaps"); | |
610 SNAPSHOT_HEAP(General); | |
611 FOR_EACH_TYPED_HEAP(SNAPSHOT_HEAP); | |
612 heaps.endArray(); | |
613 #undef SNAPSHOT_HEAP | |
614 | |
615 json.setInteger("allocatedSpace", m_stats.totalAllocatedSpace()) | |
616 .setInteger("objectSpace", m_stats.totalObjectSpace()) | |
617 .setInteger("liveSize", info.liveSize) | |
618 .setInteger("deadSize", info.deadSize) | |
619 .setInteger("freeSize", info.freeSize) | |
620 .setInteger("pageCount", info.freeSize); | |
621 | |
622 Vector<String> classNameVector(info.classTags.size()); | |
623 for (HashMap<const GCInfo*, size_t>::iterator it = info.classTags.begin(); it != info.classTags.end(); ++it) | |
624 classNameVector[it->value] = it->key->m_className; | |
625 | |
626 TracedArray<TracedValue>& jsonClasses = json.beginArray("classes"); | |
627 for (size_t i = 0; i < classNameVector.size(); ++i) { | |
628 TracedDictionary<TracedArray<TracedValue> >& jsonClass = jsonClasses.beginDictionary(); | |
629 jsonClass | |
630 .setString("name", classNameVector[i]) | |
631 .setInteger("liveCount", info.liveCount[i]) | |
632 .setInteger("deadCount", info.deadCount[i]); | |
633 TracedArray<TracedDictionary<TracedArray<TracedValue> > >& jsonGens = jsonClass.beginArray("generations"); | |
634 for (size_t j = 0; j < heapObjectGenerations; ++j) | |
635 jsonGens.pushInteger(info.generations[i][j]); | |
636 jsonGens.endArray(); | |
637 jsonClass.endDictionary(); | |
638 } | |
639 jsonClasses.endArray(); | |
640 | |
641 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("blinkGC", "ThreadState", this, json.finish()); | |
642 } | |
643 #endif | |
644 | |
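The SNAPSHOT_HEAP macro in the snapshot() code above leans on two preprocessor features: #HeapType stringizes the argument for the "name" field, and HeapType##Heap token-pastes it into a heap index. A self-contained demo of just that idiom follows; the enum, the map, and the heap names are invented for the example and are not Blink's.

```cpp
#include <cstdio>
#include <map>
#include <string>

// Demo of the stringize (#) and token-paste (##) idiom SNAPSHOT_HEAP uses.
// Everything below is invented for illustration; only the macro idiom matches
// the patch above.
enum HeapIndex { GeneralHeap, VectorHeap, NumberOfDemoHeaps };

static std::map<std::string, int> g_sizes = { { "General", 3 }, { "Vector", 5 } };

#define SNAPSHOT_HEAP(HeapType) \
    std::printf("heap %-8s index %d size %d\n", #HeapType, HeapType##Heap, g_sizes[#HeapType])

int main()
{
    SNAPSHOT_HEAP(General); // expands "General" as the name and GeneralHeap as the index
    SNAPSHOT_HEAP(Vector);
    return 0;
}
```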
581 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback) | 645 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback) |
582 { | 646 { |
583 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack); | 647 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack); |
584 *slot = CallbackStack::Item(object, callback); | 648 *slot = CallbackStack::Item(object, callback); |
585 } | 649 } |
586 | 650 |
587 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor) | 651 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor) |
588 { | 652 { |
589 return m_weakCallbackStack->popAndInvokeCallback<WeaknessProcessing>(&m_weakCallbackStack, visitor); | 653 return m_weakCallbackStack->popAndInvokeCallback<WeaknessProcessing>(&m_weakCallbackStack, visitor); |
590 } | 654 } |
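pushWeakObjectPointerCallback and popAndInvokeWeakPointerCallback above form a small protocol: during marking, (object, callback) pairs are pushed, and afterwards the thread drains the stack so each callback can clear weak references to objects that did not survive. A standalone sketch of that protocol, with invented types and a plain vector in place of Blink's block-allocated CallbackStack:

```cpp
#include <cstdio>
#include <utility>
#include <vector>

// Illustrative sketch (invented types): the register-then-drain protocol used
// by the weak-callback stack. A Visitor stand-in is passed through so each
// callback could consult the collector while clearing weak pointers.
struct Visitor; // opaque to the callbacks in this sketch
using WeakPointerCallback = void (*)(Visitor*, void* object);

class WeakCallbackStack {
public:
    void push(void* object, WeakPointerCallback callback)
    {
        m_items.emplace_back(object, callback);
    }

    // Returns false once the stack is empty, mirroring how
    // popAndInvokeWeakPointerCallback is used in a while loop.
    bool popAndInvoke(Visitor* visitor)
    {
        if (m_items.empty())
            return false;
        auto item = m_items.back();
        m_items.pop_back();
        item.second(visitor, item.first);
        return true;
    }

private:
    std::vector<std::pair<void*, WeakPointerCallback>> m_items;
};

// Example weak callback: clears a weak field whose target did not survive.
struct Example {
    void* weakTarget = nullptr;
    static void process(Visitor*, void* self)
    {
        static_cast<Example*>(self)->weakTarget = nullptr; // pretend the target died
        std::puts("weak field cleared");
    }
};

int main()
{
    WeakCallbackStack stack;
    Example e;
    stack.push(&e, &Example::process);
    Visitor* visitor = nullptr;              // no real visitor in this sketch
    while (stack.popAndInvoke(visitor)) { }  // drain, as performPendingSweep does
}
```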
(...skipping 284 matching lines...) | |
875 for (size_t i = 0; i < slotCount; ++i) { | 939 for (size_t i = 0; i < slotCount; ++i) { |
876 m_safePointStackCopy[i] = from[i]; | 940 m_safePointStackCopy[i] = from[i]; |
877 } | 941 } |
878 } | 942 } |
879 | 943 |
880 void ThreadState::performPendingSweep() | 944 void ThreadState::performPendingSweep() |
881 { | 945 { |
882 if (!sweepRequested()) | 946 if (!sweepRequested()) |
883 return; | 947 return; |
884 | 948 |
885 TRACE_EVENT0("blink", "ThreadState::performPendingSweep"); | 949 #if ENABLE(GC_PROFILE_HEAP) |
950 // We snapshot the heap prior to sweeping to get numbers for both resources | |
951 // that have been allocated since the last GC and for resources that are | |
952 // going to be freed. | |
953 bool gcTracingEnabled; | |
954 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blinkGC", &gcTracingEnabled); | |
tkent 2014/08/05 00:38:10: Ditto.
955 if (gcTracingEnabled && m_stats.totalObjectSpace() > 0) | |
956 snapshot(); | |
957 #endif | |
958 | |
959 TRACE_EVENT0("blinkGC", "ThreadState::performPendingSweep"); | |
886 ScriptForbiddenScope forbiddenScope; | 960 ScriptForbiddenScope forbiddenScope; |
887 | 961 |
888 double timeStamp = WTF::currentTimeMS(); | 962 double timeStamp = WTF::currentTimeMS(); |
889 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); | 963 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); |
890 if (isMainThread()) | 964 if (isMainThread()) |
891 TRACE_EVENT_SET_SAMPLING_STATE("blink", "BlinkGCSweeping"); | 965 TRACE_EVENT_SET_SAMPLING_STATE("blinkGC", "BlinkGCSweeping"); |
892 | 966 |
893 m_sweepInProgress = true; | 967 m_sweepInProgress = true; |
894 // Disallow allocation during weak processing. | 968 // Disallow allocation during weak processing. |
895 enterNoAllocationScope(); | 969 enterNoAllocationScope(); |
896 // Perform thread-specific weak processing. | 970 // Perform thread-specific weak processing. |
897 while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { } | 971 while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { } |
898 leaveNoAllocationScope(); | 972 leaveNoAllocationScope(); |
899 // Perform sweeping and finalization. | 973 // Perform sweeping and finalization. |
900 m_stats.clear(); // Sweeping will recalculate the stats | 974 m_stats.clear(); // Sweeping will recalculate the stats |
901 for (int i = 0; i < NumberOfHeaps; i++) | 975 for (int i = 0; i < NumberOfHeaps; i++) |
(...skipping 40 matching lines...) | |
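The new lines in performPendingSweep above pin down an ordering that is easy to get wrong: the heap snapshot has to run before sweeping so it still sees the objects about to be freed, and weak processing runs inside a no-allocation scope with script forbidden. A toy sketch of that sequencing, with stand-in types invented for illustration rather than Blink's real ThreadState:

```cpp
#include <cstdio>

// Standalone sketch of the ordering established above. All types and flags
// here are invented; they only model the sequence of steps.
struct DemoThreadHeap {
    bool sweepRequested = true;
    bool scriptForbidden = false;
    bool allocationAllowed = true;

    void snapshotBeforeSweep() { std::puts("snapshot: live + soon-to-be-freed objects"); }
    void processWeakCallbacks() { std::puts("weak processing (no allocation allowed)"); }
    void sweepAllHeaps()        { std::puts("sweep + finalize, stats recalculated"); }

    void performPendingSweep()
    {
        if (!sweepRequested)
            return;
        // Snapshot first, so the numbers still include objects the sweep frees.
        snapshotBeforeSweep();

        scriptForbidden = true;      // plays the role of ScriptForbiddenScope
        allocationAllowed = false;   // enterNoAllocationScope()
        processWeakCallbacks();
        allocationAllowed = true;    // leaveNoAllocationScope()

        sweepAllHeaps();
        sweepRequested = false;
        scriptForbidden = false;
    }
};

int main()
{
    DemoThreadHeap heap;
    heap.performPendingSweep();
}
```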
942 ASSERT(!state->isAtSafePoint()); | 1016 ASSERT(!state->isAtSafePoint()); |
943 state->safePoint(HeapPointersOnStack); | 1017 state->safePoint(HeapPointersOnStack); |
944 } | 1018 } |
945 | 1019 |
946 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads() | 1020 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads() |
947 { | 1021 { |
948 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ()); | 1022 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ()); |
949 return threads; | 1023 return threads; |
950 } | 1024 } |
951 | 1025 |
952 #if ENABLE(GC_TRACING) | 1026 #if ENABLE(GC_PROFILE_MARKING) |
953 const GCInfo* ThreadState::findGCInfoFromAllThreads(Address address) | 1027 const GCInfo* ThreadState::findGCInfoFromAllThreads(Address address) |
954 { | 1028 { |
955 bool needLockForIteration = !isAnyThreadInGC(); | 1029 bool needLockForIteration = !isAnyThreadInGC(); |
956 if (needLockForIteration) | 1030 if (needLockForIteration) |
957 threadAttachMutex().lock(); | 1031 threadAttachMutex().lock(); |
958 | 1032 |
959 ThreadState::AttachedThreadStateSet& threads = attachedThreads(); | 1033 ThreadState::AttachedThreadStateSet& threads = attachedThreads(); |
960 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1034 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
961 if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) { | 1035 if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) { |
962 if (needLockForIteration) | 1036 if (needLockForIteration) |
963 threadAttachMutex().unlock(); | 1037 threadAttachMutex().unlock(); |
964 return gcInfo; | 1038 return gcInfo; |
965 } | 1039 } |
966 } | 1040 } |
967 if (needLockForIteration) | 1041 if (needLockForIteration) |
968 threadAttachMutex().unlock(); | 1042 threadAttachMutex().unlock(); |
969 return 0; | 1043 return 0; |
970 } | 1044 } |
971 #endif | 1045 #endif |
1046 | |
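findGCInfoFromAllThreads above only takes the thread-attach mutex when no thread is currently in GC, since during a collection the mutex is already held and re-locking would deadlock; it then has to unlock by hand on every return path. A self-contained sketch of the same conditional-lock iteration, with invented types and std::unique_lock standing in for the manual lock()/unlock():

```cpp
#include <mutex>
#include <vector>

// Illustrative sketch (invented types): iterate a thread registry that is
// normally guarded by a mutex, but skip locking when the lock is effectively
// already held, e.g. because a GC is in progress.
struct DemoThread {
    const char* findInfo(const void* address) const
    {
        (void)address;
        return nullptr; // stand-in for ThreadState::findGCInfo
    }
};

std::mutex g_registryMutex;
std::vector<DemoThread*> g_threads;
bool g_anyThreadInGC = false; // set while a collection holds g_registryMutex

const char* findInfoFromAllThreads(const void* address)
{
    const bool needLock = !g_anyThreadInGC;
    // unique_lock with defer_lock allows conditional locking while still
    // unlocking automatically on every return path.
    std::unique_lock<std::mutex> lock(g_registryMutex, std::defer_lock);
    if (needLock)
        lock.lock();

    for (DemoThread* thread : g_threads) {
        if (const char* info = thread->findInfo(address))
            return info;
    }
    return nullptr;
}
```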
972 } | 1047 } |