| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" | 5 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <iterator> | 8 #include <iterator> |
| 9 | 9 |
| 10 #include "base/atomicops.h" | 10 #include "base/atomicops.h" |
| (...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 68 // When enabling capturing, also initialize the TLS slot. This does not create | 68 // When enabling capturing, also initialize the TLS slot. This does not create |
| 69 // a TLS instance yet. | 69 // a TLS instance yet. |
| 70 if (enabled && !g_tls_alloc_ctx_tracker.initialized()) | 70 if (enabled && !g_tls_alloc_ctx_tracker.initialized()) |
| 71 g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker); | 71 g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker); |
| 72 | 72 |
| 73 // Release ordering ensures that when a thread observes |capture_enabled_| to | 73 // Release ordering ensures that when a thread observes |capture_enabled_| to |
| 74 // be true through an acquire load, the TLS slot has been initialized. | 74 // be true through an acquire load, the TLS slot has been initialized. |
| 75 subtle::Release_Store(&capture_enabled_, enabled); | 75 subtle::Release_Store(&capture_enabled_, enabled); |
| 76 } | 76 } |
| 77 | 77 |
| 78 void AllocationContextTracker::PushPseudoStackFrame( | 78 void AllocationContextTracker::PushPseudoStackFrame(StackFrame frame) { |
| 79 const char* trace_event_name) { | |
| 80 // Impose a limit on the height to verify that every push is popped, because | 79 // Impose a limit on the height to verify that every push is popped, because |
| 81 // in practice the pseudo stack never grows higher than ~20 frames. | 80 // in practice the pseudo stack never grows higher than ~20 frames. |
| 82 if (pseudo_stack_.size() < kMaxStackDepth) | 81 if (pseudo_stack_.size() < kMaxStackDepth) |
| 83 pseudo_stack_.push_back(trace_event_name); | 82 pseudo_stack_.push_back(frame); |
| 84 else | 83 else |
| 85 NOTREACHED(); | 84 NOTREACHED(); |
| 86 } | 85 } |
| 87 | 86 |
| 88 void AllocationContextTracker::PopPseudoStackFrame( | 87 // static |
| 89 const char* trace_event_name) { | 88 void AllocationContextTracker::PopPseudoStackFrame(StackFrame frame) { |
| 90 // Guard for stack underflow. If tracing was started with a TRACE_EVENT in | 89 // Guard for stack underflow. If tracing was started with a TRACE_EVENT in |
| 91 // scope, the frame was never pushed, so it is possible that pop is called | 90 // scope, the frame was never pushed, so it is possible that pop is called |
| 92 // on an empty stack. | 91 // on an empty stack. |
| 93 if (pseudo_stack_.empty()) | 92 if (pseudo_stack_.empty()) |
| 94 return; | 93 return; |
| 95 | 94 |
| 96 // Assert that pushes and pops are nested correctly. This DCHECK can be | 95 // Assert that pushes and pops are nested correctly. This DCHECK can be |
| 97 // hit if some TRACE_EVENT macro is unbalanced (a TRACE_EVENT_END* call | 96 // hit if some TRACE_EVENT macro is unbalanced (a TRACE_EVENT_END* call |
| 98 // without a corresponding TRACE_EVENT_BEGIN). | 97 // without a corresponding TRACE_EVENT_BEGIN). |
| 99 DCHECK_EQ(trace_event_name, pseudo_stack_.back()) | 98 DCHECK_EQ(frame, pseudo_stack_.back()) |
| 100 << "Encountered an unmatched TRACE_EVENT_END"; | 99 << "Encountered an unmatched TRACE_EVENT_END"; |
| 101 | 100 |
| 102 pseudo_stack_.pop_back(); | 101 pseudo_stack_.pop_back(); |
| 103 } | 102 } |
| 104 | 103 |
| 105 void AllocationContextTracker::PushCurrentTaskContext(const char* context) { | 104 void AllocationContextTracker::PushCurrentTaskContext(const char* context) { |
| 106 DCHECK(context); | 105 DCHECK(context); |
| 107 if (task_contexts_.size() < kMaxTaskDepth) | 106 if (task_contexts_.size() < kMaxTaskDepth) |
| 108 task_contexts_.push_back(context); | 107 task_contexts_.push_back(context); |
| 109 else | 108 else |
| 110 NOTREACHED(); | 109 NOTREACHED(); |
| 111 } | 110 } |
| 112 | 111 |
| 113 void AllocationContextTracker::PopCurrentTaskContext(const char* context) { | 112 void AllocationContextTracker::PopCurrentTaskContext(const char* context) { |
| 114 DCHECK_EQ(context, task_contexts_.back()) | 113 DCHECK_EQ(context, task_contexts_.back()) |
| 115 << "Encountered an unmatched context end"; | 114 << "Encountered an unmatched context end"; |
| 116 task_contexts_.pop_back(); | 115 task_contexts_.pop_back(); |
| 117 } | 116 } |
| 118 | 117 |
| 119 // static | 118 // static |
| 120 AllocationContext AllocationContextTracker::GetContextSnapshot() { | 119 AllocationContext AllocationContextTracker::GetContextSnapshot() { |
| 121 AllocationContext ctx; | 120 AllocationContext ctx; |
| 122 | 121 |
| 123 // Fill the backtrace. | 122 // Fill the backtrace. |
| 124 { | 123 { |
| 125 auto backtrace = std::begin(ctx.backtrace.frames); | 124 auto src = pseudo_stack_.begin(); |
| 126 auto backtrace_end = std::end(ctx.backtrace.frames); | 125 auto dst = std::begin(ctx.backtrace.frames); |
| 126 auto src_end = pseudo_stack_.end(); |
| 127 auto dst_end = std::end(ctx.backtrace.frames); |
| 127 | 128 |
| 128 // Add the thread name as the first entry | 129 // Add the thread name as the first entry in the backtrace. |
| 129 if (thread_name_) { | 130 if (thread_name_) { |
| 130 *backtrace++ = StackFrame::FromThreadName(thread_name_); | 131 DCHECK(dst < dst_end); |
| 132 *dst = thread_name_; |
| 133 ++dst; |
| 131 } | 134 } |
| 132 | 135 |
| 133 for (const char* event_name: pseudo_stack_) { | 136 // Copy as much of the bottom of the pseudo stack into the backtrace as |
| 134 if (backtrace == backtrace_end) { | 137 // possible. |
| 135 break; | 138 for (; src != src_end && dst != dst_end; src++, dst++) |
| 136 } | 139 *dst = *src; |
| 137 *backtrace++ = StackFrame::FromTraceEventName(event_name); | |
| 138 } | |
| 139 | 140 |
| 140 ctx.backtrace.frame_count = backtrace - std::begin(ctx.backtrace.frames); | 141 // If there is room for more, fill the remaining slots with empty frames. |
| 142 std::fill(dst, dst_end, nullptr); |
| 141 } | 143 } |
| 142 | 144 |
| 143 // TODO(ssid): Fix crbug.com/594803 to add file name as 3rd dimension | 145 // TODO(ssid): Fix crbug.com/594803 to add file name as 3rd dimension |
| 144 // (component name) in the heap profiler and not piggy back on the type name. | 146 // (component name) in the heap profiler and not piggy back on the type name. |
| 145 ctx.type_name = task_contexts_.empty() ? nullptr : task_contexts_.back(); | 147 ctx.type_name = task_contexts_.empty() ? nullptr : task_contexts_.back(); |
| 146 | 148 |
| 147 return ctx; | 149 return ctx; |
| 148 } | 150 } |
| 149 | 151 |
| 150 } // namespace trace_event | 152 } // namespace trace_event |
| 151 } // namespace base | 153 } // namespace base |
| OLD | NEW |