Chromium Code Reviews
| Index: base/debug/trace_memory.cc |
| diff --git a/base/debug/trace_memory.cc b/base/debug/trace_memory.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..65bec56be135a3354ff3e02acf59651c9995c55f |
| --- /dev/null |
| +++ b/base/debug/trace_memory.cc |
| @@ -0,0 +1,236 @@ |
| +// Copyright 2013 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "base/debug/trace_memory.h" |
| + |
| +#include "base/debug/leak_annotations.h" |
| +#include "base/debug/trace_event.h" |
| +#include "base/lazy_instance.h" |
| +#include "base/logging.h" |
| +#include "base/memory/scoped_ptr.h" |
| +#include "base/message_loop.h" |
| +#include "base/threading/thread_local.h" |
| + |
| +// TODO(jamescook): Windows support for memory tracing. |
| +#if !defined(NO_TCMALLOC) && !defined(OS_NACL) && \ |
| +    (defined(OS_LINUX) || defined(OS_ANDROID)) |
|
dsinclair
2013/06/18 15:30:15
Could this be done through gyp? Have a trace_memor
James Cook
2013/06/29 00:02:42
I think we follow this pattern of using ifdefs oth
|
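For context on the ifdef pattern discussed above, one way to keep ifdefs while not repeating the long platform condition at every call site is a single feature macro; the macro name below is illustrative only and not part of this patch:

    // Hypothetical consolidation of the repeated platform check.
    #if !defined(NO_TCMALLOC) && !defined(OS_NACL) && \
        (defined(OS_LINUX) || defined(OS_ANDROID))
    #define TCMALLOC_TRACE_MEMORY_SUPPORTED 1
    #endif

    // Later guards then reduce to:
    #if defined(TCMALLOC_TRACE_MEMORY_SUPPORTED)
    #include "third_party/tcmalloc/chromium/src/gperftools/heap-profiler.h"
    #endif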
| +#include "third_party/tcmalloc/chromium/src/gperftools/heap-profiler.h" |
| +#endif |
| + |
| +namespace base { |
| +namespace debug { |
| + |
| +namespace { |
| + |
| +// Maximum number of nested TRACE_MEMORY scopes to record. Must be less than |
| +// or equal to HeapProfileTable::kMaxStackDepth. |
| +const int kMaxStackSize = 32; |
| + |
| +///////////////////////////////////////////////////////////////////////////// |
| +// Holds a memory dump until the tracing system needs to serialize it. |
| +class MemoryDumpHolder : public base::debug::ConvertableToTraceFormat { |
| + public: |
| + // Takes ownership of dump, which must be a JSON string, allocated with |
| + // malloc() and NULL terminated. |
| + explicit MemoryDumpHolder(char* dump) : dump_(dump) {} |
| + virtual ~MemoryDumpHolder() { free(dump_); } |
| + |
| + // base::debug::ConvertableToTraceFormat overrides: |
| + virtual void AppendAsTraceFormat(std::string* out) const OVERRIDE { |
| + out->append(dump_); |
| + } |
| + |
| + private: |
| + char* dump_; |
| + |
| + DISALLOW_COPY_AND_ASSIGN(MemoryDumpHolder); |
| +}; |
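A usage note on MemoryDumpHolder: it exists only to keep the malloc()-allocated heap-profile string alive until the trace system serializes it, at which point AppendAsTraceFormat() splices the raw JSON into the trace output. A minimal standalone sketch of that ownership contract (the literal dump string here is made up):

    #include <string.h>  // strdup()
    // The holder frees the string in its destructor, so it must come from
    // malloc()/strdup(), never from new[].
    MemoryDumpHolder holder(strdup("{\"heaps\": []}"));
    std::string out;
    holder.AppendAsTraceFormat(&out);  // |out| now holds the raw JSON dump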
| + |
| +///////////////////////////////////////////////////////////////////////////// |
| +// Records a stack of TRACE_MEMORY events. One per thread is required. |
| +struct TraceMemoryStack { |
| + TraceMemoryStack() : index_(0) { |
| + memset(category_stack_, 0, kMaxStackSize * sizeof(const char*)); |
| + } |
| + |
| + // Points to the next free entry. |
| + int index_; |
| + const char* category_stack_[kMaxStackSize]; |
| +}; |
| + |
| +// One stack of TRACE_MEMORY event data per thread. |
| +LazyInstance<ThreadLocalPointer<TraceMemoryStack> >::Leaky trace_memory_stack = |
|
dsinclair
2013/06/18 15:30:15
Not sure if it's in the styleguide, but in trace_e
James Cook
2013/06/29 00:02:42
Style guide says it is optional to use g_, but I l
|
| + LAZY_INSTANCE_INITIALIZER; |
| + |
| +// Initializes the thread-local trace memory stack and returns it. |
| +TraceMemoryStack* InitTraceMemoryStack() { |
| + DCHECK(!trace_memory_stack.Get().Get()); |
| + // Intentionally leak one stack per thread. |
| + TraceMemoryStack* leaked_stack = new TraceMemoryStack; |
| + ANNOTATE_LEAKING_OBJECT_PTR(leaked_stack); |
| + trace_memory_stack.Get().Set(leaked_stack); |
| + return trace_memory_stack.Get().Get(); |
| +} |
| + |
| +// Returns a "pseudo-stack" of pointers to trace events. |
| +// TODO(jamescook): Record both category and name, perhaps in a pair for speed. |
| +int GetPseudoStack(void** stack_out) { |
|
dsinclair
2013/06/18 15:30:15
category_stack_ has const char*, why use void* ins
|
| + TraceMemoryStack* stack = trace_memory_stack.Get().Get(); |
| + // If the tracing system isn't fully initialized, just skip this allocation. |
| + // Attempting to initialize will allocate memory, causing this function to |
| + // be called recursively from inside the allocator. |
| + if (!stack) |
| + return 0; |
| + // Copy out a maximum of kMaxStackSize stack entries. |
| + const int count = |
| + stack->index_ < kMaxStackSize ? stack->index_ : kMaxStackSize; |
| +  // Note that memcpy() works for zero bytes. |
| + memcpy(stack_out, stack->category_stack_, count * sizeof(void*)); |
| + return count; |
| +} |
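On the const char* vs. void* question raised above: the category pointers are only treated as opaque stack-frame keys by the heap profiler, which is why they are copied into a void* array. A standalone sketch of the same pointer-width copy, with illustrative names outside the profiler plumbing:

    #include <cstring>

    // Copies category-string pointers into an opaque void* buffer; the
    // profiler only needs pointer values for bucketing, not the characters.
    int CopyCategoriesAsOpaquePointers(const char* const* categories,
                                       int count,
                                       void** out) {
      std::memcpy(out, categories, count * sizeof(void*));
      return count;
    }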
| + |
| +// Caller owns the returned char* and must release it with free(). |
| +char* TraceMemoryDumpAsString() { |
| +#if !defined(NO_TCMALLOC) && !defined(OS_NACL) && \ |
| +    (defined(OS_LINUX) || defined(OS_ANDROID)) |
| + DVLOG(1) << "TraceMemoryDumpAsString"; |
| + return ::GetHeapProfile(); |
| +#else |
| + NOTREACHED(); |
| + return NULL; |
| +#endif |
| +} |
| + |
| +// If memory tracing is enabled, dumps a memory profile to the tracing system. |
| +void DumpMemoryProfile() { |
| + DVLOG(1) << "DumpMemoryProfile"; |
| + // Don't trace allocations here in the memory tracing system. |
| + TRACE_MEMORY(TRACE_DISABLED_BY_DEFAULT("memory"), TRACE_MEMORY_IGNORE); |
| + // Check to see if tracing is enabled for the memory category. |
| + bool enabled; |
| + TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("memory"), |
| + &enabled); |
| + if (enabled) { |
| + // MemoryDumpHolder takes ownership of this string. |
| + char* dump = TraceMemoryDumpAsString(); |
| + scoped_ptr<MemoryDumpHolder> dump_holder(new MemoryDumpHolder(dump)); |
| + const int kSnapshotId = 1; |
| + TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID( |
| + "memory", |
|
dsinclair
2013/06/18 15:30:15
TRACE_DISABLED_BY_DEFAULT("memory") ?
James Cook
2013/06/29 00:02:42
Yes, good catch.
|
| + "memory::Heap", |
| + kSnapshotId, |
| + dump_holder.PassAs<base::debug::ConvertableToTraceFormat>()); |
| + } |
| +} |
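Per the exchange above ("TRACE_DISABLED_BY_DEFAULT("memory")?" / "Yes, good catch."), the snapshot call presumably becomes the following once that fix is applied; only the category argument changes, everything else is as in the patch:

    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
        TRACE_DISABLED_BY_DEFAULT("memory"),
        "memory::Heap",
        kSnapshotId,
        dump_holder.PassAs<base::debug::ConvertableToTraceFormat>());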
| + |
| +void TraceMemoryStart() { |
| +#if !defined(NO_TCMALLOC) && !defined(OS_NACL) && \ |
| +    (defined(OS_LINUX) || defined(OS_ANDROID)) |
| + DVLOG(1) << "Starting trace memory"; |
| + // Ensure thread-local-storage is initialized by creating a dummy event. |
| + ScopedTraceMemory initialize(TRACE_MEMORY_IGNORE); |
|
dsinclair
2013/06/18 15:30:15
Wouldn't it be easier to get just get the stack an
James Cook
2013/06/29 00:02:42
Changed InitTraceMemoryStack() to GetTraceMemorySt
|
| + ::SetPseudoStackGenerator(&GetPseudoStack); |
| + ::HeapProfilerStart(NULL); |
| +#else |
| + NOTREACHED(); |
| +#endif |
| +} |
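The thread above indicates InitTraceMemoryStack() was turned into a getter in a later patch set (the exact name is cut off, so the one below is assumed). A sketch built only from pieces already in this file, which would let TraceMemoryStart() fetch the stack directly instead of constructing a dummy ScopedTraceMemory:

    // Returns the thread-local stack, creating it on first use so callers
    // never see NULL. Sketch only; the landed signature may differ.
    TraceMemoryStack* GetTraceMemoryStack() {
      TraceMemoryStack* stack = trace_memory_stack.Get().Get();
      if (!stack)
        stack = InitTraceMemoryStack();
      return stack;
    }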
| + |
| +void TraceMemoryStop() { |
| +#if !defined(NO_TCMALLOC) && !defined(OS_NACL) && \ |
| +    (defined(OS_LINUX) || defined(OS_ANDROID)) |
| + DVLOG(1) << "Stopping trace memory"; |
| + ::HeapProfilerStop(); |
| +#else |
| + NOTREACHED(); |
| +#endif |
| +} |
| + |
| +} // namespace |
| + |
| +////////////////////////////////////////////////////////////////////////////// |
| + |
| +TraceMemoryTraceLogObserver::TraceMemoryTraceLogObserver( |
| + scoped_refptr<MessageLoopProxy> message_loop_proxy) |
| + : message_loop_proxy_(message_loop_proxy), |
| + weak_factory_(this) { |
| + TraceLog::GetInstance()->AddEnabledStateObserver(this); |
| +} |
| + |
| +TraceMemoryTraceLogObserver::~TraceMemoryTraceLogObserver() { |
| + if (dump_timer_.IsRunning()) |
| + StopProfiling(); |
| + TraceLog::GetInstance()->RemoveEnabledStateObserver(this); |
| +} |
| + |
| +// base::debug::TraceLog::EnabledStateChangedObserver overrides: |
| +void TraceMemoryTraceLogObserver::OnTraceLogEnabled() { |
| + DVLOG(1) << "OnTraceLogEnabled"; |
| +  // Don't wrap PostTask() in DCHECK(): the condition may not be evaluated |
| +  // when DCHECKs are compiled out, which would silently skip the post. |
| +  bool posted = message_loop_proxy_->PostTask( |
| +      FROM_HERE, |
| +      base::Bind(&TraceMemoryTraceLogObserver::StartProfiling, |
| +                 weak_factory_.GetWeakPtr())); |
| +  DCHECK(posted); |
| +} |
| + |
| +void TraceMemoryTraceLogObserver::OnTraceLogDisabled() { |
| + DVLOG(1) << "OnTraceLogDisabled"; |
| +  // See the note in OnTraceLogEnabled() about not wrapping PostTask() in |
| +  // DCHECK(). |
| +  bool posted = message_loop_proxy_->PostTask( |
| +      FROM_HERE, |
| +      base::Bind(&TraceMemoryTraceLogObserver::StopProfiling, |
| +                 weak_factory_.GetWeakPtr())); |
| +  DCHECK(posted); |
| +} |
| + |
| +void TraceMemoryTraceLogObserver::StartProfiling() { |
| + TraceMemoryStart(); |
|
dsinclair
2013/06/18 15:30:15
Should this do a:
if (dump_timer_.IsRunning())
James Cook
2013/06/29 00:02:42
Good idea. Done.
|
| + const int kDumpIntervalSeconds = 5; |
| + dump_timer_.Start(FROM_HERE, |
| + TimeDelta::FromSeconds(kDumpIntervalSeconds), |
| + base::Bind(&DumpMemoryProfile)); |
| +} |
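Following the review comment on StartProfiling() ("Good idea. Done."), the agreed guard presumably makes the method look roughly like the sketch below; the early return is the only assumed change:

    void TraceMemoryTraceLogObserver::StartProfiling() {
      // Avoid restarting the heap profiler and dump timer if already running.
      if (dump_timer_.IsRunning())
        return;
      TraceMemoryStart();
      const int kDumpIntervalSeconds = 5;
      dump_timer_.Start(FROM_HERE,
                        TimeDelta::FromSeconds(kDumpIntervalSeconds),
                        base::Bind(&DumpMemoryProfile));
    }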
| + |
| +void TraceMemoryTraceLogObserver::StopProfiling() { |
| + dump_timer_.Stop(); |
| + TraceMemoryStop(); |
| +} |
| + |
| +bool TraceMemoryTraceLogObserver::IsTimerRunningForTest() const { |
| + return dump_timer_.IsRunning(); |
| +} |
| + |
| +///////////////////////////////////////////////////////////////////////////// |
| + |
| +ScopedTraceMemory::ScopedTraceMemory(const char* category) { |
| + // Get our thread's copy of the stack. |
| + TraceMemoryStack* stack = trace_memory_stack.Get().Get(); |
| + if (!stack) |
| + stack = InitTraceMemoryStack(); |
|
dsinclair
2013/06/18 15:30:15
If InitTraceMemoryStack() always returns this can
James Cook
2013/06/29 00:02:42
Done.
|
| + const int index = stack->index_; |
| + // Allow deep nesting of stacks (needed for tests), but only record |
| + // |kMaxStackSize| entries. |
| + if (index < kMaxStackSize) |
| + stack->category_stack_[index] = category; |
| + stack->index_++; |
| +} |
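With a non-NULL-returning getter like the GetTraceMemoryStack() sketched earlier, the simplification dsinclair asks about above would reduce the constructor to roughly this (a sketch, not the landed code):

    ScopedTraceMemory::ScopedTraceMemory(const char* category) {
      TraceMemoryStack* stack = GetTraceMemoryStack();  // never NULL
      const int index = stack->index_;
      // Allow deep nesting, but only record |kMaxStackSize| entries.
      if (index < kMaxStackSize)
        stack->category_stack_[index] = category;
      stack->index_++;
    }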
| + |
| +ScopedTraceMemory::~ScopedTraceMemory() { |
| + // Get our thread's copy of the stack. |
| + TraceMemoryStack* stack = trace_memory_stack.Get().Get(); |
| + stack->index_--; |
| + DCHECK_GE(stack->index_, 0) << "stack underflow"; |
| +} |
| + |
| +// static |
| +int ScopedTraceMemory::GetStackIndexForTest() { |
| + TraceMemoryStack* stack = trace_memory_stack.Get().Get(); |
| + if (!stack) |
| + stack = InitTraceMemoryStack(); |
| + return stack->index_; |
| +} |
| + |
| +// static |
| +const char* ScopedTraceMemory::GetItemForTest(int index) { |
| + TraceMemoryStack* stack = trace_memory_stack.Get().Get(); |
| + if (!stack) |
| + stack = InitTraceMemoryStack(); |
| + return stack->category_stack_[index]; |
| +} |
| + |
| +} // namespace debug |
| +} // namespace base |
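The *ForTest helpers suggest unit-test coverage along these lines; the test below is purely illustrative (it assumes the usual gtest macros and is not part of this patch):

    TEST(TraceMemoryTest, ScopedTraceMemoryStack) {
      EXPECT_EQ(0, ScopedTraceMemory::GetStackIndexForTest());
      {
        ScopedTraceMemory scope1("category1");
        EXPECT_EQ(1, ScopedTraceMemory::GetStackIndexForTest());
        EXPECT_STREQ("category1", ScopedTraceMemory::GetItemForTest(0));

        ScopedTraceMemory scope2("category2");
        EXPECT_EQ(2, ScopedTraceMemory::GetStackIndexForTest());
        EXPECT_STREQ("category2", ScopedTraceMemory::GetItemForTest(1));
      }
      EXPECT_EQ(0, ScopedTraceMemory::GetStackIndexForTest());
    }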