Index: base/trace_event/heap_profiler_allocation_context_tracker.h
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker.h b/base/trace_event/heap_profiler_allocation_context_tracker.h
index a310cefc77e2546ea0a1db002b67d5bf35f6a05b..632a4eface8740a9415337c41c6d9874fb107367 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker.h
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.h
|
@@ -9,18 +9,9 @@
 
 #include "base/atomicops.h"
 #include "base/base_export.h"
-#include "base/debug/debugging_flags.h"
-#include "base/debug/stack_trace.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
-
-#if HAVE_TRACE_STACK_FRAME_POINTERS && !defined(OS_NACL) && \
-    (BUILDFLAG(ENABLE_PROFILING) || !defined(NDEBUG))
-#define ENABLE_NATIVE_ALLOCATION_TRACES 1
-#else
-#define ENABLE_NATIVE_ALLOCATION_TRACES 0
-#endif
 
 namespace base {
 namespace trace_event {
|
@@ -32,32 +23,24 @@
 // details.
 class BASE_EXPORT AllocationContextTracker {
  public:
-  enum class CaptureMode: int32_t {
-    DISABLED,      // Don't capture anything
-    PSEUDO_STACK,  // GetContextSnapshot() returns pseudo stack trace
-#if ENABLE_NATIVE_ALLOCATION_TRACES
-    NATIVE_STACK   // GetContextSnapshot() returns native (real) stack trace
-#endif
-  };
|
+  // Globally enables capturing allocation context.
+  // TODO(ruuda): Should this be replaced by |EnableCapturing| in the future?
+  // Or at least have something that guards against enable -> disable -> enable?
+  static void SetCaptureEnabled(bool enabled);
|
 
-  // Globally sets capturing mode.
-  // TODO(primiano): How to guard against *_STACK -> DISABLED -> *_STACK?
-  static void SetCaptureMode(CaptureMode mode);
-
-  // Returns global capturing mode.
-  inline static CaptureMode capture_mode() {
+  // Returns whether capturing allocation context is enabled globally.
+  inline static bool capture_enabled() {
     // A little lag after heap profiling is enabled or disabled is fine, it is
     // more important that the check is as cheap as possible when capturing is
     // not enabled, so do not issue a memory barrier in the fast path.
-    if (subtle::NoBarrier_Load(&capture_mode_) ==
-        static_cast<int32_t>(CaptureMode::DISABLED))
-      return CaptureMode::DISABLED;
+    if (subtle::NoBarrier_Load(&capture_enabled_) == 0)
+      return false;
 
     // In the slow path, an acquire load is required to pair with the release
-    // store in |SetCaptureMode|. This is to ensure that the TLS slot for
+    // store in |SetCaptureEnabled|. This is to ensure that the TLS slot for
     // the thread-local allocation context tracker has been initialized if
-    // |capture_mode| returns something other than DISABLED.
-    return static_cast<CaptureMode>(subtle::Acquire_Load(&capture_mode_));
+    // |capture_enabled| returns true.
+    return subtle::Acquire_Load(&capture_enabled_) != 0;
   }
 
   // Returns the thread-local instance, creating one if necessary. Returns
@@ -97,7 +80,7 @@
  private:
   AllocationContextTracker();
 
-  static subtle::Atomic32 capture_mode_;
+  static subtle::Atomic32 capture_enabled_;
 
   // The pseudo stack where frames are |TRACE_EVENT| names.
   std::vector<const char*> pseudo_stack_;
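
The comments in |capture_enabled()| describe an acquire/release pairing: the fast path uses a plain NoBarrier_Load so the check stays cheap while capturing is off, and only when the flag is set does the slow path issue an Acquire_Load that pairs with a release store in SetCaptureEnabled(). The .cc side of that store is not part of this diff; the following is a minimal sketch, assuming a hypothetical EnsureTLSSlotInitialized() helper, of how the release store could publish the flag only after the thread-local tracker machinery is ready.

// Sketch only -- the real SetCaptureEnabled() lives in
// heap_profiler_allocation_context_tracker.cc, which this diff does not show.
#include "base/atomicops.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"

namespace base {
namespace trace_event {

// static
void AllocationContextTracker::SetCaptureEnabled(bool enabled) {
  // Hypothetical helper: make sure the TLS slot for the per-thread tracker
  // exists before the flag becomes observable.
  // EnsureTLSSlotInitialized();

  // Release_Store pairs with the Acquire_Load in |capture_enabled()|: any
  // thread that sees a nonzero flag through the acquire load also sees the
  // writes made before this store, such as the TLS slot initialization.
  subtle::Release_Store(&capture_enabled_, enabled ? 1 : 0);
}

}  // namespace trace_event
}  // namespace base

A caller on the allocation hot path would then check AllocationContextTracker::capture_enabled() before fetching the thread-local instance declared further down in this header, which is why the no-barrier fast path matters.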
|