Chromium Code Reviews

Unified Diff: base/trace_event/memory_dump_manager.cc

Issue 1124763003: Update from https://crrev.com/327068 (Closed) Base URL: git@github.com:domokit/mojo.git@master
Patch Set: update nacl, buildtools, fix display_change_notifier_unittest Created 5 years, 7 months ago
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -1,187 +1,374 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/trace_event/memory_dump_manager.h"
 
 #include <algorithm>
 
 #include "base/atomic_sequence_num.h"
 #include "base/compiler_specific.h"
 #include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_dump_session_state.h"
 #include "base/trace_event/process_memory_dump.h"
 #include "base/trace_event/trace_event_argument.h"
 
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#include "base/trace_event/malloc_dump_provider.h"
+#include "base/trace_event/process_memory_maps_dump_provider.h"
+#include "base/trace_event/process_memory_totals_dump_provider.h"
+#elif defined(OS_WIN)
+#include "base/trace_event/winheap_dump_provider_win.h"
+#endif
+
 namespace base {
 namespace trace_event {
 
 namespace {
 
+// TODO(primiano): this should be smarter and should do something similar to
+// trace event synthetic delays.
+const char kTraceCategory[] = TRACE_DISABLED_BY_DEFAULT("memory-infra");
+
 MemoryDumpManager* g_instance_for_testing = nullptr;
+const int kDumpIntervalSeconds = 2;
 const int kTraceEventNumArgs = 1;
 const char* kTraceEventArgNames[] = {"dumps"};
 const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
 StaticAtomicSequenceNumber g_next_guid;
 
-const char* DumpPointTypeToString(const DumpPointType& dump_point_type) {
-  switch (dump_point_type) {
-    case DumpPointType::TASK_BEGIN:
+const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
+  switch (dump_type) {
+    case MemoryDumpType::TASK_BEGIN:
       return "TASK_BEGIN";
-    case DumpPointType::TASK_END:
+    case MemoryDumpType::TASK_END:
       return "TASK_END";
-    case DumpPointType::PERIODIC_INTERVAL:
+    case MemoryDumpType::PERIODIC_INTERVAL:
       return "PERIODIC_INTERVAL";
-    case DumpPointType::EXPLICITLY_TRIGGERED:
+    case MemoryDumpType::EXPLICITLY_TRIGGERED:
       return "EXPLICITLY_TRIGGERED";
   }
   NOTREACHED();
   return "UNKNOWN";
 }
 
+// Internal class used to hold details about ProcessMemoryDump requests for the
+// current process.
+// TODO(primiano): In the upcoming CLs, ProcessMemoryDump will become async,
+// and this class will be used to convey more details across PostTask()s.
+class ProcessMemoryDumpHolder
+    : public RefCountedThreadSafe<ProcessMemoryDumpHolder> {
+ public:
+  ProcessMemoryDumpHolder(
+      MemoryDumpRequestArgs req_args,
+      const scoped_refptr<MemoryDumpSessionState>& session_state,
+      MemoryDumpCallback callback)
+      : process_memory_dump(session_state),
+        req_args(req_args),
+        callback(callback),
+        task_runner(MessageLoop::current()->task_runner()),
+        num_pending_async_requests(0) {}
+
+  ProcessMemoryDump process_memory_dump;
+  const MemoryDumpRequestArgs req_args;
+
+  // Callback passed to the initial call to CreateProcessDump().
+  MemoryDumpCallback callback;
+
+  // Thread on which FinalizeDumpAndAddToTrace() should be called, which is the
+  // same that invoked the initial CreateProcessDump().
+  const scoped_refptr<SingleThreadTaskRunner> task_runner;
+
+  // Number of pending ContinueAsyncProcessDump() calls.
+  int num_pending_async_requests;
+
+ private:
+  friend class RefCountedThreadSafe<ProcessMemoryDumpHolder>;
+  virtual ~ProcessMemoryDumpHolder() {}
+  DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDumpHolder);
+};
+
+void FinalizeDumpAndAddToTrace(
+    const scoped_refptr<ProcessMemoryDumpHolder>& pmd_holder) {
+  DCHECK_EQ(0, pmd_holder->num_pending_async_requests);
+
+  if (!pmd_holder->task_runner->BelongsToCurrentThread()) {
+    pmd_holder->task_runner->PostTask(
+        FROM_HERE, Bind(&FinalizeDumpAndAddToTrace, pmd_holder));
+    return;
+  }
+
+  scoped_refptr<ConvertableToTraceFormat> event_value(new TracedValue());
+  pmd_holder->process_memory_dump.AsValueInto(
+      static_cast<TracedValue*>(event_value.get()));
+  const char* const event_name =
+      MemoryDumpTypeToString(pmd_holder->req_args.dump_type);
+
+  TRACE_EVENT_API_ADD_TRACE_EVENT(
+      TRACE_EVENT_PHASE_MEMORY_DUMP,
+      TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name,
+      pmd_holder->req_args.dump_guid, kTraceEventNumArgs, kTraceEventArgNames,
+      kTraceEventArgTypes, nullptr /* arg_values */, &event_value,
+      TRACE_EVENT_FLAG_HAS_ID);
+
+  if (!pmd_holder->callback.is_null()) {
+    pmd_holder->callback.Run(pmd_holder->req_args.dump_guid, true);
+    pmd_holder->callback.Reset();
+  }
+}
+
+void RequestPeriodicGlobalDump() {
+  MemoryDumpManager::GetInstance()->RequestGlobalDump(
+      MemoryDumpType::PERIODIC_INTERVAL);
+}
+
 }  // namespace
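
Note on FinalizeDumpAndAddToTrace() above: if it is invoked on a thread other than the one that started the dump, it re-posts itself to the holder's task_runner, so the trace event and the completion callback always run on the originating thread. A minimal standard-library model of that hop (TaskQueue and the function names are illustrative, not part of this CL):

#include <deque>
#include <functional>
#include <iostream>

// Stand-in for SingleThreadTaskRunner: a queue drained by its owning thread.
struct TaskQueue {
  std::deque<std::function<void()>> tasks;
  void PostTask(std::function<void()> task) { tasks.push_back(std::move(task)); }
  void RunUntilIdle() {
    while (!tasks.empty()) {
      auto task = std::move(tasks.front());
      tasks.pop_front();
      task();
    }
  }
};

// Mirrors the BelongsToCurrentThread() check: if invoked off the origin
// thread, re-post the same call and bail out.
void Finalize(TaskQueue* origin, bool on_origin_thread) {
  if (!on_origin_thread) {
    origin->PostTask([origin] { Finalize(origin, /*on_origin_thread=*/true); });
    return;
  }
  std::cout << "dump finalized on the origin thread\n";
}

int main() {
  TaskQueue origin_thread_queue;
  Finalize(&origin_thread_queue, /*on_origin_thread=*/false);  // "worker" call
  origin_thread_queue.RunUntilIdle();  // origin thread picks up the re-post
  return 0;
}

The real code relies on SingleThreadTaskRunner::BelongsToCurrentThread() for the same decision.
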
 
-// TODO(primiano): this should be smarter and should do something similar to
-// trace event synthetic delays.
-const char MemoryDumpManager::kTraceCategory[] =
-    TRACE_DISABLED_BY_DEFAULT("memory-dumps");
+// static
+const char* const MemoryDumpManager::kTraceCategoryForTesting = kTraceCategory;
 
 // static
 MemoryDumpManager* MemoryDumpManager::GetInstance() {
   if (g_instance_for_testing)
     return g_instance_for_testing;
 
   return Singleton<MemoryDumpManager,
                    LeakySingletonTraits<MemoryDumpManager>>::get();
 }
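
GetInstance() uses LeakySingletonTraits, so the manager is created lazily on first use and intentionally never destroyed, which keeps a process-wide observer safe to touch during late shutdown. A rough standard-library equivalent of the leaky pattern (the Manager type is a placeholder, not the real class):

struct Manager {
  // Process-wide state lives here.
};

Manager* GetManagerInstance() {
  // Constructed on first use and deliberately leaked: no destructor runs at
  // process exit, so late callers never observe a half-destroyed object.
  static Manager* instance = new Manager();
  return instance;
}
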
 
 // static
 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
+  if (instance)
+    instance->skip_core_dumpers_auto_registration_for_testing_ = true;
   g_instance_for_testing = instance;
 }
 
 MemoryDumpManager::MemoryDumpManager()
-    : dump_provider_currently_active_(nullptr), memory_tracing_enabled_(0) {
+    : dump_provider_currently_active_(nullptr),
+      delegate_(nullptr),
+      memory_tracing_enabled_(0),
+      skip_core_dumpers_auto_registration_for_testing_(false) {
+  g_next_guid.GetNext();  // Make sure that first guid is not zero.
 }
 
 MemoryDumpManager::~MemoryDumpManager() {
   base::trace_event::TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
 }
 
 void MemoryDumpManager::Initialize() {
   TRACE_EVENT0(kTraceCategory, "init");  // Add to trace-viewer category list.
   trace_event::TraceLog::GetInstance()->AddEnabledStateObserver(this);
+
+  if (skip_core_dumpers_auto_registration_for_testing_)
+    return;
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  // Enable the core dump providers.
+  RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance());
+  RegisterDumpProvider(ProcessMemoryMapsDumpProvider::GetInstance());
+  RegisterDumpProvider(MallocDumpProvider::GetInstance());
+#elif defined(OS_WIN)
+  RegisterDumpProvider(WinHeapDumpProvider::GetInstance());
+#endif
+}
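
Initialize() wires the per-OS core dump providers into the registry; an embedder-specific provider would be registered the same way. A hypothetical registration call (MyCacheDumpProvider is an invented name used only for illustration; the MemoryDumpProvider interface itself is not shown in this file):

// Hypothetical: register an extra provider after MemoryDumpManager::Initialize().
base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
    MyCacheDumpProvider::GetInstance());
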
+
+void MemoryDumpManager::SetDelegate(MemoryDumpManagerDelegate* delegate) {
+  AutoLock lock(lock_);
+  DCHECK_EQ(static_cast<MemoryDumpManagerDelegate*>(nullptr), delegate_);
+  delegate_ = delegate;
 }
 
 void MemoryDumpManager::RegisterDumpProvider(MemoryDumpProvider* mdp) {
   AutoLock lock(lock_);
-  if (std::find(dump_providers_registered_.begin(),
-                dump_providers_registered_.end(),
-                mdp) != dump_providers_registered_.end()) {
-    return;
-  }
-  dump_providers_registered_.push_back(mdp);
+  dump_providers_registered_.insert(mdp);
 }
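
RegisterDumpProvider() and UnregisterDumpProvider() now lean on set semantics instead of the old vector-plus-std::find bookkeeping, which is why the explicit duplicate and membership checks could be dropped. This assumes the two containers are set-like, as the insert(mdp)/erase(mdp)/count(mdp) calls suggest. A small standalone demonstration of why those checks become redundant with a std::set:

#include <cassert>
#include <set>

int main() {
  std::set<int> registered;
  registered.insert(1);
  registered.insert(1);  // Duplicate insert is a no-op; the size stays 1.
  assert(registered.size() == 1);
  registered.erase(2);   // Erasing a missing element is also a no-op.
  assert(registered.size() == 1);
  assert(registered.count(1) == 1);  // Membership test, as used by the async path.
  return 0;
}
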
 
 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
   AutoLock lock(lock_);
 
-  // Remove from the registered providers list.
-  auto it = std::find(dump_providers_registered_.begin(),
-                      dump_providers_registered_.end(), mdp);
-  if (it != dump_providers_registered_.end())
-    dump_providers_registered_.erase(it);
+  // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe
+  // only if the MDP has specified a thread affinity (via task_runner()) AND
+  // the unregistration happens on the same thread (so the MDP cannot unregister
+  // and DumpInto() at the same time).
+  // Otherwise, it is not possible to guarantee that its unregistration is
+  // race-free. If you hit this DCHECK, your MDP has a bug.
+  DCHECK_IMPLIES(
+      subtle::NoBarrier_Load(&memory_tracing_enabled_),
+      mdp->task_runner() && mdp->task_runner()->BelongsToCurrentThread())
+      << "The MemoryDumpProvider " << mdp->GetFriendlyName() << " attempted to "
+      << "unregister itself in a racy way. Please file a crbug.";
 
   // Remove from the enabled providers list. This is to deal with the case that
   // UnregisterDumpProvider is called while the trace is enabled.
-  it = std::find(dump_providers_enabled_.begin(), dump_providers_enabled_.end(),
-                 mdp);
-  if (it != dump_providers_enabled_.end())
-    dump_providers_enabled_.erase(it);
+  dump_providers_enabled_.erase(mdp);
+  dump_providers_registered_.erase(mdp);
 }
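
The DCHECK above captures the rule that, while tracing is enabled, a provider may only unregister from the thread it nominated via task_runner(); otherwise an in-flight DumpInto() could race with the provider going away. A simplified standard-library model of that condition (names are illustrative):

#include <atomic>
#include <cassert>
#include <thread>

struct Provider {
  // The thread the provider nominated for its dump calls.
  std::thread::id dump_thread_id = std::this_thread::get_id();
};

std::atomic<bool> g_tracing_enabled{false};

void UnregisterProvider(const Provider& provider) {
  // Same condition the DCHECK_IMPLIES above asserts: while tracing, only the
  // nominated thread may unregister.
  assert(!g_tracing_enabled.load() ||
         std::this_thread::get_id() == provider.dump_thread_id);
  // ... erase the provider from the registered/enabled sets under a lock ...
}

int main() {
  Provider provider;
  g_tracing_enabled = true;
  UnregisterProvider(provider);  // OK: called on the provider's own thread.
  return 0;
}
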
 
-void MemoryDumpManager::RequestDumpPoint(DumpPointType dump_point_type) {
-  // TODO(primiano): this will have more logic to coordinate dump points across
-  // multiple processes via IPC. See crbug.com/462930.
-
+void MemoryDumpManager::RequestGlobalDump(
+    MemoryDumpType dump_type,
+    const MemoryDumpCallback& callback) {
   // Bail out immediately if tracing is not enabled at all.
   if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)))
     return;
 
-  // TODO(primiano): Make guid actually unique (cross-process) by hashing it
-  // with the PID. See crbug.com/462931 for details.
-  const uint64 guid = g_next_guid.GetNext();
-  CreateLocalDumpPoint(dump_point_type, guid);
+  const uint64 guid =
+      TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext());
+
+  // The delegate_ is supposed to be thread safe, immutable and long lived.
+  // No need to keep the lock after we ensure that a delegate has been set.
+  MemoryDumpManagerDelegate* delegate;
+  {
+    AutoLock lock(lock_);
+    delegate = delegate_;
+  }
+
+  if (delegate) {
+    // The delegate is in charge of coordinating the request among all the
+    // processes and of calling CreateProcessDump() on the local process.
+    MemoryDumpRequestArgs args = {guid, dump_type};
+    delegate->RequestGlobalMemoryDump(args, callback);
+  } else if (!callback.is_null()) {
+    callback.Run(guid, false /* success */);
+  }
 }
 
-void MemoryDumpManager::BroadcastDumpRequest() {
-  NOTREACHED();  // TODO(primiano): implement IPC synchronization.
+void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type) {
+  RequestGlobalDump(dump_type, MemoryDumpCallback());
 }
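
For callers, the two overloads above allow either a fire-and-forget request or one with completion feedback. A sketch of the callback form, assuming the memory-infra tracing category is enabled and a delegate has been installed via SetDelegate(); OnMemoryDumpDone is an invented helper, and its (guid, success) signature is inferred from how the callback is Run() elsewhere in this file:

// Invented helper; the signature mirrors callback.Run(guid, success) above.
void OnMemoryDumpDone(uint64 dump_guid, bool success) {
  // React to the outcome, e.g. log |success| together with |dump_guid|.
}

void RequestExplicitDump() {
  base::trace_event::MemoryDumpManager::GetInstance()->RequestGlobalDump(
      base::trace_event::MemoryDumpType::EXPLICITLY_TRIGGERED,
      base::Bind(&OnMemoryDumpDone));
}
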
 
-// Creates a dump point for the current process and appends it to the trace.
-void MemoryDumpManager::CreateLocalDumpPoint(DumpPointType dump_point_type,
-                                             uint64 guid) {
+// Creates a memory dump for the current process and appends it to the trace.
+void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
+                                          const MemoryDumpCallback& callback) {
+  scoped_refptr<ProcessMemoryDumpHolder> pmd_holder(
+      new ProcessMemoryDumpHolder(args, session_state_, callback));
+  ProcessMemoryDump* pmd = &pmd_holder->process_memory_dump;
   bool did_any_provider_dump = false;
-  scoped_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump());
 
-  // Serialize dump point generation so that memory dump providers don't have to
-  // deal with thread safety.
+  // Iterate over the active dump providers and invoke DumpInto(pmd).
+  // The MDM guarantees linearity (at most one MDP is active within one
+  // process) and thread-safety (MDM enforces the right locking when entering /
+  // leaving the MDP.DumpInto() call). This is to simplify the clients' design
+  // and not let the MDPs worry about locking.
+  // As regards thread affinity, depending on the MDP configuration (see
+  // memory_dump_provider.h), the DumpInto() invocation can happen:
+  //  - Synchronously on the MDM thread, when MDP.task_runner() is not set.
+  //  - Posted on MDP.task_runner(), when MDP.task_runner() is set.
   {
     AutoLock lock(lock_);
-    for (auto it = dump_providers_enabled_.begin();
-         it != dump_providers_enabled_.end();) {
-      dump_provider_currently_active_ = *it;
-      if (dump_provider_currently_active_->DumpInto(pmd.get())) {
-        did_any_provider_dump = true;
-        ++it;
+    for (auto dump_provider_iter = dump_providers_enabled_.begin();
+         dump_provider_iter != dump_providers_enabled_.end();) {
+      // InvokeDumpProviderLocked will remove the MDP from the set if it fails.
+      MemoryDumpProvider* mdp = *dump_provider_iter;
+      ++dump_provider_iter;
+      if (mdp->task_runner()) {
+        // The DumpInto() call must be posted.
+        bool did_post_async_task = mdp->task_runner()->PostTask(
+            FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump,
+                            Unretained(this), Unretained(mdp), pmd_holder));
+        // The thread underlying the TaskRunner might have gone away.
+        if (did_post_async_task)
+          ++pmd_holder->num_pending_async_requests;
       } else {
-        LOG(ERROR) << "The memory dumper "
-                   << dump_provider_currently_active_->GetFriendlyName()
-                   << " failed, possibly due to sandboxing (crbug.com/461788), "
-                      "disabling it for current process. Try restarting chrome "
-                      "with the --no-sandbox switch.";
-        it = dump_providers_enabled_.erase(it);
+        // Invoke the dump provider synchronously.
+        did_any_provider_dump |= InvokeDumpProviderLocked(mdp, pmd);
       }
-      dump_provider_currently_active_ = nullptr;
     }
+  }  // AutoLock
+
+  // If at least one synchronous provider did dump and there are no pending
+  // asynchronous requests, add the dump to the trace and invoke the callback
+  // straight away (FinalizeDumpAndAddToTrace() takes care of the callback).
+  if (did_any_provider_dump && pmd_holder->num_pending_async_requests == 0)
+    FinalizeDumpAndAddToTrace(pmd_holder);
+}
+
+// Invokes the MemoryDumpProvider.DumpInto(), taking care of the failsafe logic
+// which disables the dumper when failing (crbug.com/461788).
+bool MemoryDumpManager::InvokeDumpProviderLocked(MemoryDumpProvider* mdp,
+                                                 ProcessMemoryDump* pmd) {
+  lock_.AssertAcquired();
+  dump_provider_currently_active_ = mdp;
+  bool dump_successful = mdp->DumpInto(pmd);
+  dump_provider_currently_active_ = nullptr;
+  if (!dump_successful) {
+    LOG(ERROR) << "The memory dumper " << mdp->GetFriendlyName()
+               << " failed, possibly due to sandboxing (crbug.com/461788), "
+                  "disabling it for current process. Try restarting chrome "
+                  "with the --no-sandbox switch.";
+    dump_providers_enabled_.erase(mdp);
   }
+  return dump_successful;
+}
 
-  // Don't create a dump point if all the dumpers failed.
-  if (!did_any_provider_dump)
-    return;
+// This is posted to arbitrary threads as a continuation of CreateProcessDump(),
+// when one or more MemoryDumpProvider(s) require the DumpInto() call to happen
+// on a different thread.
+void MemoryDumpManager::ContinueAsyncProcessDump(
+    MemoryDumpProvider* mdp,
+    scoped_refptr<ProcessMemoryDumpHolder> pmd_holder) {
+  bool should_finalize_dump = false;
+  {
+    // The lock here is to guarantee that different asynchronous dumps on
+    // different threads are still serialized, so that the MemoryDumpProvider
+    // has a consistent view of the |pmd| argument passed.
+    AutoLock lock(lock_);
+    ProcessMemoryDump* pmd = &pmd_holder->process_memory_dump;
 
-  scoped_refptr<ConvertableToTraceFormat> event_value(new TracedValue());
-  pmd->AsValueInto(static_cast<TracedValue*>(event_value.get()));
-  const char* const event_name = DumpPointTypeToString(dump_point_type);
+    // Check if the MemoryDumpProvider is still there. It might have been
+    // destroyed and unregistered while hopping threads.
+    if (dump_providers_enabled_.count(mdp))
+      InvokeDumpProviderLocked(mdp, pmd);
 
-  TRACE_EVENT_API_ADD_TRACE_EVENT(
-      TRACE_EVENT_PHASE_MEMORY_DUMP,
-      TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name, guid,
-      kTraceEventNumArgs, kTraceEventArgNames, kTraceEventArgTypes,
-      NULL /* arg_values */, &event_value, TRACE_EVENT_FLAG_HAS_ID);
+    // Finalize the dump appending it to the trace if this was the last
+    // asynchronous request pending.
+    --pmd_holder->num_pending_async_requests;
+    if (pmd_holder->num_pending_async_requests == 0)
+      should_finalize_dump = true;
+  }  // AutoLock(lock_)
+
+  if (should_finalize_dump)
+    FinalizeDumpAndAddToTrace(pmd_holder);
 }
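
Taken together, CreateProcessDump() and ContinueAsyncProcessDump() implement a fan-out/fan-in: each provider with a task_runner() gets a posted dump task, num_pending_async_requests counts them, and the dump is finalized once the count returns to zero. A compact standard-library model of that bookkeeping (single-threaded, with toy queues; names are illustrative):

#include <deque>
#include <functional>
#include <iostream>
#include <memory>
#include <vector>

struct DumpRequest {
  int num_pending_async_requests = 0;
  bool any_provider_dumped = false;
};

void Finalize(const std::shared_ptr<DumpRequest>& request) {
  std::cout << "dump finalized, success=" << request->any_provider_dumped << "\n";
}

int main() {
  // Toy task queues standing in for the providers' task runners.
  std::vector<std::deque<std::function<void()>>> provider_queues(3);
  auto request = std::make_shared<DumpRequest>();

  // Fan-out: post one dump task per provider and count it as pending.
  for (auto& queue : provider_queues) {
    queue.push_back([request] {
      request->any_provider_dumped = true;  // The provider "dumped" something.
      // Fan-in: the last pending continuation finalizes the dump.
      if (--request->num_pending_async_requests == 0)
        Finalize(request);
    });
    ++request->num_pending_async_requests;
  }

  // Drain the queues; in the real code each queue runs on its own thread and
  // the decrement happens under the manager's lock.
  for (auto& queue : provider_queues)
    for (auto& task : queue)
      task();
  return 0;
}
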
 
 void MemoryDumpManager::OnTraceLogEnabled() {
   // TODO(primiano): at this point we query TraceLog::GetCurrentCategoryFilter
   // to figure out (and cache) which dumpers should be enabled or not.
   // For the moment piggy back everything on the generic "memory" category.
   bool enabled;
   TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
 
   AutoLock lock(lock_);
-  if (enabled) {
-    dump_providers_enabled_.assign(dump_providers_registered_.begin(),
-                                   dump_providers_registered_.end());
-  } else {
+
+  // There is no point starting the tracing without a delegate.
+  if (!enabled || !delegate_) {
     dump_providers_enabled_.clear();
+    return;
   }
+
+  // Merge the dictionary of allocator attributes from all dump providers
+  // into the session state.
+  session_state_ = new MemoryDumpSessionState();
+  for (const MemoryDumpProvider* mdp : dump_providers_registered_) {
+    session_state_->allocators_attributes_type_info.Update(
+        mdp->allocator_attributes_type_info());
+  }
+  dump_providers_enabled_ = dump_providers_registered_;
   subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
+
+  if (delegate_->IsCoordinatorProcess()) {
+    periodic_dump_timer_.Start(FROM_HERE,
+                               TimeDelta::FromSeconds(kDumpIntervalSeconds),
+                               base::Bind(&RequestPeriodicGlobalDump));
+  }
 }
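
On the coordinator process, OnTraceLogEnabled() arms periodic_dump_timer_ so that every kDumpIntervalSeconds (2 s) RequestPeriodicGlobalDump() issues a PERIODIC_INTERVAL global dump, and OnTraceLogDisabled() below stops it. A minimal standard-library sketch of that polling behaviour, using a plain thread in place of the base timer (illustrative only):

#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

std::atomic<bool> g_tracing_enabled{true};

void RequestPeriodicDump() {
  std::cout << "periodic dump requested\n";  // Stands in for RequestGlobalDump().
}

int main() {
  // Fire every 2 seconds while tracing is enabled, like the periodic timer.
  std::thread timer_thread([] {
    while (g_tracing_enabled.load()) {
      std::this_thread::sleep_for(std::chrono::seconds(2));
      if (g_tracing_enabled.load())
        RequestPeriodicDump();
    }
  });

  std::this_thread::sleep_for(std::chrono::seconds(5));  // Tracing window.
  g_tracing_enabled = false;                             // OnTraceLogDisabled().
  timer_thread.join();
  return 0;
}
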
 
 void MemoryDumpManager::OnTraceLogDisabled() {
   AutoLock lock(lock_);
+  periodic_dump_timer_.Stop();
   dump_providers_enabled_.clear();
   subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
+  session_state_ = nullptr;
 }
 
 }  // namespace trace_event
 }  // namespace base
