Chromium Code Reviews

Side by Side Diff: base/debug/activity_tracker.cc

Issue 1980743002: Track thread activities in order to diagnose hangs. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@readwrite-mmf
Patch Set: addressed review comments | Created 4 years, 7 months ago

// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/activity_tracker.h"

#include <string.h>

#include <algorithm>
#include <atomic>
#include <limits>

#include "base/files/memory_mapped_file.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/field_trial.h"
#include "base/pending_task.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"

namespace base {
namespace debug {

namespace {

// A number that identifies the memory as having been initialized. It's
// arbitrary but happens to be the first 8 bytes of SHA1(ThreadActivityTracker).
// A version number is added on so that major structure changes won't try to
// read an older version (since the cookie won't match).
const uint64_t kHeaderCookie = 0xC0029B240D4A3092ULL + 1;  // v1

// The minimum depth a stack should support.
const int kMinStackDepth = 2;

// Type identifiers used when storing in persistent memory so they can be
// identified during extraction; the first 4 bytes of the SHA1 of the name
// are used as a unique integer. A "version number" is added to the base
// so that, if the structure of that object changes, stored older versions
// will be safely ignored.
enum : uint32_t {
  kTypeIdActivityTracker = 0x5D7381AF + 1,      // SHA1(ActivityTracker) v1
  kTypeIdActivityTrackerFree = 0x3F0272FB,      // SHA1(ActivityTrackerFree)
};

}  // namespace

const char kActivityTrackingFeatureName[] = "ActivityTracking";

void SetupGlobalActivityTrackerFieldTrial() {
  // TODO(bcwhite): Adjust these numbers once there is real data to show
  // just how much of an arena is necessary.
  const size_t kMemorySize = 1 << 20;  // 1 MiB
  const int kStackDepth = 3;
  const uint64_t kAllocatorId = 0;
  const char kAllocatorName[] = "ActivityTracker";

  const std::string group_name =
      FieldTrialList::FindFullName(kActivityTrackingFeatureName);
  if (group_name.empty() || group_name == "Disabled")
    return;

  if (group_name == "InMemory") {
    GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, kAllocatorId,
                                                 kAllocatorName, kStackDepth);
  } else {
    NOTREACHED() << group_name;
  }
}
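
// Illustrative note (not part of this CL): the group above is normally
// assigned by the field-trial/variations service. For local testing, one
// could force the "InMemory" group from the command line with Chromium's
// standard field-trial switch, e.g.:
//
//   chrome --force-fieldtrials=ActivityTracking/InMemory
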
struct ThreadActivityTracker::Header {
  // This unique number indicates a valid initialization of the memory.
  uint64_t cookie;

  // The thread-id to which this data belongs. This identifier is not
  // guaranteed to mean anything, just to be unique among all active
  // trackers.
  uint64_t thread_id;

  // The start-time and start-ticks when the data was created. Each activity
  // record has a |time_ticks| value that can be converted to a "wall time"
  // with these two values.
  int64_t start_time;
  int64_t start_ticks;

  // The number of Activity slots in the data.
  uint32_t slots;

  // The current depth of the stack. This may be greater than the number of
  // slots. If the depth exceeds the number of slots, the newest entries
  // won't be recorded.
  std::atomic<uint32_t> depth;

  // A memory location used to indicate if changes have been made to the stack
  // that would invalidate an in-progress read of its contents. The active
  // tracker will zero the value whenever something gets popped from the
  // stack. A monitoring tracker can write a non-zero value here, copy the
  // stack contents, and read the value to know, if it is still non-zero, that
  // the contents didn't change while being copied. This is a "natural word"
  // to ensure the best performance across different CPU architectures.
  std::atomic<int> unchanged;

  // The name of the thread (up to a maximum length). Dynamic-length names
  // are not practical since the memory has to come from the same persistent
  // allocator that holds this structure and to which this object has no
  // reference.
  char name[32];
};
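
// Illustrative sketch (not part of this CL): the memory block managed by a
// ThreadActivityTracker is the Header above followed immediately by an array
// of StackEntry records, which is exactly what SizeForStackDepth() below
// computes:
//
//   +--------+---------------+---------------+-----+-----------------------+
//   | Header | StackEntry[0] | StackEntry[1] | ... | StackEntry[slots - 1] |
//   +--------+---------------+---------------+-----+-----------------------+
//
//   total bytes = sizeof(Header) + slots * sizeof(StackEntry)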

ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
    : header_(static_cast<Header*>(base)),
      stack_(reinterpret_cast<StackEntry*>(reinterpret_cast<char*>(base) +
                                           sizeof(Header))),
      stack_slots_((size - sizeof(Header)) / sizeof(StackEntry)) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(base);

  // Ensure there is enough space for the header and at least a few records.
  DCHECK_LE(sizeof(Header) + kMinStackDepth * sizeof(StackEntry), size);

  // Ensure that the |stack_slots_| calculation didn't overflow.
  DCHECK_GE(std::numeric_limits<uint32_t>::max(),
            (size - sizeof(Header)) / sizeof(StackEntry));

  // Provided memory should either be completely initialized or all zeros.
  if (header_->cookie == 0) {
    // This is a new file. Double-check other fields and then initialize.
    DCHECK_EQ(0U, header_->thread_id);
    DCHECK_EQ(0, header_->start_time);
    DCHECK_EQ(0, header_->start_ticks);
    DCHECK_EQ(0U, header_->slots);
    DCHECK_EQ(0U, header_->depth.load(std::memory_order_relaxed));
    DCHECK_EQ(0, header_->unchanged.load(std::memory_order_relaxed));
    DCHECK_EQ(0, stack_[0].time_ticks);
    DCHECK_EQ(0, stack_[0].source_address);
    DCHECK_EQ(0, stack_[0].method_address);
    DCHECK_EQ(0U, stack_[0].sequence_id);

    header_->cookie = kHeaderCookie;
    header_->thread_id = static_cast<uint64_t>(PlatformThread::CurrentId());
    header_->start_time = base::Time::Now().ToInternalValue();
    header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
    header_->slots = stack_slots_;
    strlcpy(header_->name, PlatformThread::GetName(), sizeof(header_->name));
    valid_ = true;
  } else {
    // This is a file with existing data. Perform basic consistency checks.
    if (header_->cookie != kHeaderCookie ||
        header_->slots != stack_slots_ ||
        header_->start_time > base::Time::Now().ToInternalValue())
      return;
    valid_ = true;
  }
}

ThreadActivityTracker::~ThreadActivityTracker() {}

void ThreadActivityTracker::PushActivity(const void* source,
                                         ActivityType activity,
                                         intptr_t method,
                                         uint64_t sequence) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // Get the current depth of the stack. No access to other memory guarded
  // by this variable is done here so a "relaxed" load is acceptable.
  uint32_t depth = header_->depth.load(std::memory_order_relaxed);

  // Handle the case where the stack depth has exceeded the storage capacity.
  // Extra entries will be lost, leaving only the base of the stack.
  if (depth >= stack_slots_) {
    // Since no other memory is being modified, a "relaxed" store is
    // acceptable.
    header_->depth.store(depth + 1, std::memory_order_relaxed);
    return;
  }

  // Get a pointer to the next entry and fill it in. No atomicity is required
  // here because the memory is known only to this thread. It will be made
  // known to other threads once the depth is incremented.
  StackEntry* entry = &stack_[depth];
  entry->time_ticks = base::TimeTicks::Now().ToInternalValue();
  entry->activity_type = activity;
  entry->source_address = reinterpret_cast<intptr_t>(source);
  entry->method_address = method;
  entry->sequence_id = sequence;

  // Save the incremented depth. Because this guards |entry| memory filled
  // above that may be read by another thread once the recorded depth changes,
  // a "release" store is required.
  header_->depth.store(depth + 1, std::memory_order_release);
}

void ThreadActivityTracker::PopActivity(const void* source) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // Do an atomic decrement of the depth. No changes to stack entries guarded
  // by this variable are made here so a "relaxed" operation is acceptable.
  // |depth| will receive the value before it was modified.
  uint32_t depth = header_->depth.fetch_sub(1, std::memory_order_relaxed);

  // Validate that everything is running correctly.
  DCHECK_LT(0U, depth);
  if (depth <= stack_slots_) {
    DCHECK_EQ(reinterpret_cast<intptr_t>(source),
              stack_[depth - 1].source_address);
  }

  // The stack has shrunk, meaning that some other thread trying to copy the
  // contents for reporting purposes could get bad data. That thread would
  // have written a non-zero value into |unchanged|; clearing it here will
  // let that thread detect that something did change. It doesn't matter
  // when this is done relative to the atomic |depth| operation above so a
  // "relaxed" access is acceptable.
  header_->unchanged.store(0, std::memory_order_relaxed);
}
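
// Illustrative sketch (not part of this CL): |depth| and |unchanged| together
// form a seqlock-style protocol between the single writer (the tracked
// thread) and any reader (a monitoring thread):
//
//   reader (SnapshotStack)            writer (PopActivity)
//   ----------------------            --------------------
//   unchanged.store(1)
//   depth.load(acquire)
//   memcpy() the stack contents       unchanged.store(0)  // invalidates read
//   if (unchanged.load() != 0)
//     the copy is consistent          (otherwise the reader retries)
//
// Pushes never clear the flag: they only write entries beyond the depth the
// reader loaded, so they cannot corrupt the region already copied.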

std::unique_ptr<ThreadActivityAnalyzer>
ThreadActivityTracker::CreateAnalyzer() {
  return WrapUnique(
      new ThreadActivityAnalyzer(header_, SizeForStackDepth(stack_slots_)));
}

// static
size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
  return static_cast<size_t>(stack_depth) * sizeof(StackEntry) + sizeof(Header);
}

ThreadActivityAnalyzer::ThreadActivityAnalyzer(void* base, size_t size)
    : ThreadActivityTracker(base, size) {}

ThreadActivityAnalyzer::~ThreadActivityAnalyzer() {}

uint32_t ThreadActivityAnalyzer::SnapshotStack(
    std::vector<StackEntry>* snapshot) {
  // It's possible for the data to change while reading it in such a way that
  // it invalidates the read. Make several attempts but don't try forever.
  const int kMaxAttempts = 10;
  uint32_t depth;

  // Start with an empty return stack.
  snapshot->clear();

  // Stop here if the data isn't valid.
  if (!is_valid())
    return 0;

  for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
    // Write a non-zero value to |unchanged| so it's possible to detect at
    // the end that nothing has changed since copying the data began.
    header()->unchanged.store(1, std::memory_order_relaxed);

    // Fetching the current depth also "acquires" the contents of the stack.
    depth = header()->depth.load(std::memory_order_acquire);
    if (depth == 0)
      return 0;

    // Copy the existing contents. Memcpy is used for speed.
    uint32_t count = std::min(depth, stack_slots());
    snapshot->resize(count);
    memcpy(&(*snapshot)[0], stack(), count * sizeof(StackEntry));

    // Check to make sure everything was unchanged during the copy.
    if (header()->unchanged.load(std::memory_order_relaxed))
      return depth;
  }

  // If all attempts failed, just return the depth with no content.
  snapshot->clear();
  return depth;
}
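
// Illustrative usage sketch (not part of this CL; |tracker| is a hypothetical
// ThreadActivityTracker owned elsewhere). A monitoring thread could take a
// consistent copy of another thread's activity stack like this:
//
//   std::unique_ptr<ThreadActivityAnalyzer> analyzer =
//       tracker->CreateAnalyzer();
//   std::vector<ThreadActivityTracker::StackEntry> snapshot;
//   uint32_t depth = analyzer->SnapshotStack(&snapshot);
//   // |snapshot| holds min(depth, slots) entries; if |depth| is larger than
//   // snapshot.size(), the newest activities were not recorded.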

GlobalActivityTracker* GlobalActivityTracker::g_tracker_ = nullptr;

GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
    PersistentMemoryAllocator::Reference mem_reference,
    void* base,
    size_t size)
    : ThreadActivityTracker(base, size),
      mem_reference_(mem_reference),
      mem_base_(base) {}

GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
  // The global |g_tracker_| must point to the owner of this class since all
  // objects of this type must be destructed before |g_tracker_| can be
  // changed.
  DCHECK(g_tracker_);
  g_tracker_->ReturnTrackerMemory(this, mem_reference_, mem_base_);
}

// static
void GlobalActivityTracker::CreateWithAllocator(
    std::unique_ptr<PersistentMemoryAllocator> allocator,
    int stack_depth) {
  // There's no need to do anything with the result. It is self-managing.
  new GlobalActivityTracker(std::move(allocator), stack_depth);
}

// static
void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
                                                  uint64_t id,
                                                  StringPiece name,
                                                  int stack_depth) {
  CreateWithAllocator(
      WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)),
      stack_depth);
}

// static
void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
                                           size_t size,
                                           uint64_t id,
                                           StringPiece name,
                                           int stack_depth) {
  // Create the file, overwriting anything that was there previously, and set
  // the length. This will create a space that is zero-filled, a requirement
  // for operation.
  File file(file_path,
            File::FLAG_CREATE_ALWAYS | File::FLAG_READ | File::FLAG_WRITE);
  file.SetLength(size);

  // Map the file into memory and make it globally available.
  std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile());
  mapped_file->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
  CreateWithAllocator(WrapUnique(new FilePersistentMemoryAllocator(
                          std::move(mapped_file), id, name)),
                      stack_depth);
}
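
// Illustrative usage sketch (not part of this CL; the path and sizes are
// hypothetical, chosen to match the field-trial constants above):
//
//   GlobalActivityTracker::CreateWithFile(
//       FilePath(FILE_PATH_LITERAL("activity.dat")),
//       1 << 20 /* size */, 0 /* id */, "ActivityTracker", 3 /* depth */);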

ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
  DCHECK(!this_thread_tracker_.Get());

  // The lock must be acquired to access the STL data structures.
  AutoLock auto_lock(lock_);

  PersistentMemoryAllocator::Reference mem_reference;
  void* mem_base;
  if (!available_memories_.empty()) {
    // There is a memory block that was previously released (and zeroed) so
    // just re-use that rather than allocating a new one.
    mem_reference = available_memories_.back();
    available_memories_.pop_back();
    mem_base = allocator_->GetAsObject<char>(mem_reference,
                                             kTypeIdActivityTrackerFree);
    DCHECK(mem_base);
    DCHECK_LE(stack_memory_, allocator_->GetAllocSize(mem_reference));
    allocator_->SetType(mem_reference, kTypeIdActivityTracker);
  } else {
    // Allocate a block of memory from the persistent segment.
    mem_reference =
        allocator_->Allocate(stack_memory_, kTypeIdActivityTracker);
    if (mem_reference) {
      // Success. Convert the reference to an actual memory address.
      mem_base =
          allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker);
    } else {
      // Failure. This should never happen.
      NOTREACHED();
      // But if it does, handle it gracefully by allocating the required
      // memory from the heap.
      mem_base = new char[stack_memory_];
      memset(mem_base, 0, stack_memory_);
    }
  }

  // Create a tracker with the acquired memory and set it as the tracker
  // for this particular thread in thread-local-storage.
  ManagedActivityTracker* tracker =
      new ManagedActivityTracker(mem_reference, mem_base, stack_memory_);
  DCHECK(tracker->is_valid());
  thread_trackers_.insert(tracker);
  this_thread_tracker_.Set(tracker);

  return tracker;
}

void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
  ThreadActivityTracker* tracker =
      reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
  if (tracker) {
    this_thread_tracker_.Free();
    delete tracker;
  }
}

GlobalActivityTracker::GlobalActivityTracker(
    std::unique_ptr<PersistentMemoryAllocator> allocator,
    int stack_depth)
    : allocator_(std::move(allocator)),
      stack_memory_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
      this_thread_tracker_(&OnTLSDestroy) {
  // Ensure the passed memory is valid and empty (iterator finds nothing).
  uint32_t type;
  DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));

  // Ensure that there is no other global object and then make this one such.
  DCHECK(!g_tracker_);
  g_tracker_ = this;

  // Create a tracker for this thread since it is known.
  CreateTrackerForCurrentThread();
}

GlobalActivityTracker::~GlobalActivityTracker() {
  DCHECK_EQ(g_tracker_, this);
  DCHECK_EQ(0U, thread_trackers_.size());
  g_tracker_ = nullptr;
}

void GlobalActivityTracker::ReturnTrackerMemory(
    ManagedActivityTracker* tracker,
    PersistentMemoryAllocator::Reference mem_reference,
    void* mem_base) {
  // Zero the memory so that it is ready for use if needed again later. It's
  // better to clear the memory now, when a thread is exiting, than to do it
  // when it is first needed by a thread doing actual work.
  memset(mem_base, 0, stack_memory_);

  // Access to STL structures requires a lock because this could get called
  // from any thread.
  AutoLock auto_lock(lock_);

  // Remove the destructed tracker from the set of known ones.
  DCHECK(ContainsKey(thread_trackers_, tracker));
  thread_trackers_.erase(tracker);

  // Deal with the memory that was used by the tracker.
  if (mem_reference) {
    // The memory was within the persistent memory allocator. Change its type
    // so that iteration won't find it.
    allocator_->SetType(mem_reference, kTypeIdActivityTrackerFree);
    // There is no way to free memory from a persistent allocator so instead
    // keep it on the internal list of available memory blocks.
    DCHECK_LE(stack_memory_, allocator_->GetAllocSize(mem_reference));
    available_memories_.push_back(mem_reference);
  } else {
    // The memory was allocated from the process heap. This shouldn't happen
    // because the persistent memory segment should be big enough for all
    // thread stacks, but it's better to fall back to heap allocation than to
    // crash. Everything will work as normal but the data won't be persisted.
    delete[] reinterpret_cast<char*>(mem_base);
  }
}

// static
void GlobalActivityTracker::OnTLSDestroy(void* value) {
  delete reinterpret_cast<ManagedActivityTracker*>(value);
}

ScopedTaskActivity::ScopedTaskActivity(const PendingTask& task)
    : GlobalActivityTracker::ScopedThreadActivity(
          task.posted_from.program_counter(),
          ThreadActivityTracker::ACT_TASK,
          0,
          task.sequence_num) {}
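
// Illustrative usage sketch (not part of this CL, and assuming the RAII
// behavior implied by the name: the ScopedThreadActivity base pushes an
// activity in its constructor and pops it in its destructor). A task runner
// could record each task it executes like this:
//
//   {
//     ScopedTaskActivity task_activity(pending_task);
//     pending_task.task.Run();
//   }  // Popped here, so a hung task remains visible on the activity stack.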

}  // namespace debug
}  // namespace base