// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/activity_tracker.h"

#include <string.h>

#include <algorithm>
#include <atomic>
#include <limits>

#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/files/memory_mapped_file.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/field_trial.h"
#include "base/pending_task.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"

namespace base {
namespace debug {

namespace {

// A number that identifies the memory as having been initialized. It's
// arbitrary but happens to be the first 8 bytes of SHA1(ThreadActivityTracker).
// A version number is added on so that major structure changes won't try to
// read an older version (since the cookie won't match).
const uint64_t kHeaderCookie = 0xC0029B240D4A3092ULL + 1;  // v1

// The minimum depth a stack should support.
const int kMinStackDepth = 2;

const Feature kActivityTrackerFeature{
    "ActivityTracking", FEATURE_DISABLED_BY_DEFAULT
};

}  // namespace


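// Enables the global activity tracker for the process, backed by |file|, if
// the ActivityTracking feature is enabled.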
void SetupGlobalActivityTrackerFieldTrial(const FilePath& file) {
  if (!base::FeatureList::IsEnabled(kActivityTrackerFeature))
    return;

  // TODO(bcwhite): Adjust these numbers once there is real data to show
  // just how much of an arena is necessary.
  const size_t kMemorySize = 1 << 20;  // 1 MiB
  const int kStackDepth = 3;
  const uint64_t kAllocatorId = 0;
  const char kAllocatorName[] = "ActivityTracker";

  GlobalActivityTracker::CreateWithFile(
      file.AddExtension(PersistentMemoryAllocator::kFileExtension),
      kMemorySize, kAllocatorId, kAllocatorName, kStackDepth);
}

// This information is kept for every thread that is tracked. It is filled
// in the very first time the thread is seen. All fields must be of exact
// sizes so there is no issue moving between 32- and 64-bit builds.
struct ThreadActivityTracker::Header {
  // This unique number indicates a valid initialization of the memory.
  uint64_t cookie;

  // The thread-id to which this data belongs. This identifier is not
  // guaranteed to mean anything, just to be unique among all active
  // trackers.
  uint64_t thread_id;

  // The start-time and start-ticks when the data was created. Each activity
  // record has a |time_ticks| value that can be converted to a "wall time"
  // with these two values.
  int64_t start_time;
  int64_t start_ticks;

  // The number of Activity slots in the data.
  uint32_t slots;

  // The current depth of the stack. This may be greater than the number of
  // slots; when the depth exceeds the number of slots, the newest entries
  // are not recorded.
  std::atomic<uint32_t> depth;

  // A memory location used to indicate if changes have been made to the stack
  // that would invalidate an in-progress read of its contents. The active
  // tracker will zero the value whenever something gets popped from the
  // stack. A monitoring tracker can write a non-zero value here, copy the
  // stack contents, and then re-read the value: if it is still non-zero, the
  // contents didn't change while being copied.
  std::atomic<uint32_t> unchanged;

  // The name of the thread (up to a maximum length). Dynamic-length names
  // are not practical since the memory has to come from the same persistent
  // allocator that holds this structure and to which this object has no
  // reference.
  char name[32];
};

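// Constructs a tracker over |size| bytes of memory at |base|. The memory is
// laid out as a Header followed by an array of StackEntry slots; it may be
// freshly zeroed (a new tracker) or contain data persisted by a previous run.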
ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
    : header_(static_cast<Header*>(base)),
      stack_(reinterpret_cast<StackEntry*>(reinterpret_cast<char*>(base) +
                                           sizeof(Header))),
      stack_slots_((size - sizeof(Header)) / sizeof(StackEntry)) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(base);

  // Ensure there is enough space for the header and at least a few records.
  DCHECK_LE(sizeof(Header) + kMinStackDepth * sizeof(StackEntry), size);

  // Ensure that the |stack_slots_| calculation didn't overflow.
  DCHECK_GE(std::numeric_limits<uint32_t>::max(),
            (size - sizeof(Header)) / sizeof(StackEntry));

  // Provided memory should either be completely initialized or all zeros.
  if (header_->cookie == 0) {
    // This is a new file. Double-check other fields and then initialize.
    DCHECK_EQ(0U, header_->thread_id);
    DCHECK_EQ(0, header_->start_time);
    DCHECK_EQ(0, header_->start_ticks);
    DCHECK_EQ(0U, header_->slots);
    DCHECK_EQ(0U, header_->depth.load(std::memory_order_relaxed));
    DCHECK_EQ(0U, header_->unchanged.load(std::memory_order_relaxed));
    DCHECK_EQ(0, stack_[0].time_ticks);
    DCHECK_EQ(0, stack_[0].source_address);
    DCHECK_EQ(0U, stack_[0].data.task.sequence_id);

    header_->cookie = kHeaderCookie;
    header_->thread_id = static_cast<uint64_t>(PlatformThread::CurrentId());
    header_->start_time = base::Time::Now().ToInternalValue();
    header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
    header_->slots = stack_slots_;
    strlcpy(header_->name, PlatformThread::GetName(), sizeof(header_->name));
    valid_ = true;
  } else {
    // This is a file with existing data. Perform basic consistency checks.
    if (header_->cookie != kHeaderCookie ||
        header_->slots != stack_slots_ ||
        header_->start_time > base::Time::Now().ToInternalValue())
      return;
    valid_ = true;
  }
}

ThreadActivityTracker::~ThreadActivityTracker() {}

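// Records the start of an activity by filling in the next stack entry and
// then publishing it with a "release" store of the incremented depth.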
void ThreadActivityTracker::PushActivity(const void* source,
                                         ActivityType activity,
                                         const StackEntryData& data) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // Get the current depth of the stack. No access to other memory guarded
  // by this variable is done here so a "relaxed" load is acceptable.
  uint32_t depth = header_->depth.load(std::memory_order_relaxed);

  // Handle the case where the stack depth has exceeded the storage capacity.
  // Extra entries will be lost, leaving only the base of the stack.
  if (depth >= stack_slots_) {
    // Since no other memory is being modified, a "relaxed" store is
    // acceptable.
    header_->depth.store(depth + 1, std::memory_order_relaxed);
    return;
  }

  // Get a pointer to the next entry and fill it in. No atomicity is required
  // here because the memory is known only to this thread. It will be made
  // known to other threads once the depth is incremented.
  StackEntry* entry = &stack_[depth];
  entry->time_ticks = base::TimeTicks::Now().ToInternalValue();
  entry->source_address = reinterpret_cast<intptr_t>(source);
  entry->activity_type = activity;
  entry->data = data;

  // Save the incremented depth. Because this guards the |entry| memory filled
  // above that may be read by another thread once the recorded depth changes,
  // a "release" store is required.
  header_->depth.store(depth + 1, std::memory_order_release);
}

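// Records the end of the most recent activity by decrementing the depth and
// signalling any concurrent reader that the stack contents have changed.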
void ThreadActivityTracker::PopActivity(const void* source) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // Do an atomic decrement of the depth. No changes to stack entries guarded
  // by this variable are made here so a "relaxed" operation is acceptable.
  // |depth| will receive the value from before it was modified.
  uint32_t depth = header_->depth.fetch_sub(1, std::memory_order_relaxed);

  // Validate that everything is running correctly.
  DCHECK_LT(0U, depth);
  if (depth <= stack_slots_) {
    DCHECK_EQ(reinterpret_cast<intptr_t>(source),
              stack_[depth - 1].source_address);
  }

  // The stack has shrunk, meaning that some other thread trying to copy the
  // contents for reporting purposes could get bad data. That thread would
  // have written a non-zero value into |unchanged|; clearing it here will
  // let that thread detect that something did change. It doesn't matter
  // when this is done relative to the atomic |depth| operation above so a
  // "relaxed" access is acceptable.
  header_->unchanged.store(0, std::memory_order_relaxed);
}

// static
size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
  return static_cast<size_t>(stack_depth) * sizeof(StackEntry) + sizeof(Header);
}

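// An analyzer can be constructed directly over a live tracker, from a
// reference within a persistent-memory allocator, or over a raw block of
// memory holding a tracker's data.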
ThreadActivityAnalyzer::ThreadActivityAnalyzer(ThreadActivityTracker* tracker)
    : ThreadActivityAnalyzer(
          tracker->header_,
          ThreadActivityTracker::SizeForStackDepth(tracker->stack_slots_)) {}

ThreadActivityAnalyzer::ThreadActivityAnalyzer(
    PersistentMemoryAllocator* allocator,
    PersistentMemoryAllocator::Reference reference)
    : ThreadActivityAnalyzer(allocator->GetAsObject<char>(
                                 reference,
                                 GlobalActivityTracker::kTypeIdActivityTracker),
                             allocator->GetAllocSize(reference)) {}

ThreadActivityAnalyzer::ThreadActivityAnalyzer(void* base, size_t size)
    : tracker_(base, size) {}

ThreadActivityAnalyzer::~ThreadActivityAnalyzer() {}

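// The copy loop below is a seqlock-style protocol: set |unchanged|, copy the
// stack, then re-check |unchanged|. PopActivity() clears the flag whenever an
// entry is removed, invalidating any copy that was in progress.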
uint32_t ThreadActivityAnalyzer::SnapshotStack(
    std::vector<StackEntry>* snapshot) {
  // It's possible for the data to change while reading it in such a way that
  // it invalidates the read. Make several attempts but don't try forever.
  const int kMaxAttempts = 10;
  uint32_t depth;

  // Start with an empty return stack.
  snapshot->clear();

  // Stop here if the data isn't valid.
  if (!tracker_.is_valid())
    return 0;

  ThreadActivityTracker::Header* header = tracker_.header_;
  for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
    // Write a non-zero value to |unchanged| so it's possible to detect at
    // the end that nothing has changed since copying the data began.
    header->unchanged.store(1, std::memory_order_relaxed);

    // Fetching the current depth also "acquires" the contents of the stack.
    depth = header->depth.load(std::memory_order_acquire);
    if (depth == 0)
      return 0;

    // Copy the existing contents. Memcpy is used for speed.
    uint32_t count = std::min(depth, tracker_.stack_slots_);
    snapshot->resize(count);
    memcpy(&(*snapshot)[0], tracker_.stack_, count * sizeof(StackEntry));

    // Check to make sure everything was unchanged during the copy.
    if (header->unchanged.load(std::memory_order_relaxed))
      return depth;
  }

  // If all attempts failed, just return the depth with no content.
  snapshot->clear();
  return depth;
}

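// Example (hypothetical monitoring code): snapshotting another thread's
// activity stack for reporting, given an |allocator| and a reference |ref|
// to a tracker's memory:
//   ThreadActivityAnalyzer analyzer(allocator, ref);
//   std::vector<ThreadActivityTracker::StackEntry> stack;
//   uint32_t depth = analyzer.SnapshotStack(&stack);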

GlobalActivityTracker* GlobalActivityTracker::g_tracker_ = nullptr;

GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
    PersistentMemoryAllocator::Reference mem_reference,
    void* base,
    size_t size)
    : ThreadActivityTracker(base, size),
      mem_reference_(mem_reference),
      mem_base_(base) {}

GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
  // The global |g_tracker_| must point to the owner of this class since all
  // objects of this type must be destructed before |g_tracker_| can be
  // changed.
  DCHECK(g_tracker_);
  g_tracker_->ReturnTrackerMemory(this, mem_reference_, mem_base_);
}

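// static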
void GlobalActivityTracker::CreateWithAllocator(
    std::unique_ptr<PersistentMemoryAllocator> allocator,
    int stack_depth) {
  // There's no need to do anything with the result. It is self-managing.
  new GlobalActivityTracker(std::move(allocator), stack_depth);
}

// static
void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
                                                  uint64_t id,
                                                  StringPiece name,
                                                  int stack_depth) {
  CreateWithAllocator(
      WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)),
      stack_depth);
}

// static
void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
                                           size_t size,
                                           uint64_t id,
                                           StringPiece name,
                                           int stack_depth) {
  DCHECK(!file_path.empty());

  // Create the file, overwriting anything that was there previously, and set
  // the length. This will create a space that is zero-filled, a requirement
  // for operation.
  File file(file_path, File::FLAG_CREATE_ALWAYS | File::FLAG_READ |
                           File::FLAG_WRITE | File::FLAG_SHARE_DELETE);
  DCHECK(file.IsValid());
  file.SetLength(size);

  // Map the file into memory and make it globally available.
  std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile());
  bool success =
      mapped_file->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
  DCHECK(success);
  CreateWithAllocator(WrapUnique(new FilePersistentMemoryAllocator(
                          std::move(mapped_file), size, id, name, false)),
                      stack_depth);
}

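// Creates the tracker for the current thread, reusing a previously released
// memory block if one is available and otherwise allocating from the
// persistent arena (or, as a last resort, the heap).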
ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
  DCHECK(!this_thread_tracker_.Get());

  // The lock must be acquired to access the STL data structures.
  AutoLock auto_lock(lock_);

  PersistentMemoryAllocator::Reference mem_reference;
  void* mem_base;
  if (!available_memories_.empty()) {
    // There is a memory block that was previously released (and zeroed) so
    // just re-use that rather than allocating a new one.
    mem_reference = available_memories_.back();
    available_memories_.pop_back();
    mem_base = allocator_->GetAsObject<char>(mem_reference,
                                             kTypeIdActivityTrackerFree);
    DCHECK(mem_base);
    DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
    allocator_->SetType(mem_reference, kTypeIdActivityTracker);
  } else {
    // Allocate a block of memory from the persistent segment.
    mem_reference =
        allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker);
    if (mem_reference) {
      // Success. Convert the reference to an actual memory address.
      mem_base =
          allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker);
      // Make the allocation iterable so it can be found by other processes.
      allocator_->MakeIterable(mem_reference);
    } else {
      // Failure. This should never happen.
      NOTREACHED();
      // But if it does, handle it gracefully by allocating the required
      // memory from the heap.
      mem_base = new char[stack_memory_size_];
      memset(mem_base, 0, stack_memory_size_);
    }
  }

  // Create a tracker with the acquired memory and set it as the tracker
  // for this particular thread in thread-local-storage.
  ManagedActivityTracker* tracker =
      new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_);
  DCHECK(tracker->is_valid());
  thread_trackers_.insert(tracker);
  this_thread_tracker_.Set(tracker);

  return tracker;
}

void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
  ThreadActivityTracker* tracker =
      reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
  if (tracker) {
    this_thread_tracker_.Free();
    delete tracker;
  }
}

GlobalActivityTracker::GlobalActivityTracker(
    std::unique_ptr<PersistentMemoryAllocator> allocator,
    int stack_depth)
    : allocator_(std::move(allocator)),
      stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
      this_thread_tracker_(&OnTLSDestroy) {
  // Ensure the passed memory is valid and empty (iterator finds nothing).
  uint32_t type;
  DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));

  // Ensure that there is no other global object and then make this one such.
  DCHECK(!g_tracker_);
  g_tracker_ = this;

  // Create a tracker for this thread since it is known.
  CreateTrackerForCurrentThread();
}

GlobalActivityTracker::~GlobalActivityTracker() {
  DCHECK_EQ(g_tracker_, this);
  DCHECK_EQ(0U, thread_trackers_.size());
  g_tracker_ = nullptr;
}

void GlobalActivityTracker::ReturnTrackerMemory(
    ManagedActivityTracker* tracker,
    PersistentMemoryAllocator::Reference mem_reference,
    void* mem_base) {
  // Zero the memory so that it is ready for use if needed again later. It's
  // better to clear the memory now, when a thread is exiting, than to do it
  // when it is first needed by a thread doing actual work.
  memset(mem_base, 0, stack_memory_size_);

  // Access to STL structures requires a lock because this could get called
  // from any thread.
  AutoLock auto_lock(lock_);

  // Remove the destructed tracker from the set of known ones.
  DCHECK(ContainsKey(thread_trackers_, tracker));
  thread_trackers_.erase(tracker);

  // Deal with the memory that was used by the tracker.
  if (mem_reference) {
    // The memory was within the persistent memory allocator. Change its type
    // so that iteration won't find it.
    allocator_->SetType(mem_reference, kTypeIdActivityTrackerFree);
    // There is no way to free memory from a persistent allocator so instead
    // keep it on the internal list of available memory blocks.
    DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
    available_memories_.push_back(mem_reference);
  } else {
    // The memory was allocated from the process heap. This shouldn't happen
    // because the persistent memory segment should be big enough for all
    // thread stacks but it's better to support falling back to allocation
    // from the heap rather than crash. Everything will work as normal but
    // the data won't be persisted.
    delete[] reinterpret_cast<char*>(mem_base);
  }
}

// static
void GlobalActivityTracker::OnTLSDestroy(void* value) {
  delete reinterpret_cast<ManagedActivityTracker*>(value);
}


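// Example (hypothetical caller such as a message-loop's task dispatcher):
// constructing a ScopedTaskActivity on the stack records the task while it
// runs and pops it automatically on destruction:
//   ScopedTaskActivity activity(pending_task);
//   pending_task.task.Run();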
ScopedTaskActivity::ScopedTaskActivity(const PendingTask& task)
    : GlobalActivityTracker::ScopedThreadActivity(
          task.posted_from.program_counter(),
          ThreadActivityTracker::ACT_TASK,
          ThreadActivityTracker::StackEntryData::ForTask(task.sequence_num)) {}

}  // namespace debug
}  // namespace base