// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/activity_tracker.h"

#include <algorithm>
#include <atomic>
#include <cstring>
#include <limits>

#include "base/feature_list.h"
#include "base/files/file.h"
#include "base/files/memory_mapped_file.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/pending_task.h"
#include "base/stl_util.h"
#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"

namespace base {

namespace {

// A number that identifies memory as having been initialized.
const uint64_t kHeaderCookie = 0x98476A390137E67A + 1;  // v1

// Type identifiers used when storing in persistent memory so they can be
// identified during extraction; the first 4 bytes of the SHA1 of the name
// are used as a unique integer. A "version number" is added to the base
// so that, if the structure of that object changes, stored older versions
// will be safely ignored.
enum : uint32_t {
  kTypeIdActivityTracker = 0x5D7381AF + 1,  // SHA1(ActivityTracker) v1
  kTypeIdActivityTrackerFree = 0x3F0272FB,  // SHA1(ActivityTrackerFree)
};

}  // namespace

const Feature kPersistentActivityTrackingFeature{
    "PersistentActivityTracking", FEATURE_DISABLED_BY_DEFAULT};

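// Illustrative gating sketch (an assumption about call sites, not code in
// this file): creation of the global tracker could be conditioned on the
// feature, e.g.:
//
//   if (FeatureList::IsEnabled(kPersistentActivityTrackingFeature)) {
//     // Sizes here are made-up example values.
//     GlobalActivityTracker::CreateWithLocalMemory(1 << 20, 0, "Tracking",
//                                                  4 << 10);
//   }
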
struct ThreadActivityTracker::Header {
  // This unique number indicates a valid initialization of the memory.
  uint64_t cookie;

  // The thread-id to which this data belongs. This identifier is not
  // guaranteed to mean anything, just to be unique among all active
  // trackers.
  uint64_t thread_id;

  // The start-time and start-ticks when the data was created. Each activity
  // record has a |time_ticks| value that can be converted to a "wall time"
  // with these two values.
  int64_t start_time;
  int64_t start_ticks;

  // The number of Activity slots in the data.
  uint32_t slots;

  // The current depth of the stack. This may be greater than the number of
  // slots. If the depth exceeds the number of slots, the newest entries
  // won't be recorded.
  std::atomic<uint32_t> depth;

  // A memory location used to indicate if changes have been made to the stack
  // that would invalidate an in-progress read of its contents. The active
  // tracker will zero the value whenever something gets popped from the
  // stack. A monitoring tracker can write a non-zero value here, copy the
  // stack contents, and read the value to know, if it is still non-zero, that
  // the contents didn't change while being copied.
  std::atomic<int> unchanged;
};

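// For reference (derived from the constructor below, not new behavior): the
// tracked memory is laid out as the Header followed immediately by an array
// of StackEntry records:
//
//   [ Header | StackEntry[0] | StackEntry[1] | ... | StackEntry[slots-1] ]
//
// so |slots| is (size - sizeof(Header)) / sizeof(StackEntry).
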
ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
    : header_(static_cast<Header*>(base)),
      stack_(reinterpret_cast<StackEntry*>(reinterpret_cast<char*>(base) +
                                           sizeof(Header))),
      slots_((size - sizeof(Header)) / sizeof(StackEntry)) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // Ensure there is enough space for the header and at least a few records.
  DCHECK_LE(sizeof(Header) + 2 * sizeof(StackEntry), size);

  // Ensure that the |slots_| calculation didn't overflow.
  DCHECK_GE(std::numeric_limits<uint32_t>::max(),
            (size - sizeof(Header)) / sizeof(StackEntry));

  // Provided memory should either be completely initialized or all zeros.
  if (header_->cookie == 0) {
    // This is a new file. Double-check other fields and then initialize.
    DCHECK_EQ(0U, header_->thread_id);
    DCHECK_EQ(0, header_->start_time);
    DCHECK_EQ(0, header_->start_ticks);
    DCHECK_EQ(0U, header_->slots);
    DCHECK_EQ(0U, header_->depth.load(std::memory_order_relaxed));
    DCHECK_EQ(0, header_->unchanged.load(std::memory_order_relaxed));
    DCHECK_EQ(0, stack_[0].time_ticks);
    DCHECK_EQ(0, stack_[0].source_address);
    DCHECK_EQ(0, stack_[0].method_address);
    DCHECK_EQ(0U, stack_[0].sequence_id);

    header_->cookie = kHeaderCookie;
    header_->thread_id = static_cast<uint64_t>(PlatformThread::CurrentId());
    header_->start_time = base::Time::Now().ToInternalValue();
    header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
    header_->slots = slots_;
    valid_ = true;
  } else {
    // This is a file with existing data. Perform basic consistency checks.
    if (header_->cookie != kHeaderCookie ||
        header_->slots != slots_ ||
        header_->start_time > base::Time::Now().ToInternalValue() ||
        stack_[0].time_ticks == 0)
      return;
    valid_ = true;
  }
}

ThreadActivityTracker::~ThreadActivityTracker() {}

void ThreadActivityTracker::RecordStart(const void* source,
                                        ActivityType activity,
                                        intptr_t method,
                                        uint64_t sequence) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // Get the current depth of the stack. No access to other memory guarded
  // by this variable is done here so a "relaxed" load is acceptable.
  uint32_t depth = header_->depth.load(std::memory_order_relaxed);

  // Handle the case where the stack depth has exceeded the storage capacity.
  // Extra entries will be lost, leaving only the base of the stack.
  if (depth >= slots_) {
    // Since no other memory is being modified, a "relaxed" store is
    // acceptable.
    header_->depth.store(depth + 1, std::memory_order_relaxed);
    return;
  }

  // Get a pointer to the next entry and fill it in. No atomicity is required
  // here because the memory is known only to this thread. It will be made
  // known to other threads once the depth is incremented.
  StackEntry* entry = &stack_[depth];
  entry->time_ticks = base::TimeTicks::Now().ToInternalValue();
  entry->activity_type = activity;
  entry->source_address = reinterpret_cast<intptr_t>(source);
  entry->method_address = method;
  entry->sequence_id = sequence;

  // Save the incremented depth. Because this guards the |entry| memory filled
  // above that may be read by another thread once the recorded depth changes,
  // a "release" store is required.
  header_->depth.store(depth + 1, std::memory_order_release);
}

void ThreadActivityTracker::RecordFinish(const void* source) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // Do an atomic decrement of the depth. No changes to stack entries guarded
  // by this variable are made here so a "relaxed" operation is acceptable.
  // |depth| will receive the value from before it was modified.
  uint32_t depth = header_->depth.fetch_sub(1, std::memory_order_relaxed);

  // Validate that everything is running correctly.
  DCHECK_LT(0U, depth);
  if (depth <= slots_) {
    DCHECK_EQ(reinterpret_cast<intptr_t>(source),
              stack_[depth - 1].source_address);
  }

  // The stack has shrunk, meaning that some other thread trying to copy the
  // contents for reporting purposes could get bad data. That thread would
  // have written a non-zero value into |unchanged|; clearing it here will
  // let that thread detect that something did change. It doesn't matter
  // when this is done relative to the atomic |depth| operation above so a
  // "relaxed" access is acceptable.
  header_->unchanged.store(0, std::memory_order_relaxed);
}

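// Illustrative pairing sketch (hypothetical caller; in practice the Scoped*
// helpers declared elsewhere do this automatically so the calls can never
// become unbalanced):
//
//   tracker->RecordStart(&some_object, ThreadActivityTracker::ACT_TASK,
//                        0 /* method */, 42 /* sequence */);
//   DoWork();  // |some_object| and DoWork() are made-up names.
//   tracker->RecordFinish(&some_object);
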
uint32_t ThreadActivityTracker::CopyStack(std::vector<StackEntry>* stack) {
  // It's possible for the data to change while reading it. Make several
  // attempts, but don't try forever.
  const int kMaxAttempts = 10;
  uint32_t depth;

  // Start with an empty return stack.
  stack->clear();

  // Stop here if the data isn't valid.
  if (!valid_)
    return 0;

  for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
    // Write a non-zero value to |unchanged| so it's possible to detect at
    // the end that nothing has changed since copying the data began.
    header_->unchanged.store(1, std::memory_order_relaxed);

    // Fetching the current depth also "acquires" the contents of the stack.
    depth = header_->depth.load(std::memory_order_acquire);
    if (depth == 0)
      return 0;

    // Copy the existing contents. Memcpy is used for speed.
    uint32_t count = std::min(depth, slots_);
    stack->resize(count);
    memcpy(&(*stack)[0], stack_, count * sizeof(StackEntry));

    // Check to make sure everything was unchanged during the copy.
    if (header_->unchanged.load(std::memory_order_relaxed))
      return depth;
  }

  // If all attempts failed, just return the depth with no content.
  stack->clear();
  return depth;
}

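// Illustrative reader-side sketch (hypothetical monitoring code, not part of
// this file): a monitoring thread can snapshot another thread's stack; a
// returned depth larger than the copied size means the stack overflowed its
// storage or the copy could not be completed:
//
//   std::vector<ThreadActivityTracker::StackEntry> stack;
//   uint32_t depth = tracker->CopyStack(&stack);
//   if (depth > stack.size()) {
//     // Only the base of the stack (if anything) was captured.
//   }
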
GlobalActivityTracker* GlobalActivityTracker::g_tracker_ = nullptr;

GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
    PersistentMemoryAllocator::Reference mem_reference,
    void* base,
    size_t size)
    : ThreadActivityTracker(base, size),
      mem_reference_(mem_reference),
      mem_base_(base) {}

GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
  // The global |g_tracker_| must point to the owner of this class since all
  // objects of this type must be destructed before |g_tracker_| can be
  // changed.
  DCHECK(g_tracker_);
  g_tracker_->ReturnTrackerMemory(this, mem_reference_, mem_base_);
}

// static
void GlobalActivityTracker::CreateWithAllocator(
    std::unique_ptr<PersistentMemoryAllocator> allocator,
    size_t stack_memory) {
  // There's no need to do anything with the result. It is self-managing.
  new GlobalActivityTracker(std::move(allocator), stack_memory);
}

// static
void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
                                                  uint64_t id,
                                                  StringPiece name,
                                                  size_t stack_memory) {
  CreateWithAllocator(
      WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)),
      stack_memory);
}

// static
void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
                                           size_t size,
                                           uint64_t id,
                                           StringPiece name,
                                           size_t stack_memory) {
  // Create the file, overwriting anything that was there previously, and set
  // the length. This will create a space that is zero-filled, a requirement
  // for operation.
  File file(file_path,
            File::FLAG_CREATE_ALWAYS | File::FLAG_READ | File::FLAG_WRITE);
  file.SetLength(size);

  // Map the file into memory and make it globally available.
  std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile());
  mapped_file->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
  CreateWithAllocator(WrapUnique(new FilePersistentMemoryAllocator(
                          std::move(mapped_file), id, name)),
                      stack_memory);
}

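// Illustrative startup sketch (the path, id, and sizes below are made-up
// example values, not defaults defined anywhere in this file):
//
//   GlobalActivityTracker::CreateWithFile(
//       FilePath(FILE_PATH_LITERAL("activity.dat")),
//       1 << 20 /* file size */, 123 /* id */, "ActivityTracking",
//       4 << 10 /* stack memory per thread */);
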
ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
  DCHECK(!this_thread_tracker_.Get());

  // The lock must be acquired to access the STL data structures.
  AutoLock auto_lock(lock_);

  PersistentMemoryAllocator::Reference mem_reference;
  void* mem_base;
  if (!available_memories_.empty()) {
    // There is a memory block that was previously released (and zeroed) so
    // just re-use that rather than allocating a new one.
    mem_reference = available_memories_.back();
    available_memories_.pop_back();
    mem_base = allocator_->GetAsObject<char>(mem_reference,
                                             kTypeIdActivityTrackerFree);
    DCHECK(mem_base);
    DCHECK_LE(stack_memory_, allocator_->GetAllocSize(mem_reference));
    allocator_->SetType(mem_reference, kTypeIdActivityTracker);
  } else {
    // Allocate a block of memory from the persistent segment.
    mem_reference = allocator_->Allocate(stack_memory_, kTypeIdActivityTracker);
    if (mem_reference) {
      // Success. Convert the reference to an actual memory address.
      mem_base =
          allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker);
    } else {
      // Failure. This should never happen.
      NOTREACHED();
      // But if it does, handle it gracefully by allocating the required
      // memory from the heap.
      mem_base = new char[stack_memory_];
      memset(mem_base, 0, stack_memory_);
    }
  }

  // Create a tracker with the acquired memory and set it as the tracker
  // for this particular thread in thread-local-storage.
  ManagedActivityTracker* tracker =
      new ManagedActivityTracker(mem_reference, mem_base, stack_memory_);
  DCHECK(tracker->is_valid());
  thread_trackers_.insert(tracker);
  this_thread_tracker_.Set(tracker);

  return tracker;
}

void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
  ThreadActivityTracker* tracker =
      reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
  if (tracker) {
    this_thread_tracker_.Free();
    delete tracker;
  }
}

GlobalActivityTracker::GlobalActivityTracker(
    std::unique_ptr<PersistentMemoryAllocator> allocator,
    size_t stack_memory)
    : allocator_(std::move(allocator)),
      stack_memory_(stack_memory),
      this_thread_tracker_(&OnTLSDestroy) {
  // Ensure the passed memory is valid and empty (the iterator finds nothing).
  uint32_t type;
  DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));

  // Ensure that there is no other global object and then make this one such.
  DCHECK(!g_tracker_);
  g_tracker_ = this;

  // Create a tracker for this thread since it is known to exist.
  CreateTrackerForCurrentThread();
}

GlobalActivityTracker::~GlobalActivityTracker() {
  DCHECK_EQ(g_tracker_, this);
  DCHECK_EQ(0U, thread_trackers_.size());
  g_tracker_ = nullptr;
}

void GlobalActivityTracker::ReturnTrackerMemory(
    ManagedActivityTracker* tracker,
    PersistentMemoryAllocator::Reference mem_reference,
    void* mem_base) {
  // Zero the memory so that it is ready for use if needed again later. It's
  // better to clear the memory now, when a thread is exiting, than to do it
  // when it is first needed by a thread doing actual work.
  memset(mem_base, 0, stack_memory_);

  // Access to STL structures requires a lock because this could get called
  // from any thread.
  AutoLock auto_lock(lock_);

  // Remove the destructed tracker from the set of known ones.
  DCHECK(ContainsKey(thread_trackers_, tracker));
  thread_trackers_.erase(tracker);

  // Deal with the memory that was used by the tracker.
  if (mem_reference) {
    // The memory was within the persistent memory allocator. Change its type
    // so that iteration won't find it.
    allocator_->SetType(mem_reference, kTypeIdActivityTrackerFree);
    // There is no way to free memory from a persistent allocator, so instead
    // keep it on the internal list of available memory blocks.
    DCHECK_LE(stack_memory_, allocator_->GetAllocSize(mem_reference));
    available_memories_.push_back(mem_reference);
  } else {
    // The memory was allocated from the process heap. This shouldn't happen
    // because the persistent memory segment should be big enough for all
    // thread stacks, but it's better to support falling back to allocation
    // from the heap rather than crash. Everything will work as normal but
    // the data won't be persisted.
    delete[] reinterpret_cast<char*>(mem_base);
  }
}

// static
void GlobalActivityTracker::OnTLSDestroy(void* value) {
  delete reinterpret_cast<ManagedActivityTracker*>(value);
}

ScopedTaskActivity::ScopedTaskActivity(const PendingTask& task)
    : GlobalActivityTracker::ScopedThreadActivity(
          task.posted_from.program_counter(),
          ThreadActivityTracker::ACT_TASK,
          0,
          task.sequence_num) {}

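// Illustrative call-site sketch (hypothetical; the actual integration lives
// in the task-execution code, not in this file):
//
//   void RunTask(const PendingTask& task) {
//     ScopedTaskActivity activity(task);
//     task.task.Run();  // Tracked between construction and destruction.
//   }
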
}  // namespace base