Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/debug/activity_tracker.h" | 5 #include "base/debug/activity_tracker.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 | 8 |
| 9 #include "base/debug/stack_trace.h" | 9 #include "base/debug/stack_trace.h" |
| 10 #include "base/files/file.h" | 10 #include "base/files/file.h" |
| 11 #include "base/files/file_path.h" | 11 #include "base/files/file_path.h" |
| 12 #include "base/files/memory_mapped_file.h" | 12 #include "base/files/memory_mapped_file.h" |
| 13 #include "base/logging.h" | 13 #include "base/logging.h" |
| 14 #include "base/memory/ptr_util.h" | 14 #include "base/memory/ptr_util.h" |
| 15 #include "base/memory/ptr_util.h" | |
| 15 #include "base/metrics/field_trial.h" | 16 #include "base/metrics/field_trial.h" |
| 16 #include "base/metrics/histogram_macros.h" | 17 #include "base/metrics/histogram_macros.h" |
| 17 #include "base/pending_task.h" | 18 #include "base/pending_task.h" |
| 18 #include "base/process/process.h" | 19 #include "base/process/process.h" |
| 19 #include "base/process/process_handle.h" | 20 #include "base/process/process_handle.h" |
| 20 #include "base/stl_util.h" | 21 #include "base/stl_util.h" |
| 21 #include "base/strings/string_util.h" | 22 #include "base/strings/string_util.h" |
| 22 #include "base/threading/platform_thread.h" | 23 #include "base/threading/platform_thread.h" |
| 23 | 24 |
| 24 namespace base { | 25 namespace base { |
| 25 namespace debug { | 26 namespace debug { |
| 26 | 27 |
| 27 namespace { | 28 namespace { |
| 28 | 29 |
| 29 // A number that identifies the memory as having been initialized. It's | 30 // A number that identifies the memory as having been initialized. It's |
| 30 // arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker). | 31 // arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker). |
| 31 // A version number is added on so that major structure changes won't try to | 32 // A version number is added on so that major structure changes won't try to |
| 32 // read an older version (since the cookie won't match). | 33 // read an older version (since the cookie won't match). |
| 33 const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2 | 34 const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2 |
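The versioned-cookie scheme described in that comment is easy to see in isolation. A minimal sketch, assuming only the constants shown above (the check function and its use are illustrative, not the tracker's real layout):

```cpp
#include <cstdint>
#include <cstdio>

// Stand-in for the tracker's versioned cookie: the base constant identifies
// the structure, and the added version rejects older layouts.
constexpr uint32_t kBaseCookie = 0xC0029B24u;
constexpr uint32_t kVersion = 2;
constexpr uint32_t kCookie = kBaseCookie + kVersion;

bool IsCompatible(uint32_t stored_cookie) {
  // A v1 writer stored kBaseCookie + 1, so this comparison fails and the
  // reader refuses to interpret the old structure.
  return stored_cookie == kCookie;
}

int main() {
  printf("v2 ok: %d, v1 rejected: %d\n",
         IsCompatible(kBaseCookie + 2), IsCompatible(kBaseCookie + 1));
}
```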
| 34 | 35 |
| 35 // The minimum depth a stack should support. | 36 // The minimum depth a stack should support. |
| 36 const int kMinStackDepth = 2; | 37 const int kMinStackDepth = 2; |
| 37 | 38 |
| 38 // The amount of memory set aside for holding arbitrary user data (key/value | 39 // The amount of memory set aside for holding arbitrary user data (key/value |
| 39 // pairs) globally or associated with ActivityData entries. | 40 // pairs) globally or associated with ActivityData entries. |
| 40 const size_t kUserDataSize = 1024; // bytes | 41 const size_t kUserDataSize = 1024; // bytes |
| 41 const size_t kGlobalDataSize = 1024; // bytes | 42 const size_t kGlobalDataSize = 4096; // bytes |
| 42 const size_t kMaxUserDataNameLength = | 43 const size_t kMaxUserDataNameLength = |
| 43 static_cast<size_t>(std::numeric_limits<uint8_t>::max()); | 44 static_cast<size_t>(std::numeric_limits<uint8_t>::max()); |
| 44 | 45 |
| 45 union ThreadRef { | 46 union ThreadRef { |
| 46 int64_t as_id; | 47 int64_t as_id; |
| 47 #if defined(OS_WIN) | 48 #if defined(OS_WIN) |
| 48 // On Windows, the handle itself is often a pseudo-handle with a common | 49 // On Windows, the handle itself is often a pseudo-handle with a common |
| 49 // value meaning "this thread" and so the thread-id is used. The former | 50 // value meaning "this thread" and so the thread-id is used. The former |
| 50 // can be converted to a thread-id with a system call. | 51 // can be converted to a thread-id with a system call. |
| 51 PlatformThreadId as_tid; | 52 PlatformThreadId as_tid; |
| (...skipping 135 matching lines...) | |
| 187 size_t i; | 188 size_t i; |
| 188 for (i = 1; i < stack_depth && i < kActivityCallStackSize; ++i) { | 189 for (i = 1; i < stack_depth && i < kActivityCallStackSize; ++i) { |
| 189 activity->call_stack[i - 1] = reinterpret_cast<uintptr_t>(stack_addrs[i]); | 190 activity->call_stack[i - 1] = reinterpret_cast<uintptr_t>(stack_addrs[i]); |
| 190 } | 191 } |
| 191 activity->call_stack[i - 1] = 0; | 192 activity->call_stack[i - 1] = 0; |
| 192 #else | 193 #else |
| 193 activity->call_stack[0] = 0; | 194 activity->call_stack[0] = 0; |
| 194 #endif | 195 #endif |
| 195 } | 196 } |
| 196 | 197 |
| 197 ActivitySnapshot::ActivitySnapshot() {} | 198 ActivityUserData::TypedValue::TypedValue() {} |
| 198 ActivitySnapshot::~ActivitySnapshot() {} | 199 ActivityUserData::TypedValue::TypedValue(const TypedValue& other) = default; |
| 200 ActivityUserData::TypedValue::~TypedValue() {} | |
| 201 | |
| 202 StringPiece ActivityUserData::TypedValue::Get() const { | |
| 203 DCHECK_EQ(RAW_VALUE, type); | |
| 204 return long_value; | |
| 205 } | |
| 206 | |
| 207 StringPiece ActivityUserData::TypedValue::GetReference() const { | |
| 208 DCHECK_EQ(RAW_VALUE_REFERENCE, type); | |
| 209 return ref_value; | |
| 210 } | |
| 211 | |
| 212 StringPiece ActivityUserData::TypedValue::GetString() const { | |
| 213 DCHECK_EQ(STRING_VALUE, type); | |
| 214 return long_value; | |
| 215 } | |
| 216 | |
| 217 StringPiece ActivityUserData::TypedValue::GetStringReference() const { | |
| 218 DCHECK_EQ(STRING_VALUE_REFERENCE, type); | |
| 219 return ref_value; | |
| 220 } | |
| 221 | |
| 222 bool ActivityUserData::TypedValue::GetBool() const { | |
| 223 DCHECK_EQ(BOOL_VALUE, type); | |
| 224 return short_value != 0; | |
| 225 } | |
| 226 | |
| 227 char ActivityUserData::TypedValue::GetChar() const { | |
| 228 DCHECK_EQ(CHAR_VALUE, type); | |
| 229 return static_cast<char>(short_value); | |
| 230 } | |
| 231 | |
| 232 int64_t ActivityUserData::TypedValue::GetInt() const { | |
| 233 DCHECK_EQ(SIGNED_VALUE, type); | |
| 234 return static_cast<int64_t>(short_value); | |
| 235 } | |
| 236 | |
| 237 uint64_t ActivityUserData::TypedValue::GetUint() const { | |
| 238 DCHECK_EQ(UNSIGNED_VALUE, type); | |
| 239 return static_cast<uint64_t>(short_value); | |
| 240 } | |
| 199 | 241 |
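The getters added here follow a tagged-union pattern: each accessor asserts the stored type tag before reinterpreting the shared payload fields (short_value for numerics, long_value for strings, ref_value for references). A self-contained sketch of the same pattern, with hypothetical names rather than the real class:

```cpp
#include <cassert>
#include <cstdint>
#include <string>

// Hypothetical miniature of the TypedValue idea: one payload, several typed
// views, each guarded by a type tag set at store time.
struct MiniTypedValue {
  enum Type { BOOL_VALUE, SIGNED_VALUE, STRING_VALUE } type;
  uint64_t short_value = 0;  // numeric payloads
  std::string long_value;    // string payloads

  bool GetBool() const {
    assert(type == BOOL_VALUE);  // wrong-type access is a caller bug
    return short_value != 0;
  }
  int64_t GetInt() const {
    assert(type == SIGNED_VALUE);
    return static_cast<int64_t>(short_value);
  }
  const std::string& GetString() const {
    assert(type == STRING_VALUE);
    return long_value;
  }
};

int main() {
  MiniTypedValue v;
  v.type = MiniTypedValue::SIGNED_VALUE;
  v.short_value = static_cast<uint64_t>(-42);  // two's-complement round-trip
  assert(v.GetInt() == -42);
}
```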
| 200 ActivityUserData::ValueInfo::ValueInfo() {} | 242 ActivityUserData::ValueInfo::ValueInfo() {} |
| 201 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; | 243 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; |
| 202 ActivityUserData::ValueInfo::~ValueInfo() {} | 244 ActivityUserData::ValueInfo::~ValueInfo() {} |
| 203 | 245 |
| 246 std::atomic<uint32_t> ActivityUserData::next_id_; | |

> manzagop (departed), 2016/11/29 22:42:58: Did you mean to set a value?
> bcwhite, 2016/12/01 16:29:39: Adding "=1" caused a compile error on some platforms.
| 247 | |
| 204 ActivityUserData::ActivityUserData(void* memory, size_t size) | 248 ActivityUserData::ActivityUserData(void* memory, size_t size) |
| 205 : memory_(static_cast<char*>(memory)), available_(size) {} | 249 : memory_(reinterpret_cast<char*>(memory)), |
| 250 available_(size), | |
| 251 id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) { | |
| 252 // It's possible that no user data is being stored. | |
| 253 if (!memory_) | |
| 254 return; | |
| 255 | |
| 256 DCHECK_LT(kMemoryAlignment, available_); | |
| 257 if (id_->load(std::memory_order_relaxed) == 0) { | |
| 258 // Generate a new ID and store it in the first 32-bit word of memory_. | |
| 259 // |id_| must be non-zero for non-sink instances. | |
| 260 uint32_t id; | |
| 261 while ((id = next_id_.fetch_add(1, std::memory_order_relaxed)) == 0) | |
| 262 ; | |
| 263 id_->store(id, std::memory_order_relaxed); | |
| 264 DCHECK_NE(0U, id_->load(std::memory_order_relaxed)); | |
| 265 } | |
| 266 memory_ += kMemoryAlignment; | |
| 267 available_ -= kMemoryAlignment; | |
| 268 | |
| 269 // If there is already data present, load that. This allows the same class | |
| 270 // to be used for analysis through snapshots. | |
| 271 ImportExistingData(); | |

> manzagop (departed), 2016/11/29 22:42:58: Worth avoiding the import when the id was initially generated?
> bcwhite, 2016/12/01 16:29:39: It would require making a copy of the initial ID value…
| 272 } | |
| 206 | 273 |
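The constructor above generates IDs with a fetch_add loop that skips the reserved zero value (zero means "uninitialized memory"). A standalone sketch of that never-zero allocation, assuming the counter is zero-initialized like next_id_:

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>

std::atomic<uint32_t> g_next_id;  // zero-initialized, like next_id_

// Returns a process-unique, never-zero ID. If fetch_add ever wraps around
// to zero, the loop simply takes the next value.
uint32_t TakeNonZeroId() {
  uint32_t id;
  while ((id = g_next_id.fetch_add(1, std::memory_order_relaxed)) == 0) {
    // Skip the reserved zero value.
  }
  return id;
}

int main() {
  printf("%u %u\n", TakeNonZeroId(), TakeNonZeroId());  // prints "1 2"
}
```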
| 207 ActivityUserData::~ActivityUserData() {} | 274 ActivityUserData::~ActivityUserData() {} |
| 208 | 275 |
| 209 void ActivityUserData::Set(StringPiece name, | 276 void ActivityUserData::Set(StringPiece name, |
| 210 ValueType type, | 277 ValueType type, |
| 211 const void* memory, | 278 const void* memory, |
| 212 size_t size) { | 279 size_t size) { |
| 213 DCHECK(thread_checker_.CalledOnValidThread()); | 280 DCHECK(thread_checker_.CalledOnValidThread()); |
| 214 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length()); | 281 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length()); |
| 215 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1), | 282 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1), |
| (...skipping 16 matching lines...) | |
| 232 // because there are no alignment constraints on strings, it's set tight | 299 // because there are no alignment constraints on strings, it's set tight |
| 233 // against the header. Its extent (the reserved space, even if it's not | 300 // against the header. Its extent (the reserved space, even if it's not |
| 234 // all used) is calculated so that, when pressed against the header, the | 301 // all used) is calculated so that, when pressed against the header, the |
| 235 // following field will be aligned properly. | 302 // following field will be aligned properly. |
| 236 size_t name_size = name.length(); | 303 size_t name_size = name.length(); |
| 237 size_t name_extent = | 304 size_t name_extent = |
| 238 RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) - | 305 RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) - |
| 239 sizeof(Header); | 306 sizeof(Header); |
| 240 size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment); | 307 size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment); |
| 241 | 308 |
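The extent arithmetic here is easier to follow with concrete numbers. A worked sketch using the same rounding contract; kMemoryAlignment = 8 and a 16-byte Header are assumptions for illustration (both are defined elsewhere in the real code):

```cpp
#include <cstddef>
#include <cstdio>

// Same helper contract as the RoundUpToAlignment used above.
constexpr size_t RoundUp(size_t size, size_t alignment) {
  return (size + (alignment - 1)) & ~(alignment - 1);
}

int main() {
  constexpr size_t kAlign = 8;        // assumed kMemoryAlignment
  constexpr size_t kHeaderSize = 16;  // hypothetical sizeof(Header)
  size_t name_size = 5;               // e.g. a name like "count"

  // The name extent is padded so the value that follows lands aligned.
  size_t name_extent = RoundUp(kHeaderSize + name_size, kAlign) - kHeaderSize;
  size_t value_extent = RoundUp(12, kAlign);  // a 12-byte value

  printf("name_extent=%zu value_extent=%zu\n", name_extent, value_extent);
  // name_extent=8 (5 bytes of name + 3 of padding), value_extent=16
}
```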
| 242 // The "basic size" is the minimum size of the record. It's possible that | 309 // The "full size" is the size for storing the entire value. |
| 243 // lengthy values will get truncated but there must be at least some bytes | |
| 244 // available. | |
| 245 size_t basic_size = sizeof(Header) + name_extent + kMemoryAlignment; | |
| 246 if (basic_size > available_) | |
| 247 return; // No space to store even the smallest value. | |
| 248 | |
| 249 // The "full size" is the size for storing the entire value, truncated | |
| 250 // to the amount of available memory. | |
| 251 size_t full_size = | 310 size_t full_size = |
| 252 std::min(sizeof(Header) + name_extent + value_extent, available_); | 311 std::min(sizeof(Header) + name_extent + value_extent, available_); |
| 312 | |
| 313 // If the value is actually a single byte, see if it can be stuffed at the | |
| 314 // end of the name extent rather than wasting kMemoryAlignment bytes. | |
| 315 if (size == 1 && name_extent > name_size) { | |
| 316 full_size = sizeof(Header) + name_extent; | |

> manzagop (departed), 2016/11/29 22:42:58: std::min(sizeof(Header) + name_extent, available_)
> bcwhite, 2016/12/01 16:29:39: Done, but with a separate check placed above.
| 317 --name_extent; | |
| 318 } | |
| 319 | |
| 320 // Truncate the stored size to the amount of available memory. Stop now if | |
| 321 // there's not any room for even part of the value. | |
| 253 size = std::min(full_size - sizeof(Header) - name_extent, size); | 322 size = std::min(full_size - sizeof(Header) - name_extent, size); |

> manzagop (departed), 2016/11/29 22:42:58: I guess full size could be equal to available_, which could leave no room for the value?
> bcwhite, 2016/12/01 16:29:39: Done as part of the above.
| 323 if (size == 0) | |
| 324 return; | |
| 254 | 325 |
| 255 // Allocate a chunk of memory. | 326 // Allocate a chunk of memory. |
| 256 Header* header = reinterpret_cast<Header*>(memory_); | 327 Header* header = reinterpret_cast<Header*>(memory_); |
| 257 memory_ += full_size; | 328 memory_ += full_size; |
| 258 available_ -= full_size; | 329 available_ -= full_size; |
| 259 | 330 |
| 260 // Datafill the header and name records. Memory must be zeroed. The |type| | 331 // Datafill the header and name records. Memory must be zeroed. The |type| |
| 261 // is written last, atomically, to release all the other values. | 332 // is written last, atomically, to release all the other values. |
| 262 DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed)); | 333 DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed)); |
| 263 DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed)); | 334 DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed)); |
| (...skipping 32 matching lines...) | |
| 296 void ActivityUserData::SetReference(StringPiece name, | 367 void ActivityUserData::SetReference(StringPiece name, |
| 297 ValueType type, | 368 ValueType type, |
| 298 const void* memory, | 369 const void* memory, |
| 299 size_t size) { | 370 size_t size) { |
| 300 ReferenceRecord rec; | 371 ReferenceRecord rec; |
| 301 rec.address = reinterpret_cast<uintptr_t>(memory); | 372 rec.address = reinterpret_cast<uintptr_t>(memory); |
| 302 rec.size = size; | 373 rec.size = size; |
| 303 Set(name, type, &rec, sizeof(rec)); | 374 Set(name, type, &rec, sizeof(rec)); |
| 304 } | 375 } |
| 305 | 376 |
| 377 void ActivityUserData::ImportExistingData() const { | |
| 378 while (available_ > 0) { | |

> manzagop (departed), 2016/11/29 22:42:58: 0 -> sizeof(Header)?
> bcwhite, 2016/12/01 16:29:39: Done.
| 379 Header* header = reinterpret_cast<Header*>(memory_); | |
| 380 ValueType type = | |
| 381 static_cast<ValueType>(header->type.load(std::memory_order_acquire)); | |
| 382 if (type == END_OF_VALUES) | |
| 383 return; | |
| 384 if (header->record_size > available_) | |
| 385 NOTREACHED(); // return; | |
| 386 | |
| 387 size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size, | |
| 388 kMemoryAlignment); | |
| 389 if (header->record_size == value_offset && | |
| 390 header->value_size.load(std::memory_order_relaxed) == 1) { | |
| 391 value_offset -= 1; | |
| 392 } | |
| 393 if (value_offset + header->value_size > header->record_size) | |
| 394 NOTREACHED(); // return; | |
| 395 | |
| 396 ValueInfo info; | |
| 397 info.name = StringPiece(memory_ + sizeof(Header), header->name_size); | |
| 398 info.type = type; | |
| 399 info.memory = memory_ + value_offset; | |
| 400 info.size_ptr = &header->value_size; | |
| 401 info.extent = header->record_size - value_offset; | |
| 402 | |
| 403 StringPiece key(info.name); | |
| 404 values_.insert(std::make_pair(key, std::move(info))); | |

> manzagop (departed), 2016/11/29 22:42:58: Do you want to support successive calls of ImportExistingData?
> bcwhite, 2016/12/01 16:29:39: It's called as part of every snapshot operation in…
> manzagop (departed), 2016/12/02 22:13:32: Hm my comment was totally off! You scan from available_…
> bcwhite, 2016/12/08 21:30:56: Ah. The |memory_| pointer always points to the end…
| 405 | |
| 406 memory_ += header->record_size; | |
| 407 available_ -= header->record_size; | |
| 408 } | |
| 409 } | |
| 410 | |
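ImportExistingData pairs with Set(): the writer fills a record's fields and publishes the type tag last with a release store, so a reader that sees the tag via an acquire load also sees the fields. A minimal sketch of that handshake, with a hypothetical fixed-size record instead of the real Header:

```cpp
#include <atomic>
#include <cstdint>

// Hypothetical record illustrating the acquire/release pairing used above:
// writers fill the payload first, then publish |type| last.
struct Record {
  std::atomic<uint32_t> type;  // 0 acts as the END_OF_VALUES marker
  uint32_t payload;
};

Record g_records[8];  // zero-initialized: every slot starts as end-marker

void Publish(Record* r, uint32_t payload) {
  r->payload = payload;                         // fill fields first
  r->type.store(1, std::memory_order_release);  // publish last
}

uint32_t SumPublished() {
  uint32_t sum = 0;
  for (Record& r : g_records) {
    // Acquire pairs with the writer's release: if |type| is visible,
    // |payload| is too.
    if (r.type.load(std::memory_order_acquire) == 0)
      break;  // end of values
    sum += r.payload;
  }
  return sum;
}

int main() {
  Publish(&g_records[0], 7);
  Publish(&g_records[1], 35);
  return SumPublished() == 42 ? 0 : 1;
}
```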
| 411 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const { | |
| 412 DCHECK(output_snapshot); | |
| 413 DCHECK(output_snapshot->empty()); | |
| 414 | |
| 415 // Find any new data that may have been added by an active instance of this | |
| 416 // class that is adding records. | |
| 417 ImportExistingData(); | |
| 418 | |
| 419 for (const auto& entry : values_) { | |
| 420 TypedValue value; | |
| 421 value.type = entry.second.type; | |
| 422 DCHECK_GE(entry.second.extent, | |
| 423 entry.second.size_ptr->load(std::memory_order_relaxed)); | |
| 424 | |
| 425 switch (entry.second.type) { | |
| 426 case RAW_VALUE: | |
| 427 case STRING_VALUE: | |
| 428 value.long_value = | |
| 429 std::string(reinterpret_cast<char*>(entry.second.memory), | |
| 430 entry.second.size_ptr->load(std::memory_order_relaxed)); | |
| 431 break; | |
| 432 case RAW_VALUE_REFERENCE: | |
| 433 case STRING_VALUE_REFERENCE: { | |
| 434 ReferenceRecord* ref = | |
| 435 reinterpret_cast<ReferenceRecord*>(entry.second.memory); | |
| 436 value.ref_value = StringPiece( | |
| 437 reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)), | |
| 438 static_cast<size_t>(ref->size)); | |
| 439 } break; | |
| 440 case BOOL_VALUE: | |
| 441 case CHAR_VALUE: | |
| 442 value.short_value = *reinterpret_cast<char*>(entry.second.memory); | |
| 443 break; | |
| 444 case SIGNED_VALUE: | |
| 445 case UNSIGNED_VALUE: | |
| 446 value.short_value = *reinterpret_cast<uint64_t*>(entry.second.memory); | |
| 447 break; | |
| 448 case END_OF_VALUES: // Included for completeness purposes. | |
| 449 NOTREACHED(); | |
| 450 } | |
| 451 auto inserted = output_snapshot->insert( | |
| 452 std::make_pair(entry.second.name.as_string(), std::move(value))); | |
| 453 DCHECK(inserted.second); // True if inserted, false if existed. | |
| 454 } | |
| 455 | |
| 456 return true; | |
| 457 } | |
| 458 | |
| 459 ActivitySnapshot::ActivitySnapshot() {} | |
| 460 ActivitySnapshot::~ActivitySnapshot() {} | |
| 461 | |
| 306 // This information is kept for every thread that is tracked. It is filled | 462 // This information is kept for every thread that is tracked. It is filled |
| 307 // the very first time the thread is seen. All fields must be of exact sizes | 463 // the very first time the thread is seen. All fields must be of exact sizes |
| 308 // so there is no issue moving between 32 and 64-bit builds. | 464 // so there is no issue moving between 32 and 64-bit builds. |
| 309 struct ThreadActivityTracker::Header { | 465 struct ThreadActivityTracker::Header { |
| 310 // Expected size for 32/64-bit check. | 466 // Expected size for 32/64-bit check. |
| 311 static constexpr size_t kExpectedInstanceSize = 80; | 467 static constexpr size_t kExpectedInstanceSize = 80; |
| 312 | 468 |
| 313 // This unique number indicates a valid initialization of the memory. | 469 // This unique number indicates a valid initialization of the memory. |
| 314 std::atomic<uint32_t> cookie; | 470 std::atomic<uint32_t> cookie; |
| 315 | 471 |
| (...skipping 59 matching lines...) | |
| 375 tracker_->PopActivity(activity_id_); | 531 tracker_->PopActivity(activity_id_); |
| 376 } | 532 } |
| 377 | 533 |
| 378 void ThreadActivityTracker::ScopedActivity::ChangeTypeAndData( | 534 void ThreadActivityTracker::ScopedActivity::ChangeTypeAndData( |
| 379 Activity::Type type, | 535 Activity::Type type, |
| 380 const ActivityData& data) { | 536 const ActivityData& data) { |
| 381 if (tracker_) | 537 if (tracker_) |
| 382 tracker_->ChangeActivity(activity_id_, type, data); | 538 tracker_->ChangeActivity(activity_id_, type, data); |
| 383 } | 539 } |
| 384 | 540 |
| 385 ActivityUserData& ThreadActivityTracker::ScopedActivity::user_data() { | |
| 386 if (!user_data_) { | |
| 387 if (tracker_) | |
| 388 user_data_ = tracker_->GetUserData(activity_id_); | |
| 389 else | |
| 390 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0); | |
| 391 } | |
| 392 return *user_data_; | |
| 393 } | |
| 394 | |
| 395 ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size) | 541 ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size) |
| 396 : header_(static_cast<Header*>(base)), | 542 : header_(static_cast<Header*>(base)), |
| 397 stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) + | 543 stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) + |
| 398 sizeof(Header))), | 544 sizeof(Header))), |
| 399 stack_slots_( | 545 stack_slots_( |
| 400 static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) { | 546 static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) { |
| 401 DCHECK(thread_checker_.CalledOnValidThread()); | 547 DCHECK(thread_checker_.CalledOnValidThread()); |
| 402 | 548 |
| 403 // Verify the parameters but fail gracefully if they're not valid so that | 549 // Verify the parameters but fail gracefully if they're not valid so that |
| 404 // production code based on external inputs will not crash. IsValid() will | 550 // production code based on external inputs will not crash. IsValid() will |
| (...skipping 135 matching lines...) | |
| 540 header_->current_depth.fetch_sub(1, std::memory_order_relaxed) - 1; | 686 header_->current_depth.fetch_sub(1, std::memory_order_relaxed) - 1; |
| 541 | 687 |
| 542 // Validate that everything is running correctly. | 688 // Validate that everything is running correctly. |
| 543 DCHECK_EQ(id, depth); | 689 DCHECK_EQ(id, depth); |
| 544 | 690 |
| 545 // A thread-checker creates a lock to check the thread-id which means | 691 // A thread-checker creates a lock to check the thread-id which means |
| 546 // re-entry into this code if lock acquisitions are being tracked. | 692 // re-entry into this code if lock acquisitions are being tracked. |
| 547 DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE || | 693 DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE || |
| 548 thread_checker_.CalledOnValidThread()); | 694 thread_checker_.CalledOnValidThread()); |
| 549 | 695 |
| 550 // Check if there was any user-data memory. It isn't free'd until later | |
| 551 // because the call to release it can push something on the stack. | |
| 552 PersistentMemoryAllocator::Reference user_data = stack_[depth].user_data; | |
| 553 stack_[depth].user_data = 0; | |
| 554 | |
| 555 // The stack has shrunk meaning that some other thread trying to copy the | 696 // The stack has shrunk meaning that some other thread trying to copy the |
| 556 // contents for reporting purposes could get bad data. That thread would | 697 // contents for reporting purposes could get bad data. That thread would |
| 557 // have written a non-zero value into |stack_unchanged|; clearing it here | 698 // have written a non-zero value into |stack_unchanged|; clearing it here |
| 558 // will let that thread detect that something did change. This needs to | 699 // will let that thread detect that something did change. This needs to |
| 559 // happen after the atomic |depth| operation above so a "release" store | 700 // happen after the atomic |depth| operation above so a "release" store |
| 560 // is required. | 701 // is required. |
| 561 header_->stack_unchanged.store(0, std::memory_order_release); | 702 header_->stack_unchanged.store(0, std::memory_order_release); |
| 562 | |
| 563 // Release resources located above. All stack processing is done so it's | |
| 564 // safe if some outside code does another push. | |
| 565 if (user_data) | |
| 566 GlobalActivityTracker::Get()->ReleaseUserDataMemory(&user_data); | |
| 567 } | 703 } |
| 568 | 704 |
| 569 std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData( | 705 std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData( |
| 570 ActivityId id) { | 706 ActivityId id, |
| 707 ActivityTrackerMemoryAllocator* allocator) { | |
| 571 // User-data is only stored for activities actually held in the stack. | 708 // User-data is only stored for activities actually held in the stack. |
| 572 if (id < stack_slots_) { | 709 if (id < stack_slots_) { |
| 710 // Don't allow user data for lock acquisition as recursion may occur. | |
| 711 if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) { | |
| 712 NOTREACHED(); | |
| 713 return MakeUnique<ActivityUserData>(nullptr, 0); | |
| 714 } | |
| 715 | |
| 716 // Get (or reuse) a block of memory and create a real UserData object | |
| 717 // on it. | |
| 718 PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference(); | |
| 573 void* memory = | 719 void* memory = |
| 574 GlobalActivityTracker::Get()->GetUserDataMemory(&stack_[id].user_data); | 720 allocator->GetAsArray<char>(ref, PersistentMemoryAllocator::kSizeAny); |
| 575 if (memory) | 721 if (memory) { |
| 576 return MakeUnique<ActivityUserData>(memory, kUserDataSize); | 722 std::unique_ptr<ActivityUserData> user_data = |
| 723 MakeUnique<ActivityUserData>(memory, kUserDataSize); | |
| 724 stack_[id].user_data_ref = ref; | |
| 725 stack_[id].user_data_id = user_data->id(); | |
| 726 return user_data; | |
| 727 } | |
| 577 } | 728 } |
| 578 | 729 |
| 579 // Return a dummy object that will still accept (but ignore) Set() calls. | 730 // Return a dummy object that will still accept (but ignore) Set() calls. |
| 580 return MakeUnique<ActivityUserData>(nullptr, 0); | 731 return MakeUnique<ActivityUserData>(nullptr, 0); |
| 581 } | 732 } |
| 582 | 733 |
| 734 bool ThreadActivityTracker::HasUserData(ActivityId id) { | |
| 735 // User-data is only stored for activities actually held in the stack. | |
| 736 return (id < stack_slots_ && stack_[id].user_data_ref); | |
| 737 } | |
| 738 | |
| 739 void ThreadActivityTracker::ReleaseUserData( | |
| 740 ActivityId id, | |
| 741 ActivityTrackerMemoryAllocator* allocator) { | |
| 742 // User-data is only stored for activities actually held in the stack. | |
| 743 if (id < stack_slots_ && stack_[id].user_data_ref) { | |
| 744 allocator->ReleaseObjectReference(stack_[id].user_data_ref); | |
| 745 stack_[id].user_data_ref = 0; | |
| 746 } | |
| 747 } | |
| 748 | |
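GetUserData and ReleaseUserData recycle persistent-memory blocks through an ActivityTrackerMemoryAllocator. A minimal sketch of the get/release reference-pool idea, with a hypothetical in-memory pool standing in for the persistent allocator:

```cpp
#include <cstdint>
#include <vector>

// Hypothetical miniature of the reuse pattern above: references to fixed
// blocks are handed out, and released references go back on a free list.
class MiniPool {
 public:
  uint32_t GetObjectReference() {
    if (!free_.empty()) {
      uint32_t ref = free_.back();
      free_.pop_back();
      return ref;  // reuse a previously released block
    }
    return next_ref_++;  // "allocate" a fresh one
  }
  void ReleaseObjectReference(uint32_t ref) { free_.push_back(ref); }

 private:
  uint32_t next_ref_ = 1;  // zero stays reserved as "no reference"
  std::vector<uint32_t> free_;
};

int main() {
  MiniPool pool;
  uint32_t a = pool.GetObjectReference();  // 1
  pool.ReleaseObjectReference(a);
  uint32_t b = pool.GetObjectReference();  // 1 again: reused
  return a == b ? 0 : 1;
}
```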
| 583 bool ThreadActivityTracker::IsValid() const { | 749 bool ThreadActivityTracker::IsValid() const { |
| 584 if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie || | 750 if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie || |
| 585 header_->process_id.load(std::memory_order_relaxed) == 0 || | 751 header_->process_id.load(std::memory_order_relaxed) == 0 || |
| 586 header_->thread_ref.as_id == 0 || | 752 header_->thread_ref.as_id == 0 || |
| 587 header_->start_time == 0 || | 753 header_->start_time == 0 || |
| 588 header_->start_ticks == 0 || | 754 header_->start_ticks == 0 || |
| 589 header_->stack_slots != stack_slots_ || | 755 header_->stack_slots != stack_slots_ || |
| 590 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { | 756 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { |
| 591 return false; | 757 return false; |
| 592 } | 758 } |
| 593 | 759 |
| 594 return valid_; | 760 return valid_; |
| 595 } | 761 } |
| 596 | 762 |
| 597 bool ThreadActivityTracker::Snapshot(ActivitySnapshot* output_snapshot) const { | 763 bool ThreadActivityTracker::CreateSnapshot( |
| 764 ActivitySnapshot* output_snapshot) const { | |
| 598 DCHECK(output_snapshot); | 765 DCHECK(output_snapshot); |
| 599 | 766 |
| 600 // There is no "called on valid thread" check for this method as it can be | 767 // There is no "called on valid thread" check for this method as it can be |
| 601 // called from other threads or even other processes. It is also the reason | 768 // called from other threads or even other processes. It is also the reason |
| 602 // why atomic operations must be used in certain places above. | 769 // why atomic operations must be used in certain places above. |
| 603 | 770 |
| 604 // It's possible for the data to change while reading it in such a way that it | 771 // It's possible for the data to change while reading it in such a way that it |
| 605 // invalidates the read. Make several attempts but don't try forever. | 772 // invalidates the read. Make several attempts but don't try forever. |
| 606 const int kMaxAttempts = 10; | 773 const int kMaxAttempts = 10; |
| 607 uint32_t depth; | 774 uint32_t depth; |
| (...skipping 95 matching lines...) | |
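The bounded-retry read described above can be sketched with a single "unchanged" flag standing in for the header's |stack_unchanged| field (simplified to one tracked value; the real code also validates depth and copies the whole stack):

```cpp
#include <atomic>
#include <cstdint>

std::atomic<uint32_t> g_unchanged;  // cleared by writers on every pop
std::atomic<uint32_t> g_value;      // stand-in for the tracked state

// Reader: retry a bounded number of times until a read completes with no
// concurrent modification, mirroring the kMaxAttempts loop above.
bool ReadStable(uint32_t* out) {
  const int kMaxAttempts = 10;
  for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
    g_unchanged.store(1, std::memory_order_relaxed);
    uint32_t copy = g_value.load(std::memory_order_acquire);
    // If no writer cleared the flag while we copied, the copy is coherent.
    if (g_unchanged.load(std::memory_order_acquire) == 1) {
      *out = copy;
      return true;
    }
  }
  return false;  // don't try forever
}

int main() {
  g_value.store(5, std::memory_order_relaxed);
  uint32_t v = 0;
  return ReadStable(&v) && v == 5 ? 0 : 1;
}
```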
| 703 } | 870 } |
| 704 | 871 |
| 705 // static | 872 // static |
| 706 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { | 873 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { |
| 707 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header); | 874 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header); |
| 708 } | 875 } |
| 709 | 876 |
| 710 | 877 |
| 711 GlobalActivityTracker* GlobalActivityTracker::g_tracker_ = nullptr; | 878 GlobalActivityTracker* GlobalActivityTracker::g_tracker_ = nullptr; |
| 712 | 879 |
| 880 GlobalActivityTracker::ScopedThreadActivity::ScopedThreadActivity( | |
| 881 const void* program_counter, | |
| 882 const void* origin, | |
| 883 Activity::Type type, | |
| 884 const ActivityData& data, | |
| 885 bool lock_allowed) | |
| 886 : ThreadActivityTracker::ScopedActivity( | |
| 887 GetOrCreateTracker(lock_allowed), | |
| 888 program_counter, | |
| 889 origin, | |
| 890 type, | |
| 891 data) {} | |
| 892 | |
| 893 GlobalActivityTracker::ScopedThreadActivity::~ScopedThreadActivity() { | |
| 894 if (tracker_ && tracker_->HasUserData(activity_id_)) { | |
| 895 GlobalActivityTracker* global = GlobalActivityTracker::Get(); | |
| 896 AutoLock lock(global->user_data_allocator_lock_); | |
| 897 tracker_->ReleaseUserData(activity_id_, &global->user_data_allocator_); | |
| 898 } | |
| 899 } | |
| 900 | |
| 901 ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() { | |
| 902 if (!user_data_) { | |
| 903 if (tracker_) { | |
| 904 GlobalActivityTracker* global = GlobalActivityTracker::Get(); | |
| 905 AutoLock lock(global->user_data_allocator_lock_); | |
| 906 user_data_ = | |
| 907 tracker_->GetUserData(activity_id_, &global->user_data_allocator_); | |
| 908 } else { | |
| 909 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0); | |
| 910 } | |
| 911 } | |
| 912 return *user_data_; | |
| 913 } | |
| 914 | |
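user_data() lazily allocates on first use and otherwise hands back a dummy backed by null memory, so call sites never need to branch. A sketch of that null-object fallback with a hypothetical miniature of ActivityUserData:

```cpp
#include <cstdio>
#include <memory>

// Hypothetical stand-in for ActivityUserData: writes to a null-backed
// instance are accepted but ignored, so call sites need no checks.
class MiniUserData {
 public:
  explicit MiniUserData(char* memory) : memory_(memory) {}
  void SetChar(char c) {
    if (!memory_)
      return;  // dummy instance: ignore the write
    *memory_ = c;
  }

 private:
  char* memory_;
};

int main() {
  char backing = 0;
  auto real = std::make_unique<MiniUserData>(&backing);
  auto dummy = std::make_unique<MiniUserData>(nullptr);
  real->SetChar('x');   // stored
  dummy->SetChar('x');  // safely ignored
  printf("%c\n", backing);
}
```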
| 713 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker( | 915 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker( |
| 714 PersistentMemoryAllocator::Reference mem_reference, | 916 PersistentMemoryAllocator::Reference mem_reference, |
| 715 void* base, | 917 void* base, |
| 716 size_t size) | 918 size_t size) |
| 717 : ThreadActivityTracker(base, size), | 919 : ThreadActivityTracker(base, size), |
| 718 mem_reference_(mem_reference), | 920 mem_reference_(mem_reference), |
| 719 mem_base_(base) {} | 921 mem_base_(base) {} |
| 720 | 922 |
| 721 GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() { | 923 GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() { |
| 722 // The global |g_tracker_| must point to the owner of this class since all | 924 // The global |g_tracker_| must point to the owner of this class since all |
| (...skipping 105 matching lines...) | |
| 828 return tracker; | 1030 return tracker; |
| 829 } | 1031 } |
| 830 | 1032 |
| 831 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { | 1033 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { |
| 832 ThreadActivityTracker* tracker = | 1034 ThreadActivityTracker* tracker = |
| 833 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); | 1035 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); |
| 834 if (tracker) | 1036 if (tracker) |
| 835 delete tracker; | 1037 delete tracker; |
| 836 } | 1038 } |
| 837 | 1039 |
| 838 void* GlobalActivityTracker::GetUserDataMemory( | |
| 839 PersistentMemoryAllocator::Reference* reference) { | |
| 840 if (!*reference) { | |
| 841 base::AutoLock autolock(user_data_allocator_lock_); | |
| 842 *reference = user_data_allocator_.GetObjectReference(); | |
| 843 if (!*reference) | |
| 844 return nullptr; | |
| 845 } | |
| 846 | |
| 847 void* memory = allocator_->GetAsArray<char>( | |
| 848 *reference, kTypeIdUserDataRecord, PersistentMemoryAllocator::kSizeAny); | |
| 849 DCHECK(memory); | |
| 850 return memory; | |
| 851 } | |
| 852 | |
| 853 void GlobalActivityTracker::ReleaseUserDataMemory( | |
| 854 PersistentMemoryAllocator::Reference* reference) { | |
| 855 DCHECK(*reference); | |
| 856 base::AutoLock autolock(user_data_allocator_lock_); | |
| 857 user_data_allocator_.ReleaseObjectReference(*reference); | |
| 858 *reference = PersistentMemoryAllocator::kReferenceNull; | |
| 859 } | |
| 860 | |
| 861 GlobalActivityTracker::GlobalActivityTracker( | 1040 GlobalActivityTracker::GlobalActivityTracker( |
| 862 std::unique_ptr<PersistentMemoryAllocator> allocator, | 1041 std::unique_ptr<PersistentMemoryAllocator> allocator, |
| 863 int stack_depth) | 1042 int stack_depth) |
| 864 : allocator_(std::move(allocator)), | 1043 : allocator_(std::move(allocator)), |
| 865 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)), | 1044 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)), |
| 866 this_thread_tracker_(&OnTLSDestroy), | 1045 this_thread_tracker_(&OnTLSDestroy), |
| 867 thread_tracker_count_(0), | 1046 thread_tracker_count_(0), |
| 868 thread_tracker_allocator_(allocator_.get(), | 1047 thread_tracker_allocator_(allocator_.get(), |
| 869 kTypeIdActivityTracker, | 1048 kTypeIdActivityTracker, |
| 870 kTypeIdActivityTrackerFree, | 1049 kTypeIdActivityTrackerFree, |
| (...skipping 12 matching lines...) | |
| 883 kTypeIdGlobalDataRecord, | 1062 kTypeIdGlobalDataRecord, |
| 884 PersistentMemoryAllocator::kSizeAny), | 1063 PersistentMemoryAllocator::kSizeAny), |
| 885 kGlobalDataSize) { | 1064 kGlobalDataSize) { |
| 886 // Ensure the passed memory is valid and empty (iterator finds nothing). | 1065 // Ensure the passed memory is valid and empty (iterator finds nothing). |
| 887 uint32_t type; | 1066 uint32_t type; |
| 888 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); | 1067 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); |
| 889 | 1068 |
| 890 // Ensure that there is no other global object and then make this one such. | 1069 // Ensure that there is no other global object and then make this one such. |
| 891 DCHECK(!g_tracker_); | 1070 DCHECK(!g_tracker_); |
| 892 g_tracker_ = this; | 1071 g_tracker_ = this; |
| 1072 | |
| 1073 // The global user-data record must be iterable in order to be found by an | |
| 1074 // analyzer. | |
| 1075 // allocator_->MakeIterable(...); | |

> manzagop (departed), 2016/11/29 22:42:58: Uncomment?
> bcwhite, 2016/12/01 16:29:39: Done. I had to wait for a separate CL to land.
| 893 } | 1076 } |
| 894 | 1077 |
| 895 GlobalActivityTracker::~GlobalActivityTracker() { | 1078 GlobalActivityTracker::~GlobalActivityTracker() { |
| 896 DCHECK_EQ(g_tracker_, this); | 1079 DCHECK_EQ(g_tracker_, this); |
| 897 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); | 1080 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); |
| 898 g_tracker_ = nullptr; | 1081 g_tracker_ = nullptr; |
| 899 } | 1082 } |
| 900 | 1083 |
| 901 void GlobalActivityTracker::ReturnTrackerMemory( | 1084 void GlobalActivityTracker::ReturnTrackerMemory( |
| 902 ManagedActivityTracker* tracker) { | 1085 ManagedActivityTracker* tracker) { |
| (...skipping 94 matching lines...) | |
| 997 : GlobalActivityTracker::ScopedThreadActivity( | 1180 : GlobalActivityTracker::ScopedThreadActivity( |
| 998 program_counter, | 1181 program_counter, |
| 999 nullptr, | 1182 nullptr, |
| 1000 Activity::ACT_PROCESS_WAIT, | 1183 Activity::ACT_PROCESS_WAIT, |
| 1001 ActivityData::ForProcess(process->Pid()), | 1184 ActivityData::ForProcess(process->Pid()), |
| 1002 /*lock_allowed=*/true) {} | 1185 /*lock_allowed=*/true) {} |
| 1003 #endif | 1186 #endif |
| 1004 | 1187 |
| 1005 } // namespace debug | 1188 } // namespace debug |
| 1006 } // namespace base | 1189 } // namespace base |