Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/debug/activity_tracker.h" | 5 #include "base/debug/activity_tracker.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <limits> | 8 #include <limits> |
| 9 #include <utility> | 9 #include <utility> |
| 10 | 10 |
| (...skipping 302 matching lines...) | |
| 313 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; | 313 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; |
| 314 ActivityUserData::ValueInfo::~ValueInfo() {} | 314 ActivityUserData::ValueInfo::~ValueInfo() {} |
| 315 ActivityUserData::MemoryHeader::MemoryHeader() {} | 315 ActivityUserData::MemoryHeader::MemoryHeader() {} |
| 316 ActivityUserData::MemoryHeader::~MemoryHeader() {} | 316 ActivityUserData::MemoryHeader::~MemoryHeader() {} |
| 317 ActivityUserData::FieldHeader::FieldHeader() {} | 317 ActivityUserData::FieldHeader::FieldHeader() {} |
| 318 ActivityUserData::FieldHeader::~FieldHeader() {} | 318 ActivityUserData::FieldHeader::~FieldHeader() {} |
| 319 | 319 |
| 320 ActivityUserData::ActivityUserData(void* memory, size_t size) | 320 ActivityUserData::ActivityUserData(void* memory, size_t size) |
| 321 : memory_(reinterpret_cast<char*>(memory)), | 321 : memory_(reinterpret_cast<char*>(memory)), |
| 322 available_(RoundDownToAlignment(size, kMemoryAlignment)), | 322 available_(RoundDownToAlignment(size, kMemoryAlignment)), |
| 323 header_(reinterpret_cast<MemoryHeader*>(memory)) { | 323 header_(reinterpret_cast<MemoryHeader*>(memory)), |
| | 324 orig_data_id(0), |
| | 325 orig_process_id(0), |
| | 326 orig_create_stamp(0) { |
| 324 // It's possible that no user data is being stored. | 327 // It's possible that no user data is being stored. |
| 325 if (!memory_) | 328 if (!memory_) |
| 326 return; | 329 return; |
| 327 | 330 |
| 328 static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header"); | 331 static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header"); |
| 329 DCHECK_LT(sizeof(MemoryHeader), available_); | 332 DCHECK_LT(sizeof(MemoryHeader), available_); |
| 330 if (header_->owner.data_id.load(std::memory_order_acquire) == 0) | 333 if (header_->owner.data_id.load(std::memory_order_acquire) == 0) |
| 331 header_->owner.Release_Initialize(); | 334 header_->owner.Release_Initialize(); |
| 332 memory_ += sizeof(MemoryHeader); | 335 memory_ += sizeof(MemoryHeader); |
| 333 available_ -= sizeof(MemoryHeader); | 336 available_ -= sizeof(MemoryHeader); |
| 334 | 337 |
| | 338 // Make a copy of identifying information for later comparison. |
| | 339 *const_cast<uint32_t*>(&orig_data_id) = |
| | 340 header_->owner.data_id.load(std::memory_order_acquire); |
| | 341 *const_cast<int64_t*>(&orig_process_id) = header_->owner.process_id; |
| | 342 *const_cast<int64_t*>(&orig_create_stamp) = header_->owner.create_stamp; |
| | 343 |
| 335 // If there is already data present, load that. This allows the same class | 344 // If there is already data present, load that. This allows the same class |
| 336 // to be used for analysis through snapshots. | 345 // to be used for analysis through snapshots. |
| 337 ImportExistingData(); | 346 ImportExistingData(); |
| 338 } | 347 } |
| 339 | 348 |
| 340 ActivityUserData::~ActivityUserData() {} | 349 ActivityUserData::~ActivityUserData() {} |
| 341 | 350 |
| 342 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const { | 351 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const { |
| 343 DCHECK(output_snapshot); | 352 DCHECK(output_snapshot); |
| 344 DCHECK(output_snapshot->empty()); | 353 DCHECK(output_snapshot->empty()); |
| 345 | 354 |
| 346 // Find any new data that may have been added by an active instance of this | 355 // Find any new data that may have been added by an active instance of this |
| 347 // class that is adding records. | 356 // class that is adding records. |
| 348 ImportExistingData(); | 357 ImportExistingData(); |
| 349 | 358 |
| | 359 // Add all the values to the snapshot. |
| 350 for (const auto& entry : values_) { | 360 for (const auto& entry : values_) { |
| 351 TypedValue value; | 361 TypedValue value; |
| | 362 const size_t size = entry.second.size_ptr->load(std::memory_order_acquire); |
| 352 value.type_ = entry.second.type; | 363 value.type_ = entry.second.type; |
| 353 DCHECK_GE(entry.second.extent, | 364 DCHECK_GE(entry.second.extent, size); |
| 354 entry.second.size_ptr->load(std::memory_order_relaxed)); | |
| 355 | 365 |
| 356 switch (entry.second.type) { | 366 switch (entry.second.type) { |
| 357 case RAW_VALUE: | 367 case RAW_VALUE: |
| 358 case STRING_VALUE: | 368 case STRING_VALUE: |
| 359 value.long_value_ = | 369 value.long_value_ = |
| 360 std::string(reinterpret_cast<char*>(entry.second.memory), | 370 std::string(reinterpret_cast<char*>(entry.second.memory), size); |
| 361 entry.second.size_ptr->load(std::memory_order_relaxed)); | |
| 362 break; | 371 break; |
| 363 case RAW_VALUE_REFERENCE: | 372 case RAW_VALUE_REFERENCE: |
| 364 case STRING_VALUE_REFERENCE: { | 373 case STRING_VALUE_REFERENCE: { |
| 365 ReferenceRecord* ref = | 374 ReferenceRecord* ref = |
| 366 reinterpret_cast<ReferenceRecord*>(entry.second.memory); | 375 reinterpret_cast<ReferenceRecord*>(entry.second.memory); |
| 367 value.ref_value_ = StringPiece( | 376 value.ref_value_ = StringPiece( |
| 368 reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)), | 377 reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)), |
| 369 static_cast<size_t>(ref->size)); | 378 static_cast<size_t>(ref->size)); |
| 370 } break; | 379 } break; |
| 371 case BOOL_VALUE: | 380 case BOOL_VALUE: |
| 372 case CHAR_VALUE: | 381 case CHAR_VALUE: |
| 373 value.short_value_ = *reinterpret_cast<char*>(entry.second.memory); | 382 value.short_value_ = *reinterpret_cast<char*>(entry.second.memory); |
| 374 break; | 383 break; |
| 375 case SIGNED_VALUE: | 384 case SIGNED_VALUE: |
| 376 case UNSIGNED_VALUE: | 385 case UNSIGNED_VALUE: |
| 377 value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory); | 386 value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory); |
| 378 break; | 387 break; |
| 379 case END_OF_VALUES: // Included for completeness purposes. | 388 case END_OF_VALUES: // Included for completeness purposes. |
| 380 NOTREACHED(); | 389 NOTREACHED(); |
| 381 } | 390 } |
| 382 auto inserted = output_snapshot->insert( | 391 auto inserted = output_snapshot->insert( |
| 383 std::make_pair(entry.second.name.as_string(), std::move(value))); | 392 std::make_pair(entry.second.name.as_string(), std::move(value))); |
| 384 DCHECK(inserted.second); // True if inserted, false if existed. | 393 DCHECK(inserted.second); // True if inserted, false if existed. |
| 385 } | 394 } |
| 386 | 395 |
| | 396 // Another import attempt will validate that the underlying memory has not |
| | 397 // been reused for another purpose. Entries added since the first import |
| | 398 // will be ignored here but will be returned if another snapshot is created. |
| | 399 ImportExistingData(); |
| | 400 if (!memory_) { |
| | 401 output_snapshot->clear(); |
| | 402 return false; |
| | 403 } |
| | 404 |
| | 405 // Successful snapshot. |
| 387 return true; | 406 return true; |
| 388 } | 407 } |
| 389 | 408 |
| 390 const void* ActivityUserData::GetBaseAddress() const { | 409 const void* ActivityUserData::GetBaseAddress() const { |
| 391 // The |memory_| pointer advances as elements are written but the |header_| | 410 // The |memory_| pointer advances as elements are written but the |header_| |
| 392 // value is always at the start of the block so just return that. | 411 // value is always at the start of the block so just return that. |
| 393 return header_; | 412 return header_; |
| 394 } | 413 } |
| 395 | 414 |
| 396 void ActivityUserData::SetOwningProcessIdForTesting(ProcessId pid, | 415 void ActivityUserData::SetOwningProcessIdForTesting(ProcessId pid, |
| (...skipping 12 matching lines...) | |
| 409 } | 428 } |
| 410 | 429 |
| 411 void ActivityUserData::Set(StringPiece name, | 430 void ActivityUserData::Set(StringPiece name, |
| 412 ValueType type, | 431 ValueType type, |
| 413 const void* memory, | 432 const void* memory, |
| 414 size_t size) { | 433 size_t size) { |
| 415 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length()); | 434 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length()); |
| 416 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1), | 435 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1), |
| 417 size); | 436 size); |
| 418 | 437 |
| | 438 // Find any new data that may have been added by an active instance of this |

manzagop (departed) 2017/03/21 21:10:05:
Can you say more about what/where this other instance […]

bcwhite 2017/03/29 22:00:51:
You're right. I was once thinking they could operate […]

| | 439 // class that is adding records. This comes before the memory_ test below |
| | 440 // because this call can clear the memory_ pointer if it finds a problem. |
| | 441 ImportExistingData(); |
| | 442 |
| 419 // It's possible that no user data is being stored. | 443 // It's possible that no user data is being stored. |
| 420 if (!memory_) | 444 if (!memory_) |
| 421 return; | 445 return; |
| 422 | 446 |
| 423 // The storage of a name is limited so use that limit during lookup. | 447 // The storage of a name is limited so use that limit during lookup. |
| 424 if (name.length() > kMaxUserDataNameLength) | 448 if (name.length() > kMaxUserDataNameLength) |
| 425 name.set(name.data(), kMaxUserDataNameLength); | 449 name.set(name.data(), kMaxUserDataNameLength); |
| 426 | 450 |
| 427 ValueInfo* info; | 451 ValueInfo* info; |
| 428 auto existing = values_.find(name); | 452 auto existing = values_.find(name); |
| (...skipping 81 matching lines...) | |
| 510 ValueType type, | 534 ValueType type, |
| 511 const void* memory, | 535 const void* memory, |
| 512 size_t size) { | 536 size_t size) { |
| 513 ReferenceRecord rec; | 537 ReferenceRecord rec; |
| 514 rec.address = reinterpret_cast<uintptr_t>(memory); | 538 rec.address = reinterpret_cast<uintptr_t>(memory); |
| 515 rec.size = size; | 539 rec.size = size; |
| 516 Set(name, type, &rec, sizeof(rec)); | 540 Set(name, type, &rec, sizeof(rec)); |
| 517 } | 541 } |
| 518 | 542 |
| 519 void ActivityUserData::ImportExistingData() const { | 543 void ActivityUserData::ImportExistingData() const { |
| | 544 // It's possible that no user data is being stored. |
| | 545 if (!memory_) |
| | 546 return; |
| | 547 |
| 520 while (available_ > sizeof(FieldHeader)) { | 548 while (available_ > sizeof(FieldHeader)) { |
| 521 FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_); | 549 FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_); |
| 522 ValueType type = | 550 ValueType type = |
| 523 static_cast<ValueType>(header->type.load(std::memory_order_acquire)); | 551 static_cast<ValueType>(header->type.load(std::memory_order_acquire)); |
| 524 if (type == END_OF_VALUES) | 552 if (type == END_OF_VALUES) |
| 525 return; | 553 return; |
| 526 if (header->record_size > available_) | 554 if (header->record_size > available_) |
| 527 return; | 555 return; |
| 528 | 556 |
| 529 size_t value_offset = RoundUpToAlignment( | 557 size_t value_offset = RoundUpToAlignment( |
| (...skipping 11 matching lines...) | |
| 541 info.memory = memory_ + value_offset; | 569 info.memory = memory_ + value_offset; |
| 542 info.size_ptr = &header->value_size; | 570 info.size_ptr = &header->value_size; |
| 543 info.extent = header->record_size - value_offset; | 571 info.extent = header->record_size - value_offset; |
| 544 | 572 |
| 545 StringPiece key(info.name); | 573 StringPiece key(info.name); |
| 546 values_.insert(std::make_pair(key, std::move(info))); | 574 values_.insert(std::make_pair(key, std::move(info))); |
| 547 | 575 |
| 548 memory_ += header->record_size; | 576 memory_ += header->record_size; |
| 549 available_ -= header->record_size; | 577 available_ -= header->record_size; |
| 550 } | 578 } |
| | 579 |
| | 580 // Check if memory has been completely reused. |
| | 581 if (header_->owner.data_id.load(std::memory_order_acquire) != orig_data_id || |
| | 582 header_->owner.process_id != orig_process_id || |
| | 583 header_->owner.create_stamp != orig_create_stamp) { |
| | 584 memory_ = nullptr; |

manzagop (departed) 2017/03/21 21:10:05:
This behavior should be mentioned in the .h.

bcwhite 2017/03/29 22:00:51:
Done.

| | 585 values_.clear(); |
| | 586 } |
| 551 } | 587 } |
| 552 | 588 |
| 553 // This information is kept for every thread that is tracked. It is filled | 589 // This information is kept for every thread that is tracked. It is filled |
| 554 // the very first time the thread is seen. All fields must be of exact sizes | 590 // the very first time the thread is seen. All fields must be of exact sizes |
| 555 // so there is no issue moving between 32 and 64-bit builds. | 591 // so there is no issue moving between 32 and 64-bit builds. |
| 556 struct ThreadActivityTracker::Header { | 592 struct ThreadActivityTracker::Header { |
| 557 // Defined in .h for analyzer access. Increment this if structure changes! | 593 // Defined in .h for analyzer access. Increment this if structure changes! |
| 558 static constexpr uint32_t kPersistentTypeId = | 594 static constexpr uint32_t kPersistentTypeId = |
| 559 GlobalActivityTracker::kTypeIdActivityTracker; | 595 GlobalActivityTracker::kTypeIdActivityTracker; |
| 560 | 596 |
| (...skipping 310 matching lines...) | |
| 871 // during the time-sensitive snapshot operation. It is shrunk once the | 907 // during the time-sensitive snapshot operation. It is shrunk once the |
| 872 // actual size is known. | 908 // actual size is known. |
| 873 output_snapshot->activity_stack.reserve(stack_slots_); | 909 output_snapshot->activity_stack.reserve(stack_slots_); |
| 874 | 910 |
| 875 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { | 911 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { |
| 876 // Remember the data IDs to ensure nothing is replaced during the snapshot | 912 // Remember the data IDs to ensure nothing is replaced during the snapshot |
| 877 // operation. Use "acquire" so that all the non-atomic fields of the | 913 // operation. Use "acquire" so that all the non-atomic fields of the |
| 878 // structure are valid (at least at the current moment in time). | 914 // structure are valid (at least at the current moment in time). |
| 879 const uint32_t starting_id = | 915 const uint32_t starting_id = |
| 880 header_->owner.data_id.load(std::memory_order_acquire); | 916 header_->owner.data_id.load(std::memory_order_acquire); |
| | 917 const int64_t starting_create_stamp = header_->owner.create_stamp; |
| 881 const int64_t starting_process_id = header_->owner.process_id; | 918 const int64_t starting_process_id = header_->owner.process_id; |
| 882 const int64_t starting_thread_id = header_->thread_ref.as_id; | 919 const int64_t starting_thread_id = header_->thread_ref.as_id; |
| 883 | 920 |
| 884 // Write a non-zero value to |stack_unchanged| so it's possible to detect | 921 // Write a non-zero value to |stack_unchanged| so it's possible to detect |
| 885 // at the end that nothing has changed since copying the data began. A | 922 // at the end that nothing has changed since copying the data began. A |
| 886 // "cst" operation is required to ensure it occurs before everything else. | 923 // "cst" operation is required to ensure it occurs before everything else. |
| 887 // Using "cst" memory ordering is relatively expensive but this is only | 924 // Using "cst" memory ordering is relatively expensive but this is only |
| 888 // done during analysis so doesn't directly affect the worker threads. | 925 // done during analysis so doesn't directly affect the worker threads. |
| 889 header_->stack_unchanged.store(1, std::memory_order_seq_cst); | 926 header_->stack_unchanged.store(1, std::memory_order_seq_cst); |
| 890 | 927 |
| (...skipping 13 matching lines...) | |
| 904 continue; | 941 continue; |
| 905 | 942 |
| 906 // Stack copied. Record its full depth. | 943 // Stack copied. Record its full depth. |
| 907 output_snapshot->activity_stack_depth = depth; | 944 output_snapshot->activity_stack_depth = depth; |
| 908 | 945 |
| 909 // TODO(bcwhite): Snapshot other things here. | 946 // TODO(bcwhite): Snapshot other things here. |
| 910 | 947 |
| 911 // Get the general thread information. | 948 // Get the general thread information. |
| 912 output_snapshot->thread_name = | 949 output_snapshot->thread_name = |
| 913 std::string(header_->thread_name, sizeof(header_->thread_name) - 1); | 950 std::string(header_->thread_name, sizeof(header_->thread_name) - 1); |
| | 951 output_snapshot->create_stamp = header_->owner.create_stamp; |
| 914 output_snapshot->thread_id = header_->thread_ref.as_id; | 952 output_snapshot->thread_id = header_->thread_ref.as_id; |
| 915 output_snapshot->process_id = header_->owner.process_id; | 953 output_snapshot->process_id = header_->owner.process_id; |
| 916 | 954 |
| 917 // All characters of the thread-name buffer were copied so as to not break | 955 // All characters of the thread-name buffer were copied so as to not break |
| 918 // if the trailing NUL were missing. Now limit the length if the actual | 956 // if the trailing NUL were missing. Now limit the length if the actual |
| 919 // name is shorter. | 957 // name is shorter. |
| 920 output_snapshot->thread_name.resize( | 958 output_snapshot->thread_name.resize( |
| 921 strlen(output_snapshot->thread_name.c_str())); | 959 strlen(output_snapshot->thread_name.c_str())); |
| 922 | 960 |
| 923 // If the data ID has changed then the tracker has exited and the memory | 961 // If the data ID has changed then the tracker has exited and the memory |
| 924 // reused by a new one. Try again. | 962 // reused by a new one. Try again. |
| 925 if (header_->owner.data_id.load(std::memory_order_seq_cst) != starting_id || | 963 if (header_->owner.data_id.load(std::memory_order_seq_cst) != starting_id || |
| | 964 output_snapshot->create_stamp != starting_create_stamp || |
| 926 output_snapshot->process_id != starting_process_id || | 965 output_snapshot->process_id != starting_process_id || |
| 927 output_snapshot->thread_id != starting_thread_id) { | 966 output_snapshot->thread_id != starting_thread_id) { |
| 928 continue; | 967 continue; |
| 929 } | 968 } |
| 930 | 969 |
| 931 // Only successful if the data is still valid once everything is done since | 970 // Only successful if the data is still valid once everything is done since |
| 932 // it's possible for the thread to end somewhere in the middle and all its | 971 // it's possible for the thread to end somewhere in the middle and all its |
| 933 // values become garbage. | 972 // values become garbage. |
| 934 if (!IsValid()) | 973 if (!IsValid()) |
| 935 return false; | 974 return false; |
| (...skipping 706 matching lines...) | |
| 1642 : GlobalActivityTracker::ScopedThreadActivity( | 1681 : GlobalActivityTracker::ScopedThreadActivity( |
| 1643 program_counter, | 1682 program_counter, |
| 1644 nullptr, | 1683 nullptr, |
| 1645 Activity::ACT_PROCESS_WAIT, | 1684 Activity::ACT_PROCESS_WAIT, |
| 1646 ActivityData::ForProcess(process->Pid()), | 1685 ActivityData::ForProcess(process->Pid()), |
| 1647 /*lock_allowed=*/true) {} | 1686 /*lock_allowed=*/true) {} |
| 1648 #endif | 1687 #endif |
| 1649 | 1688 |
| 1650 } // namespace debug | 1689 } // namespace debug |
| 1651 } // namespace base | 1690 } // namespace base |
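For readers following the review without the full file, here is a minimal, self-contained sketch of the reuse-detection pattern this change introduces: the object records the owner's identifying tuple (data_id, process_id, create_stamp) when it attaches to the shared memory, and every later import or snapshot re-checks that tuple before trusting the data. This is only an illustration under simplified assumptions; the names OwnerHeader, SharedUserData, and StillValid are hypothetical placeholders, not the real base/debug/activity_tracker API.

```cpp
// Sketch of the "remember identity, re-check before trusting" pattern used
// by the new ActivityUserData code. Not Chromium code; all names simplified.
#include <atomic>
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct OwnerHeader {
  std::atomic<uint32_t> data_id{0};  // Non-zero once initialized; changes when memory is reused.
  int64_t process_id = 0;
  int64_t create_stamp = 0;
};

class SharedUserData {
 public:
  explicit SharedUserData(OwnerHeader* header)
      : header_(header),
        orig_data_id_(header->data_id.load(std::memory_order_acquire)),
        orig_process_id_(header->process_id),
        orig_create_stamp_(header->create_stamp) {}

  // Returns false if the backing memory was recycled by a different owner
  // since construction, mirroring how ImportExistingData() clears |memory_|.
  bool StillValid() const {
    return header_->data_id.load(std::memory_order_acquire) == orig_data_id_ &&
           header_->process_id == orig_process_id_ &&
           header_->create_stamp == orig_create_stamp_;
  }

  // A snapshot only succeeds if the identity check passes both before and
  // after copying, analogous to the second ImportExistingData() call added
  // to CreateSnapshot() in this patch.
  bool CreateSnapshot(std::map<std::string, std::string>* out) const {
    if (!StillValid())
      return false;
    out->emplace("example_key", "example_value");  // Copy values here.
    if (!StillValid()) {
      out->clear();
      return false;
    }
    return true;
  }

 private:
  OwnerHeader* header_;
  const uint32_t orig_data_id_;
  const int64_t orig_process_id_;
  const int64_t orig_create_stamp_;
};

int main() {
  OwnerHeader header;
  header.data_id.store(1, std::memory_order_release);
  header.process_id = 1234;
  header.create_stamp = 42;

  SharedUserData data(&header);
  std::map<std::string, std::string> snapshot;
  std::cout << "first snapshot ok: " << data.CreateSnapshot(&snapshot) << "\n";

  // Simulate the memory being recycled by another owner.
  header.data_id.store(2, std::memory_order_release);
  snapshot.clear();
  std::cout << "after reuse ok: " << data.CreateSnapshot(&snapshot) << "\n";
  return 0;
}
```

Checking the tuple both before and after the copy guards against the memory being recycled mid-snapshot, which is why the patch re-imports and bails out with `return false` instead of trusting whatever was read.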