Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
| 6 | 6 |
| 7 #include <math.h> | 7 #include <math.h> |
| 8 | 8 |
| 9 #include "base/format_macros.h" | 9 #include "base/format_macros.h" |
| 10 #include "base/memory/scoped_ptr.h" | |
| 10 #include "base/message_loop.h" | 11 #include "base/message_loop.h" |
| 12 #include "base/process_util.h" | |
| 11 #include "base/profiler/alternate_timer.h" | 13 #include "base/profiler/alternate_timer.h" |
| 12 #include "base/stringprintf.h" | 14 #include "base/stringprintf.h" |
| 13 #include "base/third_party/valgrind/memcheck.h" | 15 #include "base/third_party/valgrind/memcheck.h" |
| 14 #include "base/threading/thread_restrictions.h" | 16 #include "base/threading/thread_restrictions.h" |
| 17 #include "base/port.h" | |
|
jar (doing other things)
2012/03/21 18:19:55
I'm not sure.... but I think port.h needs to come
Ilya Sherman
2012/03/21 19:23:02
It looks like port.h already #includes build_confi
Ilya Sherman
2012/03/21 21:45:43
Yep, looks like it's not needed for this file. Re
| |
| 15 #include "build/build_config.h" | 18 #include "build/build_config.h" |
| 16 #include "base/port.h" | |
| 17 | 19 |
| 18 using base::TimeDelta; | 20 using base::TimeDelta; |
| 19 | 21 |
| 20 namespace tracked_objects { | 22 namespace tracked_objects { |
| 21 | 23 |
| 22 namespace { | 24 namespace { |
| 23 | 25 |
| 24 // Flag to compile out almost all of the task tracking code. | 26 // Flag to compile out almost all of the task tracking code. |
| 25 const bool kTrackAllTaskObjects = true; | 27 const bool kTrackAllTaskObjects = true; |
| 26 | 28 |
| (...skipping 10 matching lines...) Expand all Loading... | |
| 37 ThreadData::PROFILING_CHILDREN_ACTIVE; | 39 ThreadData::PROFILING_CHILDREN_ACTIVE; |
| 38 | 40 |
| 39 // Control whether an alternate time source (Now() function) is supported by | 41 // Control whether an alternate time source (Now() function) is supported by |
| 40 // the ThreadData class. This compile time flag should be set to true if we | 42 // the ThreadData class. This compile time flag should be set to true if we |
| 41 // want other modules (such as a memory allocator, or a thread-specific CPU time | 43 // want other modules (such as a memory allocator, or a thread-specific CPU time |
| 42 // clock) to be able to provide a thread-specific Now() function. Without this | 44 // clock) to be able to provide a thread-specific Now() function. Without this |
| 43 // compile-time flag, the code will only support the wall-clock time. This flag | 45 // compile-time flag, the code will only support the wall-clock time. This flag |
| 44 // can be flipped to efficiently disable this path (if there is a performance | 46 // can be flipped to efficiently disable this path (if there is a performance |
| 45 // problem with its presence). | 47 // problem with its presence). |
| 46 static const bool kAllowAlternateTimeSourceHandling = true; | 48 static const bool kAllowAlternateTimeSourceHandling = true; |
| 49 | |
| 47 } // namespace | 50 } // namespace |
| 48 | 51 |
| 49 //------------------------------------------------------------------------------ | 52 //------------------------------------------------------------------------------ |
| 50 // DeathData tallies durations when a death takes place. | 53 // DeathData tallies durations when a death takes place. |
| 51 | 54 |
| 52 DeathData::DeathData() { | 55 DeathData::DeathData() { |
| 53 Clear(); | 56 Clear(); |
| 54 } | 57 } |
| 55 | 58 |
| 56 DeathData::DeathData(int count) { | 59 DeathData::DeathData(int count) { |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 106 } | 109 } |
| 107 | 110 |
| 108 DurationInt DeathData::queue_duration_max() const { | 111 DurationInt DeathData::queue_duration_max() const { |
| 109 return queue_duration_max_; | 112 return queue_duration_max_; |
| 110 } | 113 } |
| 111 | 114 |
| 112 DurationInt DeathData::queue_duration_sample() const { | 115 DurationInt DeathData::queue_duration_sample() const { |
| 113 return queue_duration_sample_; | 116 return queue_duration_sample_; |
| 114 } | 117 } |
| 115 | 118 |
| 116 | |
| 117 base::DictionaryValue* DeathData::ToValue() const { | |
| 118 base::DictionaryValue* dictionary = new base::DictionaryValue; | |
| 119 dictionary->Set("count", base::Value::CreateIntegerValue(count_)); | |
| 120 dictionary->Set("run_ms", | |
| 121 base::Value::CreateIntegerValue(run_duration_sum())); | |
| 122 dictionary->Set("run_ms_max", | |
| 123 base::Value::CreateIntegerValue(run_duration_max())); | |
| 124 dictionary->Set("run_ms_sample", | |
| 125 base::Value::CreateIntegerValue(run_duration_sample())); | |
| 126 dictionary->Set("queue_ms", | |
| 127 base::Value::CreateIntegerValue(queue_duration_sum())); | |
| 128 dictionary->Set("queue_ms_max", | |
| 129 base::Value::CreateIntegerValue(queue_duration_max())); | |
| 130 dictionary->Set("queue_ms_sample", | |
| 131 base::Value::CreateIntegerValue(queue_duration_sample())); | |
| 132 return dictionary; | |
| 133 } | |
| 134 | |
| 135 void DeathData::ResetMax() { | 119 void DeathData::ResetMax() { |
| 136 run_duration_max_ = 0; | 120 run_duration_max_ = 0; |
| 137 queue_duration_max_ = 0; | 121 queue_duration_max_ = 0; |
| 138 } | 122 } |
| 139 | 123 |
| 140 void DeathData::Clear() { | 124 void DeathData::Clear() { |
| 141 count_ = 0; | 125 count_ = 0; |
| 142 run_duration_sum_ = 0; | 126 run_duration_sum_ = 0; |
| 143 run_duration_max_ = 0; | 127 run_duration_max_ = 0; |
| 144 run_duration_sample_ = 0; | 128 run_duration_sample_ = 0; |
| 145 queue_duration_sum_ = 0; | 129 queue_duration_sum_ = 0; |
| 146 queue_duration_max_ = 0; | 130 queue_duration_max_ = 0; |
| 147 queue_duration_sample_ = 0; | 131 queue_duration_sample_ = 0; |
| 148 } | 132 } |
| 149 | 133 |
| 150 //------------------------------------------------------------------------------ | 134 //------------------------------------------------------------------------------ |
| 135 SerializedDeathData::SerializedDeathData() { | |
| 136 } | |
| 137 | |
| 138 SerializedDeathData::SerializedDeathData( | |
| 139 const tracked_objects::DeathData& death_data) | |
| 140 : count(death_data.count()), | |
| 141 run_duration_sum(death_data.run_duration_sum()), | |
| 142 run_duration_max(death_data.run_duration_max()), | |
| 143 run_duration_sample(death_data.run_duration_sample()), | |
| 144 queue_duration_sum(death_data.queue_duration_sum()), | |
| 145 queue_duration_max(death_data.queue_duration_max()), | |
| 146 queue_duration_sample(death_data.queue_duration_sample()) { | |
| 147 } | |
| 148 | |
| 149 SerializedDeathData::~SerializedDeathData() { | |
| 150 } | |
| 151 | |
| 152 //------------------------------------------------------------------------------ | |
| 151 BirthOnThread::BirthOnThread(const Location& location, | 153 BirthOnThread::BirthOnThread(const Location& location, |
| 152 const ThreadData& current) | 154 const ThreadData& current) |
| 153 : location_(location), | 155 : location_(location), |
| 154 birth_thread_(&current) { | 156 birth_thread_(&current) { |
| 155 } | 157 } |
| 156 | 158 |
| 157 const Location BirthOnThread::location() const { return location_; } | 159 //------------------------------------------------------------------------------ |
| 158 const ThreadData* BirthOnThread::birth_thread() const { return birth_thread_; } | 160 SerializedBirthOnThread::SerializedBirthOnThread() { |
| 161 } | |
| 159 | 162 |
| 160 void BirthOnThread::ToValue(const std::string& prefix, | 163 SerializedBirthOnThread::SerializedBirthOnThread( |
| 161 base::DictionaryValue* dictionary) const { | 164 const tracked_objects::BirthOnThread& birth) |
| 162 dictionary->Set(prefix + "_location", location_.ToValue()); | 165 : location(birth.location()), |
| 163 dictionary->Set(prefix + "_thread", | 166 thread_name(birth.birth_thread()->thread_name()) { |
| 164 base::Value::CreateStringValue(birth_thread_->thread_name())); | 167 } |
| 168 | |
| 169 SerializedBirthOnThread::~SerializedBirthOnThread() { | |
| 165 } | 170 } |
| 166 | 171 |
| 167 //------------------------------------------------------------------------------ | 172 //------------------------------------------------------------------------------ |
| 168 Births::Births(const Location& location, const ThreadData& current) | 173 Births::Births(const Location& location, const ThreadData& current) |
| 169 : BirthOnThread(location, current), | 174 : BirthOnThread(location, current), |
| 170 birth_count_(1) { } | 175 birth_count_(1) { } |
| 171 | 176 |
| 172 int Births::birth_count() const { return birth_count_; } | 177 int Births::birth_count() const { return birth_count_; } |
| 173 | 178 |
| 174 void Births::RecordBirth() { ++birth_count_; } | 179 void Births::RecordBirth() { ++birth_count_; } |
| (...skipping 152 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 327 return; | 332 return; |
| 328 } | 333 } |
| 329 // We must NOT do any allocations during this callback. | 334 // We must NOT do any allocations during this callback. |
| 330 // Using the simple linked lists avoids all allocations. | 335 // Using the simple linked lists avoids all allocations. |
| 331 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); | 336 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); |
| 332 this->next_retired_worker_ = first_retired_worker_; | 337 this->next_retired_worker_ = first_retired_worker_; |
| 333 first_retired_worker_ = this; | 338 first_retired_worker_ = this; |
| 334 } | 339 } |
| 335 | 340 |
| 336 // static | 341 // static |
| 337 base::DictionaryValue* ThreadData::ToValue(bool reset_max) { | 342 void ThreadData::ToSerializedProcessData(bool reset_max, |
| 338 DataCollector collected_data; // Gather data. | 343 SerializedProcessData* process_data) { |
| 339 // Request multiple calls to collected_data.Append() for all threads. | 344 // Add births that have run to completion to |collected_data|. |
| 340 SendAllMaps(reset_max, &collected_data); | 345 // |birth_counts| tracks the total number of births recorded at each location |
| 341 collected_data.AddListOfLivingObjects(); // Add births that are still alive. | 346 // for which we have not seen a death count. |
| 342 base::DictionaryValue* dictionary = new base::DictionaryValue(); | 347 std::map<const BirthOnThread*, int> birth_counts; |
| 343 collected_data.ToValue(dictionary); | 348 ThreadData::SerializeAllExecutedTasks(reset_max, process_data, &birth_counts); |
| 344 return dictionary; | 349 |
| 350 // Add births that are still active -- i.e. objects that have tallied a birth, | |
| 351 // but have not yet tallied a matching death, and hence must be either | |
| 352 // running, queued up, or being held in limbo for future posting. | |
| 353 for (std::map<const BirthOnThread*, int>::const_iterator it = | |
| 354 birth_counts.begin(); | |
| 355 it != birth_counts.end(); ++it) { | |
| 356 if (it->second > 0) { | |
| 357 process_data->snapshots.push_back( | |
| 358 SerializedSnapshot(*it->first, | |
| 359 DeathData(it->second), | |
| 360 "Still_Alive")); | |
| 361 } | |
| 362 } | |
| 345 } | 363 } |
| 346 | 364 |
| 347 Births* ThreadData::TallyABirth(const Location& location) { | 365 Births* ThreadData::TallyABirth(const Location& location) { |
| 348 BirthMap::iterator it = birth_map_.find(location); | 366 BirthMap::iterator it = birth_map_.find(location); |
| 349 Births* child; | 367 Births* child; |
| 350 if (it != birth_map_.end()) { | 368 if (it != birth_map_.end()) { |
| 351 child = it->second; | 369 child = it->second; |
| 352 child->RecordBirth(); | 370 child->RecordBirth(); |
| 353 } else { | 371 } else { |
| 354 child = new Births(location, *this); // Leak this. | 372 child = new Births(location, *this); // Leak this. |
| (...skipping 165 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 520 | 538 |
| 521 DurationInt queue_duration = 0; | 539 DurationInt queue_duration = 0; |
| 522 DurationInt run_duration = 0; | 540 DurationInt run_duration = 0; |
| 523 if (!start_of_run.is_null() && !end_of_run.is_null()) | 541 if (!start_of_run.is_null() && !end_of_run.is_null()) |
| 524 run_duration = (end_of_run - start_of_run).InMilliseconds(); | 542 run_duration = (end_of_run - start_of_run).InMilliseconds(); |
| 525 current_thread_data->TallyADeath(*birth, queue_duration, run_duration); | 543 current_thread_data->TallyADeath(*birth, queue_duration, run_duration); |
| 526 } | 544 } |
| 527 | 545 |
| 528 const std::string ThreadData::thread_name() const { return thread_name_; } | 546 const std::string ThreadData::thread_name() const { return thread_name_; } |
| 529 | 547 |
| 548 // static | |
| 549 void ThreadData::SerializeAllExecutedTasks( | |
| 550 bool reset_max, | |
| 551 SerializedProcessData* process_data, | |
| 552 std::map<const BirthOnThread*, int>* birth_counts) { | |
| 553 if (!kTrackAllTaskObjects) | |
| 554 return; // Not compiled in. | |
| 555 | |
| 556 // Get an unchanging copy of a ThreadData list. | |
| 557 ThreadData* my_list = ThreadData::first(); | |
| 558 | |
| 559 // Gather data serially. | |
| 560 // This hackish approach *can* get some slightly corrupt tallies, as we are | |
| 561 // grabbing values without the protection of a lock, but it has the advantage | |
| 562 // of working even with threads that don't have message loops. If a user | |
| 563 // sees any strangeness, they can always just run their stats gathering a | |
| 564 // second time. | |
| 565 for (ThreadData* thread_data = my_list; | |
| 566 thread_data; | |
| 567 thread_data = thread_data->next()) { | |
| 568 thread_data->SerializeExecutedTasks(reset_max, process_data, birth_counts); | |
| 569 } | |
| 570 } | |
| 571 | |
| 572 void ThreadData::SerializeExecutedTasks( | |
| 573 bool reset_max, | |
| 574 SerializedProcessData* process_data, | |
| 575 std::map<const BirthOnThread*, int>* birth_counts) { | |
| 576 // Get copy of data, so that the data will not change during the iterations | |
| 577 // and processing. | |
| 578 ThreadData::BirthMap birth_map; | |
| 579 ThreadData::DeathMap death_map; | |
| 580 ThreadData::ParentChildSet parent_child_set; | |
| 581 SnapshotMaps(reset_max, &birth_map, &death_map, &parent_child_set); | |
| 582 | |
| 583 for (ThreadData::DeathMap::const_iterator it = death_map.begin(); | |
| 584 it != death_map.end(); ++it) { | |
| 585 process_data->snapshots.push_back( | |
| 586 SerializedSnapshot(*it->first, it->second, thread_name())); | |
| 587 (*birth_counts)[it->first] -= it->first->birth_count(); | |
| 588 } | |
| 589 | |
| 590 for (ThreadData::BirthMap::const_iterator it = birth_map.begin(); | |
| 591 it != birth_map.end(); ++it) { | |
| 592 (*birth_counts)[it->second] += it->second->birth_count(); | |
| 593 } | |
| 594 | |
| 595 if (!kTrackParentChildLinks) | |
| 596 return; | |
| 597 | |
| 598 for (ThreadData::ParentChildSet::const_iterator it = parent_child_set.begin(); | |
| 599 it != parent_child_set.end(); ++it) { | |
| 600 process_data->descendants.push_back(SerializedParentChildPair(*it)); | |
| 601 } | |
| 602 } | |
| 603 | |
| 530 // This may be called from another thread. | 604 // This may be called from another thread. |
| 531 void ThreadData::SnapshotMaps(bool reset_max, | 605 void ThreadData::SnapshotMaps(bool reset_max, |
| 532 BirthMap* birth_map, | 606 BirthMap* birth_map, |
| 533 DeathMap* death_map, | 607 DeathMap* death_map, |
| 534 ParentChildSet* parent_child_set) { | 608 ParentChildSet* parent_child_set) { |
| 535 base::AutoLock lock(map_lock_); | 609 base::AutoLock lock(map_lock_); |
| 536 for (BirthMap::const_iterator it = birth_map_.begin(); | 610 for (BirthMap::const_iterator it = birth_map_.begin(); |
| 537 it != birth_map_.end(); ++it) | 611 it != birth_map_.end(); ++it) |
| 538 (*birth_map)[it->first] = it->second; | 612 (*birth_map)[it->first] = it->second; |
| 539 for (DeathMap::iterator it = death_map_.begin(); | 613 for (DeathMap::iterator it = death_map_.begin(); |
| 540 it != death_map_.end(); ++it) { | 614 it != death_map_.end(); ++it) { |
| 541 (*death_map)[it->first] = it->second; | 615 (*death_map)[it->first] = it->second; |
| 542 if (reset_max) | 616 if (reset_max) |
| 543 it->second.ResetMax(); | 617 it->second.ResetMax(); |
| 544 } | 618 } |
| 545 | 619 |
| 546 if (!kTrackParentChildLinks) | 620 if (!kTrackParentChildLinks) |
| 547 return; | 621 return; |
| 548 | 622 |
| 549 for (ParentChildSet::iterator it = parent_child_set_.begin(); | 623 for (ParentChildSet::iterator it = parent_child_set_.begin(); |
| 550 it != parent_child_set_.end(); ++it) | 624 it != parent_child_set_.end(); ++it) |
| 551 parent_child_set->insert(*it); | 625 parent_child_set->insert(*it); |
| 552 } | 626 } |
| 553 | 627 |
| 554 // static | 628 // static |
| 555 void ThreadData::SendAllMaps(bool reset_max, class DataCollector* target) { | |
| 556 if (!kTrackAllTaskObjects) | |
| 557 return; // Not compiled in. | |
| 558 // Get an unchanging copy of a ThreadData list. | |
| 559 ThreadData* my_list = ThreadData::first(); | |
| 560 | |
| 561 // Gather data serially. | |
| 562 // This hackish approach *can* get some slightly corrupt tallies, as we are | |
| 563 // grabbing values without the protection of a lock, but it has the advantage | |
| 564 // of working even with threads that don't have message loops. If a user | |
| 565 // sees any strangeness, they can always just run their stats gathering a | |
| 566 // second time. | |
| 567 for (ThreadData* thread_data = my_list; | |
| 568 thread_data; | |
| 569 thread_data = thread_data->next()) { | |
| 570 // Get copy of data. | |
| 571 ThreadData::BirthMap birth_map; | |
| 572 ThreadData::DeathMap death_map; | |
| 573 ThreadData::ParentChildSet parent_child_set; | |
| 574 thread_data->SnapshotMaps(reset_max, &birth_map, &death_map, | |
| 575 &parent_child_set); | |
| 576 target->Append(*thread_data, birth_map, death_map, parent_child_set); | |
| 577 } | |
| 578 } | |
| 579 | |
| 580 // static | |
| 581 void ThreadData::ResetAllThreadData() { | 629 void ThreadData::ResetAllThreadData() { |
| 582 ThreadData* my_list = first(); | 630 ThreadData* my_list = first(); |
| 583 | 631 |
| 584 for (ThreadData* thread_data = my_list; | 632 for (ThreadData* thread_data = my_list; |
| 585 thread_data; | 633 thread_data; |
| 586 thread_data = thread_data->next()) | 634 thread_data = thread_data->next()) |
| 587 thread_data->Reset(); | 635 thread_data->Reset(); |
| 588 } | 636 } |
| 589 | 637 |
| 590 void ThreadData::Reset() { | 638 void ThreadData::Reset() { |
| (...skipping 187 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 778 thread_data_list = thread_data_list->next(); | 826 thread_data_list = thread_data_list->next(); |
| 779 | 827 |
| 780 for (BirthMap::iterator it = next_thread_data->birth_map_.begin(); | 828 for (BirthMap::iterator it = next_thread_data->birth_map_.begin(); |
| 781 next_thread_data->birth_map_.end() != it; ++it) | 829 next_thread_data->birth_map_.end() != it; ++it) |
| 782 delete it->second; // Delete the Birth Records. | 830 delete it->second; // Delete the Birth Records. |
| 783 delete next_thread_data; // Includes all Death Records. | 831 delete next_thread_data; // Includes all Death Records. |
| 784 } | 832 } |
| 785 } | 833 } |
| 786 | 834 |
| 787 //------------------------------------------------------------------------------ | 835 //------------------------------------------------------------------------------ |
| 788 // Individual 3-tuple of birth (place and thread) along with death thread, and | 836 SerializedSnapshot::SerializedSnapshot() { |
| 789 // the accumulated stats for instances (DeathData). | |
| 790 | |
| 791 Snapshot::Snapshot(const BirthOnThread& birth_on_thread, | |
| 792 const ThreadData& death_thread, | |
| 793 const DeathData& death_data) | |
| 794 : birth_(&birth_on_thread), | |
| 795 death_thread_(&death_thread), | |
| 796 death_data_(death_data) { | |
| 797 } | 837 } |
| 798 | 838 |
| 799 Snapshot::Snapshot(const BirthOnThread& birth_on_thread, int count) | 839 SerializedSnapshot::SerializedSnapshot(const BirthOnThread& birth, |
| 800 : birth_(&birth_on_thread), | 840 const DeathData& death_data, |
| 801 death_thread_(NULL), | 841 const std::string& death_thread_name) |
| 802 death_data_(DeathData(count)) { | 842 : birth(birth), |
| 843 death_data(death_data), | |
| 844 death_thread_name(death_thread_name) { | |
| 803 } | 845 } |
| 804 | 846 |
| 805 const std::string Snapshot::DeathThreadName() const { | 847 SerializedSnapshot::~SerializedSnapshot() { |
| 806 if (death_thread_) | |
| 807 return death_thread_->thread_name(); | |
| 808 return "Still_Alive"; | |
| 809 } | |
| 810 | |
| 811 base::DictionaryValue* Snapshot::ToValue() const { | |
| 812 base::DictionaryValue* dictionary = new base::DictionaryValue; | |
| 813 // TODO(jar): Switch the next two lines to: | |
| 814 // birth_->ToValue("birth", dictionary); | |
| 815 // ...but that will require fixing unit tests, and JS to take | |
| 816 // "birth_location" rather than "location" | |
| 817 dictionary->Set("birth_thread", | |
| 818 base::Value::CreateStringValue(birth_->birth_thread()->thread_name())); | |
| 819 dictionary->Set("location", birth_->location().ToValue()); | |
| 820 | |
| 821 dictionary->Set("death_data", death_data_.ToValue()); | |
| 822 dictionary->Set("death_thread", | |
| 823 base::Value::CreateStringValue(DeathThreadName())); | |
| 824 return dictionary; | |
| 825 } | 848 } |
| 826 | 849 |
| 827 //------------------------------------------------------------------------------ | 850 //------------------------------------------------------------------------------ |
| 828 // DataCollector | 851 // SerializedParentChildPair |
| 829 | 852 |
| 830 DataCollector::DataCollector() {} | 853 SerializedParentChildPair::SerializedParentChildPair(){ |
| 831 | |
| 832 DataCollector::~DataCollector() { | |
| 833 } | 854 } |
| 834 | 855 |
| 835 void DataCollector::Append(const ThreadData& thread_data, | 856 SerializedParentChildPair::SerializedParentChildPair( |
| 836 const ThreadData::BirthMap& birth_map, | 857 const ThreadData::ParentChildPair& parent_child) |
| 837 const ThreadData::DeathMap& death_map, | 858 : parent(*parent_child.first), |
| 838 const ThreadData::ParentChildSet& parent_child_set) { | 859 child(*parent_child.second) { |
| 839 for (ThreadData::DeathMap::const_iterator it = death_map.begin(); | |
| 840 it != death_map.end(); ++it) { | |
| 841 collection_.push_back(Snapshot(*it->first, thread_data, it->second)); | |
| 842 global_birth_count_[it->first] -= it->first->birth_count(); | |
| 843 } | |
| 844 | |
| 845 for (ThreadData::BirthMap::const_iterator it = birth_map.begin(); | |
| 846 it != birth_map.end(); ++it) { | |
| 847 global_birth_count_[it->second] += it->second->birth_count(); | |
| 848 } | |
| 849 | |
| 850 if (!kTrackParentChildLinks) | |
| 851 return; | |
| 852 | |
| 853 for (ThreadData::ParentChildSet::const_iterator it = parent_child_set.begin(); | |
| 854 it != parent_child_set.end(); ++it) { | |
| 855 parent_child_set_.insert(*it); | |
| 856 } | |
| 857 } | 860 } |
| 858 | 861 |
| 859 DataCollector::Collection* DataCollector::collection() { | 862 SerializedParentChildPair::~SerializedParentChildPair() { |
| 860 return &collection_; | |
| 861 } | 863 } |
| 862 | 864 |
| 863 void DataCollector::AddListOfLivingObjects() { | 865 //------------------------------------------------------------------------------ |
| 864 for (BirthCount::iterator it = global_birth_count_.begin(); | 866 // SerializedProcessData |
| 865 it != global_birth_count_.end(); ++it) { | 867 |
| 866 if (it->second > 0) | 868 SerializedProcessData::SerializedProcessData() |
| 867 collection_.push_back(Snapshot(*it->first, it->second)); | 869 : process_id(base::GetCurrentProcId()) { |
| 868 } | |
| 869 } | 870 } |
| 870 | 871 |
| 871 void DataCollector::ToValue(base::DictionaryValue* dictionary) const { | 872 SerializedProcessData::~SerializedProcessData() { |
| 872 base::ListValue* list = new base::ListValue; | |
| 873 for (size_t i = 0; i < collection_.size(); ++i) { | |
| 874 list->Append(collection_[i].ToValue()); | |
| 875 } | |
| 876 dictionary->Set("list", list); | |
| 877 | |
| 878 base::ListValue* descendants = new base::ListValue; | |
| 879 for (ThreadData::ParentChildSet::const_iterator it = | |
| 880 parent_child_set_.begin(); | |
| 881 it != parent_child_set_.end(); | |
| 882 ++it) { | |
| 883 base::DictionaryValue* parent_child = new base::DictionaryValue; | |
| 884 it->first->ToValue("parent", parent_child); | |
| 885 it->second->ToValue("child", parent_child); | |
| 886 descendants->Append(parent_child); | |
| 887 } | |
| 888 dictionary->Set("descendants", descendants); | |
| 889 } | 873 } |
| 890 | 874 |
| 891 } // namespace tracked_objects | 875 } // namespace tracked_objects |
| OLD | NEW |