Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
| 6 | 6 |
| 7 #include <limits.h> | 7 #include <limits.h> |
| 8 #include <stdlib.h> | 8 #include <stdlib.h> |
| 9 | 9 |
| 10 #include "base/atomicops.h" | 10 #include "base/atomicops.h" |
| (...skipping 209 matching lines...) | |
| 220 | 220 |
| 221 //------------------------------------------------------------------------------ | 221 //------------------------------------------------------------------------------ |
| 222 Births::Births(const Location& location, const ThreadData& current) | 222 Births::Births(const Location& location, const ThreadData& current) |
| 223 : BirthOnThread(location, current), | 223 : BirthOnThread(location, current), |
| 224 birth_count_(1) { } | 224 birth_count_(1) { } |
| 225 | 225 |
| 226 int Births::birth_count() const { return birth_count_; } | 226 int Births::birth_count() const { return birth_count_; } |
| 227 | 227 |
| 228 void Births::RecordBirth() { ++birth_count_; } | 228 void Births::RecordBirth() { ++birth_count_; } |
| 229 | 229 |
| | 230 void Births::SubtractBirths(int count) { |
| | 231 birth_count_ -= count; |
| | 232 } |
| | 233 |
| 230 //------------------------------------------------------------------------------ | 234 //------------------------------------------------------------------------------ |
| 231 // ThreadData maintains the central data for all births and deaths on a single | 235 // ThreadData maintains the central data for all births and deaths on a single |
| 232 // thread. | 236 // thread. |
| 233 | 237 |
| 234 // TODO(jar): We should pull all these static vars together, into a struct, and | 238 // TODO(jar): We should pull all these static vars together, into a struct, and |
| 235 // optimize layout so that we benefit from locality of reference during accesses | 239 // optimize layout so that we benefit from locality of reference during accesses |
| 236 // to them. | 240 // to them. |
| 237 | 241 |
| 238 // static | 242 // static |
| 239 NowFunction* ThreadData::now_function_ = NULL; | 243 NowFunction* ThreadData::now_function_ = NULL; |
| (...skipping 22 matching lines...) | |
| 262 // static | 266 // static |
| 263 ThreadData* ThreadData::first_retired_worker_ = NULL; | 267 ThreadData* ThreadData::first_retired_worker_ = NULL; |
| 264 | 268 |
| 265 // static | 269 // static |
| 266 base::LazyInstance<base::Lock>::Leaky | 270 base::LazyInstance<base::Lock>::Leaky |
| 267 ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER; | 271 ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER; |
| 268 | 272 |
| 269 // static | 273 // static |
| 270 ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED; | 274 ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED; |
| 271 | 275 |
| | 276 // static |
| | 277 base::LazyInstance<PhasedProcessDataSnapshots> |
| | 278 ThreadData::completed_phases_snapshots_ = LAZY_INSTANCE_INITIALIZER; |
| | 279 |
| 272 ThreadData::ThreadData(const std::string& suggested_name) | 280 ThreadData::ThreadData(const std::string& suggested_name) |
| 273 : next_(NULL), | 281 : next_(NULL), |
| 274 next_retired_worker_(NULL), | 282 next_retired_worker_(NULL), |
| 275 worker_thread_number_(0), | 283 worker_thread_number_(0), |
| 276 incarnation_count_for_pool_(-1), | 284 incarnation_count_for_pool_(-1), |
| 277 current_stopwatch_(NULL) { | 285 current_stopwatch_(NULL) { |
| 278 DCHECK_GE(suggested_name.size(), 0u); | 286 DCHECK_GE(suggested_name.size(), 0u); |
| 279 thread_name_ = suggested_name; | 287 thread_name_ = suggested_name; |
| 280 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. | 288 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. |
| 281 } | 289 } |
| (...skipping 101 matching lines...) | |
| 383 return; | 391 return; |
| 384 } | 392 } |
| 385 // We must NOT do any allocations during this callback. | 393 // We must NOT do any allocations during this callback. |
| 386 // Using the simple linked lists avoids all allocations. | 394 // Using the simple linked lists avoids all allocations. |
| 387 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); | 395 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); |
| 388 this->next_retired_worker_ = first_retired_worker_; | 396 this->next_retired_worker_ = first_retired_worker_; |
| 389 first_retired_worker_ = this; | 397 first_retired_worker_ = this; |
| 390 } | 398 } |
| 391 | 399 |
| 392 // static | 400 // static |
| 393 void ThreadData::Snapshot(ProcessDataSnapshot* process_data) { | 401 void ThreadData::GetProcessDataSnapshot( |
| 394 // Add births that have run to completion to |collected_data|. | 402 int current_profiling_phase, |
| 395 // |birth_counts| tracks the total number of births recorded at each location | 403 ProcessDataSnapshot* process_data_snapshot) { |
| 396 // for which we have not seen a death count. | 404 process_data_snapshot->phased_process_data_snapshots = |
| 397 BirthCountMap birth_counts; | 405 completed_phases_snapshots_.Get(); |
| 398 ThreadData::SnapshotAllExecutedTasks(process_data, &birth_counts); | 406 ThreadData::Snapshot( |
| | 407 false, &process_data_snapshot |
| | 408 ->phased_process_data_snapshots[current_profiling_phase]); |
| | 409 } |
| 399 | 410 |
| 400 // Add births that are still active -- i.e. objects that have tallied a birth, | 411 // static |
| 401 // but have not yet tallied a matching death, and hence must be either | 412 void ThreadData::OnProfilingPhaseCompletion(int profiling_phase) { |
| 402 // running, queued up, or being held in limbo for future posting. | 413 if (!kTrackAllTaskObjects) |
| 403 for (BirthCountMap::const_iterator it = birth_counts.begin(); | 414 return; // Not compiled in. |
| 404 it != birth_counts.end(); ++it) { | 415 |
| 405 if (it->second > 0) { | 416 ThreadData::Snapshot(true, |
| 406 process_data->tasks.push_back( | 417 &completed_phases_snapshots_.Get()[profiling_phase]); |
| | *Alexei Svitkine (slow), 2015/03/06 20:06:40: "Can this function check that this isn't calling wi" / vadimt, 2015/03/06 21:34:58: "Done."* |
| 407 TaskSnapshot(*it->first, DeathData(it->second), "Still_Alive")); | |
| 408 } | |
| 409 } | |
| 410 } | 418 } |
| 411 | 419 |
| 412 Births* ThreadData::TallyABirth(const Location& location) { | 420 Births* ThreadData::TallyABirth(const Location& location) { |
| 413 BirthMap::iterator it = birth_map_.find(location); | 421 BirthMap::iterator it = birth_map_.find(location); |
| 414 Births* child; | 422 Births* child; |
| 415 if (it != birth_map_.end()) { | 423 if (it != birth_map_.end()) { |
| 416 child = it->second; | 424 child = it->second; |
| 417 child->RecordBirth(); | 425 child->RecordBirth(); |
| 418 } else { | 426 } else { |
| 419 child = new Births(location, *this); // Leak this. | 427 child = new Births(location, *this); // Leak this. |
| (...skipping 11 matching lines...) Expand all Loading... | |
| 431 // Lock since the map may get relocated now, and other threads sometimes | 439 // Lock since the map may get relocated now, and other threads sometimes |
| 432 // snapshot it (but they lock before copying it). | 440 // snapshot it (but they lock before copying it). |
| 433 base::AutoLock lock(map_lock_); | 441 base::AutoLock lock(map_lock_); |
| 434 parent_child_set_.insert(pair); | 442 parent_child_set_.insert(pair); |
| 435 } | 443 } |
| 436 } | 444 } |
| 437 | 445 |
| 438 return child; | 446 return child; |
| 439 } | 447 } |
| 440 | 448 |
| 441 void ThreadData::TallyADeath(const Births& birth, | 449 void ThreadData::TallyADeath(int32 queue_duration, |
| 442 int32 queue_duration, | 450 const TaskStopwatch& stopwatch, |
| 443 const TaskStopwatch& stopwatch) { | 451 Births* birth) { |
| 444 int32 run_duration = stopwatch.RunDurationMs(); | 452 int32 run_duration = stopwatch.RunDurationMs(); |
| 445 | 453 |
| 446 // Stir in some randomness, plus add constant in case durations are zero. | 454 // Stir in some randomness, plus add constant in case durations are zero. |
| 447 const uint32 kSomePrimeNumber = 2147483647; | 455 const uint32 kSomePrimeNumber = 2147483647; |
| 448 random_number_ += queue_duration + run_duration + kSomePrimeNumber; | 456 random_number_ += queue_duration + run_duration + kSomePrimeNumber; |
| 449 // An address is going to have some randomness to it as well ;-). | 457 // An address is going to have some randomness to it as well ;-). |
| 450 random_number_ ^= static_cast<uint32>(&birth - reinterpret_cast<Births*>(0)); | 458 random_number_ ^= static_cast<uint32>(birth - reinterpret_cast<Births*>(0)); |
| 451 | 459 |
| 452 // We don't have queue durations without OS timer. OS timer is automatically | 460 // We don't have queue durations without OS timer. OS timer is automatically |
| 453 // used for task-post-timing, so the use of an alternate timer implies all | 461 // used for task-post-timing, so the use of an alternate timer implies all |
| 454 // queue times are invalid, unless it was explicitly said that we can trust | 462 // queue times are invalid, unless it was explicitly said that we can trust |
| 455 // the alternate timer. | 463 // the alternate timer. |
| 456 if (kAllowAlternateTimeSourceHandling && | 464 if (kAllowAlternateTimeSourceHandling && |
| 457 now_function_ && | 465 now_function_ && |
| 458 !now_function_is_time_) { | 466 !now_function_is_time_) { |
| 459 queue_duration = 0; | 467 queue_duration = 0; |
| 460 } | 468 } |
| 461 | 469 |
| 462 DeathMap::iterator it = death_map_.find(&birth); | 470 DeathMap::iterator it = death_map_.find(birth); |
| 463 DeathData* death_data; | 471 DeathData* death_data; |
| 464 if (it != death_map_.end()) { | 472 if (it != death_map_.end()) { |
| 465 death_data = &it->second; | 473 death_data = &it->second; |
| 466 } else { | 474 } else { |
| 467 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. | 475 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. |
| 468 death_data = &death_map_[&birth]; | 476 death_data = &death_map_[birth]; |
| 469 } // Release lock ASAP. | 477 } // Release lock ASAP. |
| 470 death_data->RecordDeath(queue_duration, run_duration, random_number_); | 478 death_data->RecordDeath(queue_duration, run_duration, random_number_); |
| 471 | 479 |
| 472 if (!kTrackParentChildLinks) | 480 if (!kTrackParentChildLinks) |
| 473 return; | 481 return; |
| 474 if (!parent_stack_.empty()) { // We might get turned off. | 482 if (!parent_stack_.empty()) { // We might get turned off. |
| 475 DCHECK_EQ(parent_stack_.top(), &birth); | 483 DCHECK_EQ(parent_stack_.top(), birth); |
| 476 parent_stack_.pop(); | 484 parent_stack_.pop(); |
| 477 } | 485 } |
| 478 } | 486 } |
| 479 | 487 |
| 480 // static | 488 // static |
| 481 Births* ThreadData::TallyABirthIfActive(const Location& location) { | 489 Births* ThreadData::TallyABirthIfActive(const Location& location) { |
| 482 if (!kTrackAllTaskObjects) | 490 if (!kTrackAllTaskObjects) |
| 483 return NULL; // Not compiled in. | 491 return NULL; // Not compiled in. |
| 484 | 492 |
| 485 if (!TrackingStatus()) | 493 if (!TrackingStatus()) |
| 486 return NULL; | 494 return NULL; |
| 487 ThreadData* current_thread_data = Get(); | 495 ThreadData* current_thread_data = Get(); |
| 488 if (!current_thread_data) | 496 if (!current_thread_data) |
| 489 return NULL; | 497 return NULL; |
| 490 return current_thread_data->TallyABirth(location); | 498 return current_thread_data->TallyABirth(location); |
| 491 } | 499 } |
| 492 | 500 |
| 493 // static | 501 // static |
| 494 void ThreadData::TallyRunOnNamedThreadIfTracking( | 502 void ThreadData::TallyRunOnNamedThreadIfTracking( |
| 495 const base::TrackingInfo& completed_task, | 503 const base::TrackingInfo& completed_task, |
| 496 const TaskStopwatch& stopwatch) { | 504 const TaskStopwatch& stopwatch) { |
| 497 if (!kTrackAllTaskObjects) | 505 if (!kTrackAllTaskObjects) |
| 498 return; // Not compiled in. | 506 return; // Not compiled in. |
| 499 | 507 |
| 500 // Even if we have been DEACTIVATED, we will process any pending births so | 508 // Even if we have been DEACTIVATED, we will process any pending births so |
| 501 // that our data structures (which counted the outstanding births) remain | 509 // that our data structures (which counted the outstanding births) remain |
| 502 // consistent. | 510 // consistent. |
| 503 const Births* birth = completed_task.birth_tally; | 511 Births* birth = completed_task.birth_tally; |
| 504 if (!birth) | 512 if (!birth) |
| 505 return; | 513 return; |
| 506 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 514 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
| 507 if (!current_thread_data) | 515 if (!current_thread_data) |
| 508 return; | 516 return; |
| 509 | 517 |
| 510 // Watch out for a race where status_ is changing, and hence one or both | 518 // Watch out for a race where status_ is changing, and hence one or both |
| 511 // of start_of_run or end_of_run is zero. In that case, we didn't bother to | 519 // of start_of_run or end_of_run is zero. In that case, we didn't bother to |
| 512 // get a time value since we "weren't tracking" and we were trying to be | 520 // get a time value since we "weren't tracking" and we were trying to be |
| 513 // efficient by not calling for a genuine time value. For simplicity, we'll | 521 // efficient by not calling for a genuine time value. For simplicity, we'll |
| 514 // use a default zero duration when we can't calculate a true value. | 522 // use a default zero duration when we can't calculate a true value. |
| 515 TrackedTime start_of_run = stopwatch.StartTime(); | 523 TrackedTime start_of_run = stopwatch.StartTime(); |
| 516 int32 queue_duration = 0; | 524 int32 queue_duration = 0; |
| 517 if (!start_of_run.is_null()) { | 525 if (!start_of_run.is_null()) { |
| 518 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) | 526 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) |
| 519 .InMilliseconds(); | 527 .InMilliseconds(); |
| 520 } | 528 } |
| 521 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 529 current_thread_data->TallyADeath(queue_duration, stopwatch, birth); |
| 522 } | 530 } |
| 523 | 531 |
| 524 // static | 532 // static |
| 525 void ThreadData::TallyRunOnWorkerThreadIfTracking( | 533 void ThreadData::TallyRunOnWorkerThreadIfTracking( |
| 526 const Births* birth, | |
| 527 const TrackedTime& time_posted, | 534 const TrackedTime& time_posted, |
| 528 const TaskStopwatch& stopwatch) { | 535 const TaskStopwatch& stopwatch, |
| | 536 Births* birth) { |
| 529 if (!kTrackAllTaskObjects) | 537 if (!kTrackAllTaskObjects) |
| 530 return; // Not compiled in. | 538 return; // Not compiled in. |
| 531 | 539 |
| 532 // Even if we have been DEACTIVATED, we will process any pending births so | 540 // Even if we have been DEACTIVATED, we will process any pending births so |
| 533 // that our data structures (which counted the outstanding births) remain | 541 // that our data structures (which counted the outstanding births) remain |
| 534 // consistent. | 542 // consistent. |
| 535 if (!birth) | 543 if (!birth) |
| 536 return; | 544 return; |
| 537 | 545 |
| 538 // TODO(jar): Support the option to coalesce all worker-thread activity under | 546 // TODO(jar): Support the option to coalesce all worker-thread activity under |
| 539 // one ThreadData instance that uses locks to protect *all* access. This will | 547 // one ThreadData instance that uses locks to protect *all* access. This will |
| 540 // reduce memory (making it provably bounded), but run incrementally slower | 548 // reduce memory (making it provably bounded), but run incrementally slower |
| 541 // (since we'll use locks on TallyABirth and TallyADeath). The good news is | 549 // (since we'll use locks on TallyABirth and TallyADeath). The good news is |
| 542 // that the locks on TallyADeath will be *after* the worker thread has run, | 550 // that the locks on TallyADeath will be *after* the worker thread has run, |
| 543 // and hence nothing will be waiting for the completion (... besides some | 551 // and hence nothing will be waiting for the completion (... besides some |
| 544 // other thread that might like to run). Also, the worker threads' tasks are | 552 // other thread that might like to run). Also, the worker threads' tasks are |
| 545 // generally longer, and hence the cost of the lock may perchance be amortized | 553 // generally longer, and hence the cost of the lock may perchance be amortized |
| 546 // over the long task's lifetime. | 554 // over the long task's lifetime. |
| 547 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 555 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
| 548 if (!current_thread_data) | 556 if (!current_thread_data) |
| 549 return; | 557 return; |
| 550 | 558 |
| 551 TrackedTime start_of_run = stopwatch.StartTime(); | 559 TrackedTime start_of_run = stopwatch.StartTime(); |
| 552 int32 queue_duration = 0; | 560 int32 queue_duration = 0; |
| 553 if (!start_of_run.is_null()) { | 561 if (!start_of_run.is_null()) { |
| 554 queue_duration = (start_of_run - time_posted).InMilliseconds(); | 562 queue_duration = (start_of_run - time_posted).InMilliseconds(); |
| 555 } | 563 } |
| 556 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 564 current_thread_data->TallyADeath(queue_duration, stopwatch, birth); |
| 557 } | 565 } |
| 558 | 566 |
| 559 // static | 567 // static |
| 560 void ThreadData::TallyRunInAScopedRegionIfTracking( | 568 void ThreadData::TallyRunInAScopedRegionIfTracking( |
| 561 const Births* birth, | 569 const TaskStopwatch& stopwatch, |
| 562 const TaskStopwatch& stopwatch) { | 570 Births* birth) { |
| 563 if (!kTrackAllTaskObjects) | 571 if (!kTrackAllTaskObjects) |
| 564 return; // Not compiled in. | 572 return; // Not compiled in. |
| 565 | 573 |
| 566 // Even if we have been DEACTIVATED, we will process any pending births so | 574 // Even if we have been DEACTIVATED, we will process any pending births so |
| 567 // that our data structures (which counted the outstanding births) remain | 575 // that our data structures (which counted the outstanding births) remain |
| 568 // consistent. | 576 // consistent. |
| 569 if (!birth) | 577 if (!birth) |
| 570 return; | 578 return; |
| 571 | 579 |
| 572 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 580 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
| 573 if (!current_thread_data) | 581 if (!current_thread_data) |
| 574 return; | 582 return; |
| 575 | 583 |
| 576 int32 queue_duration = 0; | 584 int32 queue_duration = 0; |
| 577 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 585 current_thread_data->TallyADeath(queue_duration, stopwatch, birth); |
| 578 } | 586 } |
| 579 | 587 |
| 580 // static | 588 // static |
| 581 void ThreadData::SnapshotAllExecutedTasks(ProcessDataSnapshot* process_data, | 589 void ThreadData::SnapshotAllExecutedTasks( |
| 582 BirthCountMap* birth_counts) { | 590 bool reset, |
| | 591 ProcessDataPhaseSnapshot* process_data_phase, |
| | 592 BirthCountMap* birth_counts) { |
| 583 if (!kTrackAllTaskObjects) | 593 if (!kTrackAllTaskObjects) |
| 584 return; // Not compiled in. | 594 return; // Not compiled in. |
| 585 | 595 |
| 586 // Get an unchanging copy of a ThreadData list. | 596 // Get an unchanging copy of a ThreadData list. |
| 587 ThreadData* my_list = ThreadData::first(); | 597 ThreadData* my_list = ThreadData::first(); |
| 588 | 598 |
| 589 // Gather data serially. | 599 // Gather data serially. |
| 590 // This hackish approach *can* get some slightly corrupt tallies, as we are | 600 // This hackish approach *can* get some slightly corrupt tallies, as we are |
| 591 // grabbing values without the protection of a lock, but it has the advantage | 601 // grabbing values without the protection of a lock, but it has the advantage |
| 592 // of working even with threads that don't have message loops. If a user | 602 // of working even with threads that don't have message loops. If a user |
| 593 // sees any strangeness, they can always just run their stats gathering a | 603 // sees any strangeness, they can always just run their stats gathering a |
| 594 // second time. | 604 // second time. |
| 595 for (ThreadData* thread_data = my_list; | 605 for (ThreadData* thread_data = my_list; |
| 596 thread_data; | 606 thread_data; |
| 597 thread_data = thread_data->next()) { | 607 thread_data = thread_data->next()) { |
| 598 thread_data->SnapshotExecutedTasks(process_data, birth_counts); | 608 thread_data->SnapshotExecutedTasks(reset, process_data_phase, birth_counts); |
| 599 } | 609 } |
| 600 } | 610 } |
| 601 | 611 |
| 602 void ThreadData::SnapshotExecutedTasks(ProcessDataSnapshot* process_data, | 612 // static |
| 603 BirthCountMap* birth_counts) { | 613 void ThreadData::Snapshot(bool reset, |
| | 614 ProcessDataPhaseSnapshot* process_data_phase) { |
| | 615 // Add births that have run to completion to |collected_data|. |
| | 616 // |birth_counts| tracks the total number of births recorded at each location |
| | 617 // for which we have not seen a death count. |
| | 618 BirthCountMap birth_counts; |
| | 619 ThreadData::SnapshotAllExecutedTasks(reset, process_data_phase, |
| | 620 &birth_counts); |
| | 621 |
| | 622 // Add births that are still active -- i.e. objects that have tallied a birth, |
| | 623 // but have not yet tallied a matching death, and hence must be either |
| | 624 // running, queued up, or being held in limbo for future posting. |
| | 625 for (const auto& i : birth_counts) { |
| | 626 if (i.second > 0) { |
| | 627 process_data_phase->tasks.push_back( |
| | 628 TaskSnapshot(*i.first, DeathData(i.second), "Still_Alive")); |
| | 629 } |
| | 630 } |
| | 631 } |
| | 632 |
| | 633 void ThreadData::SnapshotExecutedTasks( |
| | 634 bool reset, |
| | 635 ProcessDataPhaseSnapshot* process_data_phase, |
| | 636 BirthCountMap* birth_counts) { |
| 604 // Get copy of data, so that the data will not change during the iterations | 637 // Get copy of data, so that the data will not change during the iterations |
| 605 // and processing. | 638 // and processing. |
| 606 ThreadData::BirthMap birth_map; | 639 ThreadData::BirthMap birth_map; |
| 607 ThreadData::DeathMap death_map; | 640 ThreadData::DeathMap death_map; |
| 608 ThreadData::ParentChildSet parent_child_set; | 641 ThreadData::ParentChildSet parent_child_set; |
| 609 SnapshotMaps(&birth_map, &death_map, &parent_child_set); | 642 SnapshotMaps(reset, &birth_map, &death_map, &parent_child_set); |
| 610 | 643 |
| 611 for (ThreadData::DeathMap::const_iterator it = death_map.begin(); | 644 for (const auto& i : death_map) { |
| 612 it != death_map.end(); ++it) { | 645 process_data_phase->tasks.push_back( |
| 613 process_data->tasks.push_back( | 646 TaskSnapshot(*i.first, i.second, thread_name())); |
| 614 TaskSnapshot(*it->first, it->second, thread_name())); | 647 // We don't populate birth_counts if |reset| is true. |
| 615 (*birth_counts)[it->first] -= it->first->birth_count(); | 648 if (!reset) |
| | 649 (*birth_counts)[i.first] -= i.first->birth_count(); |
| 616 } | 650 } |
| 617 | 651 |
| 618 for (ThreadData::BirthMap::const_iterator it = birth_map.begin(); | 652 for (const auto& i : birth_map) { |
| 619 it != birth_map.end(); ++it) { | 653 (*birth_counts)[i.second] += i.second->birth_count(); |
| 620 (*birth_counts)[it->second] += it->second->birth_count(); | |
| 621 } | 654 } |
| 622 | 655 |
| 623 if (!kTrackParentChildLinks) | 656 if (!kTrackParentChildLinks) |
| 624 return; | 657 return; |
| 625 | 658 |
| 626 for (ThreadData::ParentChildSet::const_iterator it = parent_child_set.begin(); | 659 for (const auto& i : parent_child_set) { |
| 627 it != parent_child_set.end(); ++it) { | 660 process_data_phase->descendants.push_back(ParentChildPairSnapshot(i)); |
| 628 process_data->descendants.push_back(ParentChildPairSnapshot(*it)); | |
| 629 } | 661 } |
| 630 } | 662 } |
| 631 | 663 |
| 632 // This may be called from another thread. | 664 // This may be called from another thread. |
| 633 void ThreadData::SnapshotMaps(BirthMap* birth_map, | 665 void ThreadData::SnapshotMaps(bool reset, |
| | 666 BirthMap* birth_map, |
| 634 DeathMap* death_map, | 667 DeathMap* death_map, |
| 635 ParentChildSet* parent_child_set) { | 668 ParentChildSet* parent_child_set) { |
| 636 base::AutoLock lock(map_lock_); | 669 base::AutoLock lock(map_lock_); |
| 637 for (BirthMap::const_iterator it = birth_map_.begin(); | 670 if (!reset) { |
| 638 it != birth_map_.end(); ++it) | 671 // When reset is not requested, snapshot births. |
| 639 (*birth_map)[it->first] = it->second; | 672 for (const auto& i : birth_map_) |
| 640 for (DeathMap::iterator it = death_map_.begin(); | 673 (*birth_map)[i.first] = i.second; |
| 641 it != death_map_.end(); ++it) { | 674 } |
| 642 (*death_map)[it->first] = it->second; | 675 for (auto& i : death_map_) { |
| | 676 // Don't snapshot deaths with 0 count. Deaths with 0 count can result from |
| | 677 // prior calls to SnapshotMaps with reset=true param. |
| | 678 if (i.second.count() <= 0) |
| | 679 continue; |
| | 680 |
| | 681 (*death_map)[i.first] = i.second; |
| | 682 if (reset) { |
| | 683 i.first->SubtractBirths(i.second.count()); |
| | 684 i.second.Clear(); |
| | 685 } |
| 643 } | 686 } |
| 644 | 687 |
| 645 if (!kTrackParentChildLinks) | 688 if (!kTrackParentChildLinks) |
| 646 return; | 689 return; |
| 647 | 690 |
| 648 for (ParentChildSet::iterator it = parent_child_set_.begin(); | 691 for (const auto& i : parent_child_set_) |
| 649 it != parent_child_set_.end(); ++it) | 692 parent_child_set->insert(i); |
| 650 parent_child_set->insert(*it); | |
| 651 } | 693 } |
| 652 | 694 |
| 653 static void OptionallyInitializeAlternateTimer() { | 695 static void OptionallyInitializeAlternateTimer() { |
| 654 NowFunction* alternate_time_source = GetAlternateTimeSource(); | 696 NowFunction* alternate_time_source = GetAlternateTimeSource(); |
| 655 if (alternate_time_source) | 697 if (alternate_time_source) |
| 656 ThreadData::SetAlternateTimeSource(alternate_time_source); | 698 ThreadData::SetAlternateTimeSource(alternate_time_source); |
| 657 } | 699 } |
| 658 | 700 |
| 659 bool ThreadData::Initialize() { | 701 bool ThreadData::Initialize() { |
| 660 if (!kTrackAllTaskObjects) | 702 if (!kTrackAllTaskObjects) |
| (...skipping 163 matching lines...) | |
| 824 // Do actual recursive delete in all ThreadData instances. | 866 // Do actual recursive delete in all ThreadData instances. |
| 825 while (thread_data_list) { | 867 while (thread_data_list) { |
| 826 ThreadData* next_thread_data = thread_data_list; | 868 ThreadData* next_thread_data = thread_data_list; |
| 827 thread_data_list = thread_data_list->next(); | 869 thread_data_list = thread_data_list->next(); |
| 828 | 870 |
| 829 for (BirthMap::iterator it = next_thread_data->birth_map_.begin(); | 871 for (BirthMap::iterator it = next_thread_data->birth_map_.begin(); |
| 830 next_thread_data->birth_map_.end() != it; ++it) | 872 next_thread_data->birth_map_.end() != it; ++it) |
| 831 delete it->second; // Delete the Birth Records. | 873 delete it->second; // Delete the Birth Records. |
| 832 delete next_thread_data; // Includes all Death Records. | 874 delete next_thread_data; // Includes all Death Records. |
| 833 } | 875 } |
| | 876 |
| | 877 completed_phases_snapshots_.Get().clear(); |
| 834 } | 878 } |
| 835 | 879 |
| 836 //------------------------------------------------------------------------------ | 880 //------------------------------------------------------------------------------ |
| 837 TaskStopwatch::TaskStopwatch() | 881 TaskStopwatch::TaskStopwatch() |
| 838 : wallclock_duration_ms_(0), | 882 : wallclock_duration_ms_(0), |
| 839 current_thread_data_(NULL), | 883 current_thread_data_(NULL), |
| 840 excluded_duration_ms_(0), | 884 excluded_duration_ms_(0), |
| 841 parent_(NULL) { | 885 parent_(NULL) { |
| 842 #if DCHECK_IS_ON() | 886 #if DCHECK_IS_ON() |
| 843 state_ = CREATED; | 887 state_ = CREATED; |
| (...skipping 108 matching lines...) | |
| 952 ParentChildPairSnapshot::ParentChildPairSnapshot( | 996 ParentChildPairSnapshot::ParentChildPairSnapshot( |
| 953 const ThreadData::ParentChildPair& parent_child) | 997 const ThreadData::ParentChildPair& parent_child) |
| 954 : parent(*parent_child.first), | 998 : parent(*parent_child.first), |
| 955 child(*parent_child.second) { | 999 child(*parent_child.second) { |
| 956 } | 1000 } |
| 957 | 1001 |
| 958 ParentChildPairSnapshot::~ParentChildPairSnapshot() { | 1002 ParentChildPairSnapshot::~ParentChildPairSnapshot() { |
| 959 } | 1003 } |
| 960 | 1004 |
| 961 //------------------------------------------------------------------------------ | 1005 //------------------------------------------------------------------------------ |
| 962 // ProcessDataSnapshot | 1006 // ProcessDataPhaseSnapshot |
| | 1007 |
| | 1008 ProcessDataPhaseSnapshot::ProcessDataPhaseSnapshot() { |
| | 1009 } |
| | 1010 |
| | 1011 ProcessDataPhaseSnapshot::~ProcessDataPhaseSnapshot() { |
| | 1012 } |
| | 1013 |
| | 1014 //------------------------------------------------------------------------------ |
| | 1015 // ProcessDataPhaseSnapshot |
| 963 | 1016 |
| 964 ProcessDataSnapshot::ProcessDataSnapshot() | 1017 ProcessDataSnapshot::ProcessDataSnapshot() |
| 965 #if !defined(OS_NACL) | 1018 #if !defined(OS_NACL) |
| 966 : process_id(base::GetCurrentProcId()) { | 1019 : process_id(base::GetCurrentProcId()) { |
| 967 #else | 1020 #else |
| 968 : process_id(0) { | 1021 : process_id(0) { |
| 969 #endif | 1022 #endif |
| 970 } | 1023 } |
| 971 | 1024 |
| 972 ProcessDataSnapshot::~ProcessDataSnapshot() { | 1025 ProcessDataSnapshot::~ProcessDataSnapshot() { |
| 973 } | 1026 } |
| 974 | 1027 |
| 975 } // namespace tracked_objects | 1028 } // namespace tracked_objects |
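
The core of this CL is the phase bookkeeping: `OnProfilingPhaseCompletion` snapshots the live per-thread maps with `reset=true` into `completed_phases_snapshots_`, keyed by profiling phase, and `GetProcessDataSnapshot` later copies those frozen phases and adds a non-resetting snapshot for the current phase. A minimal standalone sketch of that flow, using simplified stand-in types (`PhaseSnapshot`, `g_live_tasks`, etc. are illustrative, not the real `ThreadData` structures):

```cpp
#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Simplified stand-ins for the CL's snapshot types (illustrative only).
using PhaseSnapshot = std::vector<std::string>;        // ~ProcessDataPhaseSnapshot
using PhasedSnapshots = std::map<int, PhaseSnapshot>;  // ~PhasedProcessDataSnapshots

static std::vector<std::string> g_live_tasks;  // Stands in for the per-thread maps.
static PhasedSnapshots g_completed_phases;     // ~ThreadData::completed_phases_snapshots_

// ~ThreadData::Snapshot(reset, ...): copy the live data; optionally reset it.
static void Snapshot(bool reset, PhaseSnapshot* out) {
  *out = g_live_tasks;
  if (reset)
    g_live_tasks.clear();
}

// ~OnProfilingPhaseCompletion: freeze the finished phase and start over from zero.
static void OnProfilingPhaseCompletion(int phase) {
  Snapshot(/*reset=*/true, &g_completed_phases[phase]);
}

// ~GetProcessDataSnapshot: the completed phases plus a live view of the current one.
static PhasedSnapshots GetProcessDataSnapshot(int current_phase) {
  PhasedSnapshots result = g_completed_phases;
  Snapshot(/*reset=*/false, &result[current_phase]);
  return result;
}

int main() {
  g_live_tasks = {"task_a", "task_b"};
  OnProfilingPhaseCompletion(0);  // Phase 0 frozen with 2 tasks; live data reset.
  g_live_tasks = {"task_c"};
  PhasedSnapshots all = GetProcessDataSnapshot(1);
  std::printf("phase 0: %zu tasks, phase 1: %zu tasks\n",
              all[0].size(), all[1].size());  // Prints 2 and 1.
  return 0;
}
```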
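The rewritten `ThreadData::Snapshot` (new lines 613-631) reports outstanding work by accumulating birth counts per location, subtracting the death counts gathered from every thread, and emitting any positive remainder as a `Still_Alive` pseudo-task. A small self-contained sketch of that accounting, with hypothetical locations rather than the real `Births`/`DeathData` types:

```cpp
#include <cstdio>
#include <map>
#include <string>

// Hypothetical per-location tallies standing in for Births/DeathData.
struct Counts {
  int births;
  int deaths;
};

int main() {
  std::map<std::string, Counts> by_location;
  by_location["PostTask@foo.cc:42"] = {5, 3};  // 2 tasks still queued or running.
  by_location["PostTask@bar.cc:17"] = {4, 4};  // Every birth has a matching death.

  // Mirrors ThreadData::Snapshot: births minus observed deaths; only a positive
  // remainder is reported, as a "Still_Alive" pseudo-task for that location.
  for (const auto& entry : by_location) {
    int outstanding = entry.second.births - entry.second.deaths;
    if (outstanding > 0)
      std::printf("%s: %d Still_Alive\n", entry.first.c_str(), outstanding);
  }
  return 0;
}
```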
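In `SnapshotMaps` (new lines 665-693), `reset=true` copies each non-empty death record into the snapshot, subtracts the snapshotted count from the matching `Births` via the new `SubtractBirths`, and clears the record, so the next phase starts counting from zero while still-outstanding births carry over. A simplified sketch of that subtract-and-clear step (stand-in types, not the real maps):

```cpp
#include <cstdio>
#include <map>
#include <string>

// Simplified per-location counters standing in for Births and DeathData.
struct FakeBirths { int birth_count = 0; };
struct FakeDeathData {
  int count = 0;
  void Clear() { count = 0; }
};

int main() {
  std::map<std::string, FakeBirths> births;
  std::map<std::string, FakeDeathData> deaths;
  births["loc"].birth_count = 5;
  deaths["loc"].count = 3;  // 3 tasks completed, 2 still outstanding.

  // Mirrors the reset branch of SnapshotMaps: copy, then rebase both maps.
  std::map<std::string, FakeDeathData> snapshot;
  for (auto& entry : deaths) {
    if (entry.second.count <= 0)
      continue;                                  // Skip already-cleared records.
    snapshot[entry.first] = entry.second;        // Copy into the phase snapshot.
    births[entry.first].birth_count -= entry.second.count;  // ~SubtractBirths().
    entry.second.Clear();
  }

  // The next phase starts from zero deaths; only the 2 outstanding births remain.
  std::printf("carried births=%d, live deaths=%d, snapshotted deaths=%d\n",
              births["loc"].birth_count, deaths["loc"].count,
              snapshot["loc"].count);  // Prints 2, 0, 3.
  return 0;
}
```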
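`TallyADeath` (new lines 449-486) keeps a cheap per-thread pseudo-random accumulator by folding the measured durations and a large constant into `random_number_` and XOR-ing in bits of the `Births` pointer. Assuming that number is used, as in `DeathData::RecordDeath` elsewhere in this file (not shown in this excerpt), to retain a roughly uniform sample of durations, the effect is reservoir-style sampling; a sketch under that assumption, with an illustrative stand-in struct rather than the real `DeathData`:

```cpp
#include <cstdint>
#include <cstdio>

// Reservoir-style sampling driven by a cheap accumulator, in the spirit of
// ThreadData::TallyADeath. SampledDeaths is a stand-in, not the real DeathData.
struct SampledDeaths {
  int count = 0;
  int32_t run_duration_sample = 0;

  void RecordDeath(int32_t run_duration, uint32_t random_number) {
    ++count;
    // Keep each new observation with probability ~1/count so the retained
    // sample stays (approximately) uniform over every death seen so far.
    if (count == 1 || random_number % count == 0)
      run_duration_sample = run_duration;
  }
};

int main() {
  const uint32_t kSomePrimeNumber = 2147483647;  // Same constant as the CL.
  uint32_t random_number = 0;
  SampledDeaths deaths;

  for (int32_t run_duration = 1; run_duration <= 100; ++run_duration) {
    int32_t queue_duration = 0;
    // Stir in the measured durations plus a big constant, as TallyADeath does;
    // the real code additionally XORs in bits of the Births* address.
    random_number += queue_duration + run_duration + kSomePrimeNumber;
    deaths.RecordDeath(run_duration, random_number);
  }
  std::printf("kept run-duration sample: %d ms out of %d deaths\n",
              deaths.run_duration_sample, deaths.count);
  return 0;
}
```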