Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
| 6 | 6 |
| 7 #include <ctype.h> | 7 #include <ctype.h> |
| 8 #include <limits.h> | 8 #include <limits.h> |
| 9 #include <stdlib.h> | 9 #include <stdlib.h> |
| 10 | 10 |
| (...skipping 150 matching lines...) | |
| 161 // TODO(jar): I need to see if this macro to optimize branching is worth using. | 161 // TODO(jar): I need to see if this macro to optimize branching is worth using. |
| 162 // | 162 // |
| 163 // This macro has no branching, so it is surely fast, and is equivalent to: | 163 // This macro has no branching, so it is surely fast, and is equivalent to: |
| 164 // if (assign_it) | 164 // if (assign_it) |
| 165 // target = source; | 165 // target = source; |
| 166 // We use a macro rather than a template to force this to inline. | 166 // We use a macro rather than a template to force this to inline. |
| 167 // Related code for calculating max is discussed on the web. | 167 // Related code for calculating max is discussed on the web. |
| 168 #define CONDITIONAL_ASSIGN(assign_it, target, source) \ | 168 #define CONDITIONAL_ASSIGN(assign_it, target, source) \ |
| 169 ((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it)) | 169 ((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it)) |
| 170 | 170 |
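As a standalone illustration of the branchless assignment macro above, the sketch below wraps the same `CONDITIONAL_ASSIGN` definition in a small test harness (the harness and asserts are additions for the example only, not Chromium code):

```cpp
// Minimal, self-contained check of the CONDITIONAL_ASSIGN trick: when
// assign_it is true, -int32_t(1) is an all-ones mask, so XOR-ing
// (target ^ source) into target yields source; when false, the mask is 0
// and target is left untouched.
#include <cassert>
#include <cstdint>

#define CONDITIONAL_ASSIGN(assign_it, target, source) \
  ((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it))

int main() {
  int32_t target = 7;
  const int32_t source = 42;

  CONDITIONAL_ASSIGN(false, target, source);
  assert(target == 7);   // mask was 0: no assignment happened

  CONDITIONAL_ASSIGN(true, target, source);
  assert(target == 42);  // mask was all ones: source was copied in

  return 0;
}
```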
| 171 void DeathData::RecordDurations(const int32_t queue_duration, | 171 void DeathData::RecordDurations(const base::TimeDelta queue_duration, |
| 172 const int32_t run_duration, | 172 const base::TimeDelta run_duration, |
| 173 const uint32_t random_number) { | 173 const uint32_t random_number) { |
| 174 // We'll just clamp at INT_MAX, but we should note this in the UI as such. | 174 // We'll just clamp at INT_MAX, but we should note this in the UI as such. |
| 175 if (count_ < INT_MAX) | 175 if (count_ < INT_MAX) |
| 176 base::subtle::NoBarrier_Store(&count_, count_ + 1); | 176 base::subtle::NoBarrier_Store(&count_, count_ + 1); |
| 177 | 177 |
| 178 int sample_probability_count = | 178 int sample_probability_count = |
| 179 base::subtle::NoBarrier_Load(&sample_probability_count_); | 179 base::subtle::NoBarrier_Load(&sample_probability_count_); |
| 180 if (sample_probability_count < INT_MAX) | 180 if (sample_probability_count < INT_MAX) |
| 181 ++sample_probability_count; | 181 ++sample_probability_count; |
| 182 base::subtle::NoBarrier_Store(&sample_probability_count_, | 182 base::subtle::NoBarrier_Store(&sample_probability_count_, |
| 183 sample_probability_count); | 183 sample_probability_count); |
| 184 | 184 |
| 185 base::subtle::NoBarrier_Store(&queue_duration_sum_, | 185 base::subtle::NoBarrier_Store( |
| 186 queue_duration_sum_ + queue_duration); | 186 &queue_duration_sum_, |
| 187 base::subtle::NoBarrier_Store(&run_duration_sum_, | 187 queue_duration_sum_ + queue_duration.InMilliseconds()); |
| 188 run_duration_sum_ + run_duration); | 188 base::subtle::NoBarrier_Store( |
| | 189 &run_duration_sum_, run_duration_sum_ + run_duration.InMilliseconds()); |
| 189 | 190 |
| 190 if (queue_duration_max() < queue_duration) | 191 if (queue_duration_max() < queue_duration.InMilliseconds()) |
| 191 base::subtle::NoBarrier_Store(&queue_duration_max_, queue_duration); | 192 base::subtle::NoBarrier_Store(&queue_duration_max_, |
| 192 if (run_duration_max() < run_duration) | 193 queue_duration.InMilliseconds()); |
| 193 base::subtle::NoBarrier_Store(&run_duration_max_, run_duration); | 194 if (run_duration_max() < run_duration.InMilliseconds()) |
| | 195 base::subtle::NoBarrier_Store(&run_duration_max_, |
| | 196 run_duration.InMilliseconds()); |
| 194 | 197 |
| 195 // Take a uniformly distributed sample over all durations ever supplied during | 198 // Take a uniformly distributed sample over all durations ever supplied during |
| 196 // the current profiling phase. | 199 // the current profiling phase. |
| 197 // The probability that we (instead) use this new sample is | 200 // The probability that we (instead) use this new sample is |
| 198 // 1/sample_probability_count_. This results in a completely uniform selection | 201 // 1/sample_probability_count_. This results in a completely uniform selection |
| 199 // of the sample (at least when we don't clamp sample_probability_count_... | 202 // of the sample (at least when we don't clamp sample_probability_count_... |
| 200 // but that should be inconsequentially likely). We ignore the fact that we | 203 // but that should be inconsequentially likely). We ignore the fact that we |
| 201 // correlated our selection of a sample to the run and queue times (i.e., we | 204 // correlated our selection of a sample to the run and queue times (i.e., we |
| 202 // used them to generate random_number). | 205 // used them to generate random_number). |
| 203 CHECK_GT(sample_probability_count, 0); | 206 CHECK_GT(sample_probability_count, 0); |
| 204 if (0 == (random_number % sample_probability_count)) { | 207 if (0 == (random_number % sample_probability_count)) { |
| 205 base::subtle::NoBarrier_Store(&queue_duration_sample_, queue_duration); | 208 base::subtle::NoBarrier_Store(&queue_duration_sample_, |
| 206 base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration); | 209 queue_duration.InMilliseconds()); |
| | 210 base::subtle::NoBarrier_Store(&run_duration_sample_, |
| | 211 run_duration.InMilliseconds()); |
| 207 } | 212 } |
| 208 } | 213 } |
| 209 | 214 |
| 210 void DeathData::RecordAllocations(const uint32_t alloc_ops, | 215 void DeathData::RecordAllocations(const uint32_t alloc_ops, |
| 211 const uint32_t free_ops, | 216 const uint32_t free_ops, |
| 212 const uint32_t allocated_bytes, | 217 const uint32_t allocated_bytes, |
| 213 const uint32_t freed_bytes, | 218 const uint32_t freed_bytes, |
| 214 const uint32_t alloc_overhead_bytes, | 219 const uint32_t alloc_overhead_bytes, |
| 215 const uint32_t max_allocated_bytes) { | 220 const uint32_t max_allocated_bytes) { |
| 216 #if !defined(ARCH_CPU_64_BITS) | 221 #if !defined(ARCH_CPU_64_BITS) |
| (...skipping 309 matching lines...) | |
| 526 | 531 |
| 527 ThreadData::~ThreadData() { | 532 ThreadData::~ThreadData() { |
| 528 } | 533 } |
| 529 | 534 |
| 530 void ThreadData::PushToHeadOfList() { | 535 void ThreadData::PushToHeadOfList() { |
| 531 // Toss in a hint of randomness (atop the uninitialized value). | 536 // Toss in a hint of randomness (atop the uninitialized value). |
| 532 (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_, | 537 (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_, |
| 533 sizeof(random_number_)); | 538 sizeof(random_number_)); |
| 534 MSAN_UNPOISON(&random_number_, sizeof(random_number_)); | 539 MSAN_UNPOISON(&random_number_, sizeof(random_number_)); |
| 535 random_number_ += static_cast<uint32_t>(this - static_cast<ThreadData*>(0)); | 540 random_number_ += static_cast<uint32_t>(this - static_cast<ThreadData*>(0)); |
| 536 random_number_ ^= (Now() - TrackedTime()).InMilliseconds(); | 541 random_number_ ^= (Now() - base::TimeTicks()).InMilliseconds(); |
| | gab (2017/06/26 21:48:12): This is a bitwise XOR, not sure what it means to m |
| | tdresser (2017/06/27 13:21:56): Done. |
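The exchange above concerns `random_number_ ^= (Now() - base::TimeTicks()).InMilliseconds();`. A minimal sketch of the same seed-mixing idea, using std::chrono as a stand-in for base::TimeTicks so it compiles on its own (the stand-in is an assumption, not the Chromium API):

```cpp
// Plain-C++ analogue of XOR-ing a time-derived millisecond count into a
// 32-bit seed. Only the low-order bits of the timestamp carry useful
// entropy, so truncation to uint32_t is intentional.
#include <chrono>
#include <cstdint>
#include <iostream>

int main() {
  uint32_t random_number = 0x9e3779b9u;  // arbitrary starting value

  const auto since_epoch = std::chrono::steady_clock::now().time_since_epoch();
  const int64_t ms =
      std::chrono::duration_cast<std::chrono::milliseconds>(since_epoch).count();

  random_number ^= static_cast<uint32_t>(ms);  // stir the timestamp into the seed

  std::cout << "seed after mixing: " << random_number << "\n";
  return 0;
}
```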
| 537 | 542 |
| 538 DCHECK(!next_); | 543 DCHECK(!next_); |
| 539 base::AutoLock lock(*list_lock_.Pointer()); | 544 base::AutoLock lock(*list_lock_.Pointer()); |
| 540 incarnation_count_for_pool_ = incarnation_counter_; | 545 incarnation_count_for_pool_ = incarnation_counter_; |
| 541 next_ = all_thread_data_list_head_; | 546 next_ = all_thread_data_list_head_; |
| 542 all_thread_data_list_head_ = this; | 547 all_thread_data_list_head_ = this; |
| 543 } | 548 } |
| 544 | 549 |
| 545 // static | 550 // static |
| 546 ThreadData* ThreadData::first() { | 551 ThreadData* ThreadData::first() { |
| (...skipping 124 matching lines...) | |
| 671 // Lock since the map may get relocated now, and other threads sometimes | 676 // Lock since the map may get relocated now, and other threads sometimes |
| 672 // snapshot it (but they lock before copying it). | 677 // snapshot it (but they lock before copying it). |
| 673 base::AutoLock lock(map_lock_); | 678 base::AutoLock lock(map_lock_); |
| 674 birth_map_[location] = child; | 679 birth_map_[location] = child; |
| 675 } | 680 } |
| 676 | 681 |
| 677 return child; | 682 return child; |
| 678 } | 683 } |
| 679 | 684 |
| 680 void ThreadData::TallyADeath(const Births& births, | 685 void ThreadData::TallyADeath(const Births& births, |
| 681 int32_t queue_duration, | 686 const base::TimeDelta queue_duration, |
| 682 const TaskStopwatch& stopwatch) { | 687 const TaskStopwatch& stopwatch) { |
| 683 int32_t run_duration = stopwatch.RunDurationMs(); | 688 base::TimeDelta run_duration = stopwatch.RunDuration(); |
| 684 | 689 |
| 685 // Stir in some randomness, plus add constant in case durations are zero. | 690 // Stir in some randomness, plus add constant in case durations are zero. |
| 686 const uint32_t kSomePrimeNumber = 2147483647; | 691 const uint32_t kSomePrimeNumber = 2147483647; |
| 687 random_number_ += queue_duration + run_duration + kSomePrimeNumber; | 692 random_number_ += queue_duration.InMilliseconds() + |
| | 693 run_duration.InMilliseconds() + kSomePrimeNumber; |
| 688 // An address is going to have some randomness to it as well ;-). | 694 // An address is going to have some randomness to it as well ;-). |
| 689 random_number_ ^= | 695 random_number_ ^= |
| 690 static_cast<uint32_t>(&births - reinterpret_cast<Births*>(0)); | 696 static_cast<uint32_t>(&births - reinterpret_cast<Births*>(0)); |
| 691 | 697 |
| 692 DeathMap::iterator it = death_map_.find(&births); | 698 DeathMap::iterator it = death_map_.find(&births); |
| 693 DeathData* death_data; | 699 DeathData* death_data; |
| 694 if (it != death_map_.end()) { | 700 if (it != death_map_.end()) { |
| 695 death_data = &it->second; | 701 death_data = &it->second; |
| 696 } else { | 702 } else { |
| 697 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. | 703 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. |
| (...skipping 38 matching lines...) | |
| 736 return; | 742 return; |
| 737 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 743 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
| 738 if (!current_thread_data) | 744 if (!current_thread_data) |
| 739 return; | 745 return; |
| 740 | 746 |
| 741 // Watch out for a race where status_ is changing, and hence one or both | 747 // Watch out for a race where status_ is changing, and hence one or both |
| 742 // of start_of_run or end_of_run is zero. In that case, we didn't bother to | 748 // of start_of_run or end_of_run is zero. In that case, we didn't bother to |
| 743 // get a time value since we "weren't tracking" and we were trying to be | 749 // get a time value since we "weren't tracking" and we were trying to be |
| 744 // efficient by not calling for a genuine time value. For simplicity, we'll | 750 // efficient by not calling for a genuine time value. For simplicity, we'll |
| 745 // use a default zero duration when we can't calculate a true value. | 751 // use a default zero duration when we can't calculate a true value. |
| 746 TrackedTime start_of_run = stopwatch.StartTime(); | 752 base::TimeTicks start_of_run = stopwatch.StartTime(); |
| 747 int32_t queue_duration = 0; | 753 base::TimeDelta queue_duration; |
| 748 if (!start_of_run.is_null()) { | 754 if (!start_of_run.is_null()) { |
| 749 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) | 755 queue_duration = start_of_run - completed_task.EffectiveTimePosted(); |
| 750 .InMilliseconds(); | |
| 751 } | 756 } |
| 752 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); | 757 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); |
| 753 } | 758 } |
| 754 | 759 |
| 755 // static | 760 // static |
| 756 void ThreadData::TallyRunOnWorkerThreadIfTracking( | 761 void ThreadData::TallyRunOnWorkerThreadIfTracking( |
| 757 const Births* births, | 762 const Births* births, |
| 758 const TrackedTime& time_posted, | 763 const base::TimeTicks& time_posted, |
| 759 const TaskStopwatch& stopwatch) { | 764 const TaskStopwatch& stopwatch) { |
| 760 // Even if we have been DEACTIVATED, we will process any pending births so | 765 // Even if we have been DEACTIVATED, we will process any pending births so |
| 761 // that our data structures (which counted the outstanding births) remain | 766 // that our data structures (which counted the outstanding births) remain |
| 762 // consistent. | 767 // consistent. |
| 763 if (!births) | 768 if (!births) |
| 764 return; | 769 return; |
| 765 | 770 |
| 766 // TODO(jar): Support the option to coalesce all worker-thread activity under | 771 // TODO(jar): Support the option to coalesce all worker-thread activity under |
| 767 // one ThreadData instance that uses locks to protect *all* access. This will | 772 // one ThreadData instance that uses locks to protect *all* access. This will |
| 768 // reduce memory (making it provably bounded), but run incrementally slower | 773 // reduce memory (making it provably bounded), but run incrementally slower |
| 769 // (since we'll use locks on TallyABirth and TallyADeath). The good news is | 774 // (since we'll use locks on TallyABirth and TallyADeath). The good news is |
| 770 // that the locks on TallyADeath will be *after* the worker thread has run, | 775 // that the locks on TallyADeath will be *after* the worker thread has run, |
| 771 // and hence nothing will be waiting for the completion (... besides some | 776 // and hence nothing will be waiting for the completion (... besides some |
| 772 // other thread that might like to run). Also, the worker threads tasks are | 777 // other thread that might like to run). Also, the worker threads tasks are |
| 773 // generally longer, and hence the cost of the lock may perchance be amortized | 778 // generally longer, and hence the cost of the lock may perchance be amortized |
| 774 // over the long task's lifetime. | 779 // over the long task's lifetime. |
| 775 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 780 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
| 776 if (!current_thread_data) | 781 if (!current_thread_data) |
| 777 return; | 782 return; |
| 778 | 783 |
| 779 TrackedTime start_of_run = stopwatch.StartTime(); | 784 base::TimeTicks start_of_run = stopwatch.StartTime(); |
| 780 int32_t queue_duration = 0; | 785 base::TimeDelta queue_duration; |
| 781 if (!start_of_run.is_null()) { | 786 if (!start_of_run.is_null()) { |
| 782 queue_duration = (start_of_run - time_posted).InMilliseconds(); | 787 queue_duration = start_of_run - time_posted; |
| 783 } | 788 } |
| 784 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); | 789 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); |
| 785 } | 790 } |
| 786 | 791 |
| 787 // static | 792 // static |
| 788 void ThreadData::TallyRunInAScopedRegionIfTracking( | 793 void ThreadData::TallyRunInAScopedRegionIfTracking( |
| 789 const Births* births, | 794 const Births* births, |
| 790 const TaskStopwatch& stopwatch) { | 795 const TaskStopwatch& stopwatch) { |
| 791 // Even if we have been DEACTIVATED, we will process any pending births so | 796 // Even if we have been DEACTIVATED, we will process any pending births so |
| 792 // that our data structures (which counted the outstanding births) remain | 797 // that our data structures (which counted the outstanding births) remain |
| 793 // consistent. | 798 // consistent. |
| 794 if (!births) | 799 if (!births) |
| 795 return; | 800 return; |
| 796 | 801 |
| 797 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 802 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
| 798 if (!current_thread_data) | 803 if (!current_thread_data) |
| 799 return; | 804 return; |
| 800 | 805 |
| 801 int32_t queue_duration = 0; | 806 base::TimeDelta queue_duration; |
| 802 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); | 807 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); |
| 803 } | 808 } |
| 804 | 809 |
| 805 void ThreadData::SnapshotExecutedTasks( | 810 void ThreadData::SnapshotExecutedTasks( |
| 806 int current_profiling_phase, | 811 int current_profiling_phase, |
| 807 PhasedProcessDataSnapshotMap* phased_snapshots, | 812 PhasedProcessDataSnapshotMap* phased_snapshots, |
| 808 BirthCountMap* birth_counts) { | 813 BirthCountMap* birth_counts) { |
| 809 // Get copy of data, so that the data will not change during the iterations | 814 // Get copy of data, so that the data will not change during the iterations |
| 810 // and processing. | 815 // and processing. |
| 811 BirthMap birth_map; | 816 BirthMap birth_map; |
| (...skipping 108 matching lines...) | |
| 920 bool ThreadData::TrackingStatus() { | 925 bool ThreadData::TrackingStatus() { |
| 921 return base::subtle::Acquire_Load(&status_) > DEACTIVATED; | 926 return base::subtle::Acquire_Load(&status_) > DEACTIVATED; |
| 922 } | 927 } |
| 923 | 928 |
| 924 // static | 929 // static |
| 925 void ThreadData::EnableProfilerTiming() { | 930 void ThreadData::EnableProfilerTiming() { |
| 926 base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, ENABLED_TIMING); | 931 base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, ENABLED_TIMING); |
| 927 } | 932 } |
| 928 | 933 |
| 929 // static | 934 // static |
| 930 TrackedTime ThreadData::Now() { | 935 base::TimeTicks ThreadData::Now() { |
| 931 if (now_function_for_testing_) | 936 if (now_function_for_testing_) |
| 932 return TrackedTime::FromMilliseconds((*now_function_for_testing_)()); | 937 return base::TimeTicks::FromInternalValue((*now_function_for_testing_)() * |
| | 938 1000); |
| | gab (2017/06/26 21:48:12): Why is this changing? |
| | tdresser (2017/06/27 13:21:56): There is no base::TimeTicks::FromMilliseconds. I'd |
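tdresser's point above is that TimeTicks has no FromMilliseconds factory, so the millisecond value returned by the testing hook is scaled by hand to the internal microsecond unit. A minimal sketch of the same conversion with a hypothetical FakeTicks type (not the real base::TimeTicks):

```cpp
// Hypothetical stand-in demonstrating the unit conversion discussed above:
// a tick type whose internal representation is microseconds, built from a
// millisecond value by multiplying by 1000.
#include <cassert>
#include <cstdint>

struct FakeTicks {                       // stand-in for a TimeTicks-like type
  int64_t internal_us;                   // internal unit: microseconds
  static FakeTicks FromInternalValue(int64_t us) { return FakeTicks{us}; }
  int64_t InMilliseconds() const { return internal_us / 1000; }
};

int main() {
  const int64_t now_ms = 42;             // what a millisecond-based testing hook would return
  const FakeTicks ticks = FakeTicks::FromInternalValue(now_ms * 1000);
  assert(ticks.InMilliseconds() == 42);  // round-trips through the microsecond representation
  return 0;
}
```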
| 933 if (IsProfilerTimingEnabled() && TrackingStatus()) | 939 if (IsProfilerTimingEnabled() && TrackingStatus()) |
| 934 return TrackedTime::Now(); | 940 return base::TimeTicks::Now(); |
| 935 return TrackedTime(); // Super fast when disabled, or not compiled. | 941 return base::TimeTicks(); // Super fast when disabled, or not compiled. |
| 936 } | 942 } |
| 937 | 943 |
| 938 // static | 944 // static |
| 939 void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) { | 945 void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) { |
| 940 base::AutoLock lock(*list_lock_.Pointer()); | 946 base::AutoLock lock(*list_lock_.Pointer()); |
| 941 | 947 |
| 942 // TODO(jar): until this is working on XP, don't run the real test. | 948 // TODO(jar): until this is working on XP, don't run the real test. |
| 943 #if 0 | 949 #if 0 |
| 944 // Verify that we've at least shutdown/cleanup the major named threads. The | 950 // Verify that we've at least shutdown/cleanup the major named threads. The |
| 945 // caller should tell us how many thread shutdowns should have taken place by | 951 // caller should tell us how many thread shutdowns should have taken place by |
| (...skipping 83 matching lines...) | |
| 1029 pcursor = &cursor->next_retired_thread_data_; | 1035 pcursor = &cursor->next_retired_thread_data_; |
| 1030 cursor = cursor->next_retired_thread_data_; | 1036 cursor = cursor->next_retired_thread_data_; |
| 1031 } | 1037 } |
| 1032 } | 1038 } |
| 1033 | 1039 |
| 1034 return new ThreadData(sanitized_thread_name); | 1040 return new ThreadData(sanitized_thread_name); |
| 1035 } | 1041 } |
| 1036 | 1042 |
| 1037 //------------------------------------------------------------------------------ | 1043 //------------------------------------------------------------------------------ |
| 1038 TaskStopwatch::TaskStopwatch() | 1044 TaskStopwatch::TaskStopwatch() |
| 1039 : wallclock_duration_ms_(0), | 1045 : wallclock_duration_(), |
| 1040 current_thread_data_(NULL), | 1046 current_thread_data_(NULL), |
| 1041 excluded_duration_ms_(0), | 1047 excluded_duration_(), |
| | gab (2017/06/26 21:48:12): Remove default members from initialization list. |
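gab's comment above asks for value-initialized members (the base::TimeDelta fields) to be dropped from the constructor's initializer list, since a default-constructed TimeDelta is already zero. A standalone sketch of the before/after shape, using stand-in types rather than the actual TaskStopwatch:

```cpp
// Illustration of "remove default members from initialization list":
// members whose default construction already gives the desired value need
// no entry in the ctor init list; raw pointers, which would otherwise be
// left uninitialized, still do.
#include <cstdint>

struct Delta { int64_t us = 0; };  // stand-in for base::TimeDelta

class StopwatchSketch {
 public:
  // Before: StopwatchSketch() : wallclock_duration_(), excluded_duration_(), parent_(nullptr) {}
  // After: only the pointer, which has no safe default, is listed.
  StopwatchSketch() : parent_(nullptr) {}

 private:
  Delta wallclock_duration_;   // default-constructed to zero, no entry needed
  Delta excluded_duration_;    // same
  StopwatchSketch* parent_;
};

int main() {
  StopwatchSketch s;
  (void)s;
  return 0;
}
```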
| 1042 parent_(NULL) { | 1048 parent_(NULL) { |
| 1043 #if DCHECK_IS_ON() | 1049 #if DCHECK_IS_ON() |
| 1044 state_ = CREATED; | 1050 state_ = CREATED; |
| 1045 child_ = NULL; | 1051 child_ = NULL; |
| 1046 #endif | 1052 #endif |
| 1047 #if BUILDFLAG(USE_ALLOCATOR_SHIM) | 1053 #if BUILDFLAG(USE_ALLOCATOR_SHIM) |
| 1048 heap_tracking_enabled_ = | 1054 heap_tracking_enabled_ = |
| 1049 base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled(); | 1055 base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled(); |
| 1050 #endif | 1056 #endif |
| 1051 } | 1057 } |
| (...skipping 26 matching lines...) | |
| 1078 if (parent_) { | 1084 if (parent_) { |
| 1079 DCHECK(parent_->state_ == RUNNING); | 1085 DCHECK(parent_->state_ == RUNNING); |
| 1080 DCHECK(parent_->child_ == NULL); | 1086 DCHECK(parent_->child_ == NULL); |
| 1081 parent_->child_ = this; | 1087 parent_->child_ = this; |
| 1082 } | 1088 } |
| 1083 #endif | 1089 #endif |
| 1084 current_thread_data_->current_stopwatch_ = this; | 1090 current_thread_data_->current_stopwatch_ = this; |
| 1085 } | 1091 } |
| 1086 | 1092 |
| 1087 void TaskStopwatch::Stop() { | 1093 void TaskStopwatch::Stop() { |
| 1088 const TrackedTime end_time = ThreadData::Now(); | 1094 const base::TimeTicks end_time = ThreadData::Now(); |
| 1089 #if DCHECK_IS_ON() | 1095 #if DCHECK_IS_ON() |
| 1090 DCHECK(state_ == RUNNING); | 1096 DCHECK(state_ == RUNNING); |
| 1091 state_ = STOPPED; | 1097 state_ = STOPPED; |
| 1092 DCHECK(child_ == NULL); | 1098 DCHECK(child_ == NULL); |
| 1093 #endif | 1099 #endif |
| 1094 #if BUILDFLAG(USE_ALLOCATOR_SHIM) | 1100 #if BUILDFLAG(USE_ALLOCATOR_SHIM) |
| 1095 if (heap_tracking_enabled_) | 1101 if (heap_tracking_enabled_) |
| 1096 heap_usage_.Stop(true); | 1102 heap_usage_.Stop(true); |
| 1097 #endif | 1103 #endif |
| 1098 | 1104 |
| 1099 if (!start_time_.is_null() && !end_time.is_null()) { | 1105 if (!start_time_.is_null() && !end_time.is_null()) { |
| 1100 wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds(); | 1106 wallclock_duration_ = end_time - start_time_; |
| 1101 } | 1107 } |
| 1102 | 1108 |
| 1103 if (!current_thread_data_) | 1109 if (!current_thread_data_) |
| 1104 return; | 1110 return; |
| 1105 | 1111 |
| 1106 DCHECK(current_thread_data_->current_stopwatch_ == this); | 1112 DCHECK(current_thread_data_->current_stopwatch_ == this); |
| 1107 current_thread_data_->current_stopwatch_ = parent_; | 1113 current_thread_data_->current_stopwatch_ = parent_; |
| 1108 if (!parent_) | 1114 if (!parent_) |
| 1109 return; | 1115 return; |
| 1110 | 1116 |
| 1111 #if DCHECK_IS_ON() | 1117 #if DCHECK_IS_ON() |
| 1112 DCHECK(parent_->state_ == RUNNING); | 1118 DCHECK(parent_->state_ == RUNNING); |
| 1113 DCHECK(parent_->child_ == this); | 1119 DCHECK(parent_->child_ == this); |
| 1114 parent_->child_ = NULL; | 1120 parent_->child_ = NULL; |
| 1115 #endif | 1121 #endif |
| 1116 parent_->excluded_duration_ms_ += wallclock_duration_ms_; | 1122 parent_->excluded_duration_ += wallclock_duration_; |
| 1117 parent_ = NULL; | 1123 parent_ = NULL; |
| 1118 } | 1124 } |
| 1119 | 1125 |
| 1120 TrackedTime TaskStopwatch::StartTime() const { | 1126 base::TimeTicks TaskStopwatch::StartTime() const { |
| 1121 #if DCHECK_IS_ON() | 1127 #if DCHECK_IS_ON() |
| 1122 DCHECK(state_ != CREATED); | 1128 DCHECK(state_ != CREATED); |
| 1123 #endif | 1129 #endif |
| 1124 | 1130 |
| 1125 return start_time_; | 1131 return start_time_; |
| 1126 } | 1132 } |
| 1127 | 1133 |
| 1128 int32_t TaskStopwatch::RunDurationMs() const { | 1134 base::TimeDelta TaskStopwatch::RunDuration() const { |
| 1129 #if DCHECK_IS_ON() | 1135 #if DCHECK_IS_ON() |
| 1130 DCHECK(state_ == STOPPED); | 1136 DCHECK(state_ == STOPPED); |
| 1131 #endif | 1137 #endif |
| 1132 | 1138 |
| 1133 return wallclock_duration_ms_ - excluded_duration_ms_; | 1139 return wallclock_duration_ - excluded_duration_; |
| 1134 } | 1140 } |
| 1135 | 1141 |
| 1136 ThreadData* TaskStopwatch::GetThreadData() const { | 1142 ThreadData* TaskStopwatch::GetThreadData() const { |
| 1137 #if DCHECK_IS_ON() | 1143 #if DCHECK_IS_ON() |
| 1138 DCHECK(state_ != CREATED); | 1144 DCHECK(state_ != CREATED); |
| 1139 #endif | 1145 #endif |
| 1140 | 1146 |
| 1141 return current_thread_data_; | 1147 return current_thread_data_; |
| 1142 } | 1148 } |
| 1143 | 1149 |
| (...skipping 45 matching lines...) | |
| 1189 #endif | 1195 #endif |
| 1190 } | 1196 } |
| 1191 | 1197 |
| 1192 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) = | 1198 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) = |
| 1193 default; | 1199 default; |
| 1194 | 1200 |
| 1195 ProcessDataSnapshot::~ProcessDataSnapshot() { | 1201 ProcessDataSnapshot::~ProcessDataSnapshot() { |
| 1196 } | 1202 } |
| 1197 | 1203 |
| 1198 } // namespace tracked_objects | 1204 } // namespace tracked_objects |
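The sampling scheme described in the comments inside DeathData::RecordDurations() is size-one reservoir sampling. A self-contained sketch, with std::mt19937 standing in for the profiler's hand-rolled random_number (an assumption made only so the example runs on its own):

```cpp
// Size-one reservoir sampling: the i-th observed duration replaces the
// stored sample with probability 1/i, which makes the kept sample uniform
// over everything seen so far.
#include <cstdint>
#include <iostream>
#include <random>
#include <vector>

int main() {
  std::mt19937 rng(12345);
  const std::vector<int64_t> durations_ms = {3, 8, 1, 25, 4, 9, 16};

  int sample_probability_count = 0;   // clamped at INT_MAX in the real code
  int64_t duration_sample_ms = 0;

  for (const int64_t d : durations_ms) {
    ++sample_probability_count;
    // Keep the new value with probability 1 / sample_probability_count.
    std::uniform_int_distribution<int> dist(0, sample_probability_count - 1);
    if (dist(rng) == 0)
      duration_sample_ms = d;
  }

  std::cout << "uniformly chosen sample: " << duration_sample_ms << " ms\n";
  return 0;
}
```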