Chromium Code Reviews

Diff: base/tracked_objects.cc

Issue 2956683002: chrome://profiler infrastructure uses base time types. (Closed)
Patch Set: Address nit. Created 3 years, 5 months ago
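
The CL replaces the profiler's raw int32_t millisecond counters and the TrackedTime wrapper with base::TimeDelta and base::TimeTicks: durations travel through the API as typed values and are converted with InMilliseconds() only where they are stored in the atomic fields. A minimal sketch of that pattern, assuming Chromium's base/time API of this era (the DurationAccumulator class and its field names are illustrative, not part of the CL):

#include <cstdint>

#include "base/time/time.h"

// Illustrative only: typed durations at the API boundary, raw millisecond
// counts only at the storage boundary.
class DurationAccumulator {
 public:
  void Record(base::TimeDelta queue_duration, base::TimeDelta run_duration) {
    queue_duration_sum_ms_ += queue_duration.InMilliseconds();
    run_duration_sum_ms_ += run_duration.InMilliseconds();
  }

 private:
  int64_t queue_duration_sum_ms_ = 0;
  int64_t run_duration_sum_ms_ = 0;
};

// Callers compute deltas directly from base::TimeTicks:
//   base::TimeTicks start_of_run = base::TimeTicks::Now();
//   ...
//   accumulator.Record(start_of_run - time_posted,
//                      base::TimeTicks::Now() - start_of_run);
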
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/tracked_objects.h"

#include <ctype.h>
#include <limits.h>
#include <stdlib.h>

(...skipping 150 matching lines...)

// TODO(jar): I need to see if this macro to optimize branching is worth using.
//
// This macro has no branching, so it is surely fast, and is equivalent to:
//   if (assign_it)
//     target = source;
// We use a macro rather than a template to force this to inline.
// Related code for calculating max is discussed on the web.
#define CONDITIONAL_ASSIGN(assign_it, target, source) \
  ((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it))

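For reference, CONDITIONAL_ASSIGN above is the classic branchless select: -static_cast<int32_t>(assign_it) is all ones when assign_it is nonzero and zero otherwise, so the masked XOR either rewrites target to source or leaves it alone. A standalone sketch of the same trick (ConditionalAssign is a hypothetical helper, not part of this file):

#include <cstdint>

// Branchless equivalent of: if (assign_it) *target = source;
// assign_it == true  -> mask is all ones, so *target ^ (*target ^ source)
//                       collapses to source.
// assign_it == false -> mask is zero, so *target is left unchanged.
inline void ConditionalAssign(bool assign_it, int32_t* target, int32_t source) {
  *target ^= (*target ^ source) & -static_cast<int32_t>(assign_it);
}
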
-void DeathData::RecordDurations(const int32_t queue_duration,
-                                const int32_t run_duration,
+void DeathData::RecordDurations(const base::TimeDelta queue_duration,
+                                const base::TimeDelta run_duration,
                                 const uint32_t random_number) {
  // We'll just clamp at INT_MAX, but we should note this in the UI as such.
  if (count_ < INT_MAX)
    base::subtle::NoBarrier_Store(&count_, count_ + 1);

  int sample_probability_count =
      base::subtle::NoBarrier_Load(&sample_probability_count_);
  if (sample_probability_count < INT_MAX)
    ++sample_probability_count;
  base::subtle::NoBarrier_Store(&sample_probability_count_,
                                sample_probability_count);

-  base::subtle::NoBarrier_Store(&queue_duration_sum_,
-                                queue_duration_sum_ + queue_duration);
-  base::subtle::NoBarrier_Store(&run_duration_sum_,
-                                run_duration_sum_ + run_duration);
+  base::subtle::NoBarrier_Store(
+      &queue_duration_sum_,
+      queue_duration_sum_ + queue_duration.InMilliseconds());
+  base::subtle::NoBarrier_Store(
+      &run_duration_sum_, run_duration_sum_ + run_duration.InMilliseconds());

-  if (queue_duration_max() < queue_duration)
-    base::subtle::NoBarrier_Store(&queue_duration_max_, queue_duration);
-  if (run_duration_max() < run_duration)
-    base::subtle::NoBarrier_Store(&run_duration_max_, run_duration);
+  if (queue_duration_max() < queue_duration.InMilliseconds())
+    base::subtle::NoBarrier_Store(&queue_duration_max_,
+                                  queue_duration.InMilliseconds());
+  if (run_duration_max() < run_duration.InMilliseconds())
+    base::subtle::NoBarrier_Store(&run_duration_max_,
+                                  run_duration.InMilliseconds());

  // Take a uniformly distributed sample over all durations ever supplied during
  // the current profiling phase.
  // The probability that we (instead) use this new sample is
  // 1/sample_probability_count_. This results in a completely uniform selection
  // of the sample (at least when we don't clamp sample_probability_count_...
  // but that should be inconsequentially likely). We ignore the fact that we
  // correlated our selection of a sample to the run and queue times (i.e., we
  // used them to generate random_number).
  CHECK_GT(sample_probability_count, 0);
  if (0 == (random_number % sample_probability_count)) {
-    base::subtle::NoBarrier_Store(&queue_duration_sample_, queue_duration);
-    base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration);
+    base::subtle::NoBarrier_Store(&queue_duration_sample_,
+                                  queue_duration.InMilliseconds());
+    base::subtle::NoBarrier_Store(&run_duration_sample_,
+                                  run_duration.InMilliseconds());
  }
}
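
The sampling comment in RecordDurations above describes size-one reservoir sampling: replacing the stored sample with probability 1/n on the n-th observation leaves every observation seen so far with an equal 1/n chance of being the one retained. A minimal sketch of that selection rule, using a plain RNG instead of the profiler's recycled random_number (the ReservoirOfOne class is hypothetical):

#include <cstdint>
#include <random>

// Keeps one uniformly chosen value out of a stream.
class ReservoirOfOne {
 public:
  void Offer(int64_t value) {
    ++count_;
    // Keep the new value with probability 1/count_. After n offers, each of
    // the n values has probability 1/n of being the retained sample.
    std::uniform_int_distribution<int64_t> pick(0, count_ - 1);
    if (pick(rng_) == 0)
      sample_ = value;
  }
  int64_t sample() const { return sample_; }

 private:
  int64_t count_ = 0;
  int64_t sample_ = 0;
  std::mt19937 rng_{std::random_device{}()};
};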

void DeathData::RecordAllocations(const uint32_t alloc_ops,
                                  const uint32_t free_ops,
                                  const uint32_t allocated_bytes,
                                  const uint32_t freed_bytes,
                                  const uint32_t alloc_overhead_bytes,
                                  const uint32_t max_allocated_bytes) {
#if !defined(ARCH_CPU_64_BITS)
(...skipping 309 matching lines...)

ThreadData::~ThreadData() {
}

void ThreadData::PushToHeadOfList() {
  // Toss in a hint of randomness (atop the uniniitalized value).
  (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_,
                                                 sizeof(random_number_));
  MSAN_UNPOISON(&random_number_, sizeof(random_number_));
  random_number_ += static_cast<uint32_t>(this - static_cast<ThreadData*>(0));
-  random_number_ ^= (Now() - TrackedTime()).InMilliseconds();
+  random_number_ ^=
+      static_cast<uint32_t>((Now() - base::TimeTicks()).InMilliseconds());

  DCHECK(!next_);
  base::AutoLock lock(*list_lock_.Pointer());
  incarnation_count_for_pool_ = incarnation_counter_;
  next_ = all_thread_data_list_head_;
  all_thread_data_list_head_ = this;
}

// static
ThreadData* ThreadData::first() {
(...skipping 124 matching lines...)
    // Lock since the map may get relocated now, and other threads sometimes
    // snapshot it (but they lock before copying it).
    base::AutoLock lock(map_lock_);
    birth_map_[location] = child;
  }

  return child;
}

void ThreadData::TallyADeath(const Births& births,
-                             int32_t queue_duration,
+                             const base::TimeDelta queue_duration,
                             const TaskStopwatch& stopwatch) {
-  int32_t run_duration = stopwatch.RunDurationMs();
+  base::TimeDelta run_duration = stopwatch.RunDuration();

  // Stir in some randomness, plus add constant in case durations are zero.
  const uint32_t kSomePrimeNumber = 2147483647;
-  random_number_ += queue_duration + run_duration + kSomePrimeNumber;
+  random_number_ += queue_duration.InMilliseconds() +
+                    run_duration.InMilliseconds() + kSomePrimeNumber;
  // An address is going to have some randomness to it as well ;-).
  random_number_ ^=
      static_cast<uint32_t>(&births - reinterpret_cast<Births*>(0));

  DeathMap::iterator it = death_map_.find(&births);
  DeathData* death_data;
  if (it != death_map_.end()) {
    death_data = &it->second;
  } else {
    base::AutoLock lock(map_lock_);  // Lock as the map may get relocated now.
(...skipping 38 matching lines...)
    return;
  ThreadData* current_thread_data = stopwatch.GetThreadData();
  if (!current_thread_data)
    return;

  // Watch out for a race where status_ is changing, and hence one or both
  // of start_of_run or end_of_run is zero. In that case, we didn't bother to
  // get a time value since we "weren't tracking" and we were trying to be
  // efficient by not calling for a genuine time value. For simplicity, we'll
  // use a default zero duration when we can't calculate a true value.
-  TrackedTime start_of_run = stopwatch.StartTime();
-  int32_t queue_duration = 0;
+  base::TimeTicks start_of_run = stopwatch.StartTime();
+  base::TimeDelta queue_duration;
  if (!start_of_run.is_null()) {
-    queue_duration = (start_of_run - completed_task.EffectiveTimePosted())
-                         .InMilliseconds();
+    queue_duration = start_of_run - completed_task.EffectiveTimePosted();
  }
  current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
}

// static
void ThreadData::TallyRunOnWorkerThreadIfTracking(
    const Births* births,
-    const TrackedTime& time_posted,
+    const base::TimeTicks& time_posted,
    const TaskStopwatch& stopwatch) {
  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  if (!births)
    return;

  // TODO(jar): Support the option to coalesce all worker-thread activity under
  // one ThreadData instance that uses locks to protect *all* access. This will
  // reduce memory (making it provably bounded), but run incrementally slower
  // (since we'll use locks on TallyABirth and TallyADeath). The good news is
  // that the locks on TallyADeath will be *after* the worker thread has run,
  // and hence nothing will be waiting for the completion (... besides some
  // other thread that might like to run). Also, the worker threads tasks are
  // generally longer, and hence the cost of the lock may perchance be amortized
  // over the long task's lifetime.
  ThreadData* current_thread_data = stopwatch.GetThreadData();
  if (!current_thread_data)
    return;

-  TrackedTime start_of_run = stopwatch.StartTime();
-  int32_t queue_duration = 0;
+  base::TimeTicks start_of_run = stopwatch.StartTime();
+  base::TimeDelta queue_duration;
  if (!start_of_run.is_null()) {
-    queue_duration = (start_of_run - time_posted).InMilliseconds();
+    queue_duration = start_of_run - time_posted;
  }
  current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
}

// static
void ThreadData::TallyRunInAScopedRegionIfTracking(
    const Births* births,
    const TaskStopwatch& stopwatch) {
  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  if (!births)
    return;

  ThreadData* current_thread_data = stopwatch.GetThreadData();
  if (!current_thread_data)
    return;

-  int32_t queue_duration = 0;
+  base::TimeDelta queue_duration;
  current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
}

void ThreadData::SnapshotExecutedTasks(
    int current_profiling_phase,
    PhasedProcessDataSnapshotMap* phased_snapshots,
    BirthCountMap* birth_counts) {
  // Get copy of data, so that the data will not change during the iterations
  // and processing.
  BirthMap birth_map;
(...skipping 108 matching lines...)
bool ThreadData::TrackingStatus() {
  return base::subtle::Acquire_Load(&status_) > DEACTIVATED;
}

// static
void ThreadData::EnableProfilerTiming() {
  base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, ENABLED_TIMING);
}

// static
-TrackedTime ThreadData::Now() {
+base::TimeTicks ThreadData::Now() {
  if (now_function_for_testing_)
-    return TrackedTime::FromMilliseconds((*now_function_for_testing_)());
+    return base::TimeTicks() +
+           base::TimeDelta::FromMilliseconds((*now_function_for_testing_)());
  if (IsProfilerTimingEnabled() && TrackingStatus())
-    return TrackedTime::Now();
-  return TrackedTime();  // Super fast when disabled, or not compiled.
+    return base::TimeTicks::Now();
+  return base::TimeTicks();  // Super fast when disabled, or not compiled.
}

// static
void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) {
  base::AutoLock lock(*list_lock_.Pointer());

  // TODO(jar): until this is working on XP, don't run the real test.
#if 0
  // Verify that we've at least shutdown/cleanup the major namesd threads. The
  // caller should tell us how many thread shutdowns should have taken place by
(...skipping 83 matching lines...)
      pcursor = &cursor->next_retired_thread_data_;
      cursor = cursor->next_retired_thread_data_;
    }
  }

  return new ThreadData(sanitized_thread_name);
}

//------------------------------------------------------------------------------
TaskStopwatch::TaskStopwatch()
-    : wallclock_duration_ms_(0),
+    : wallclock_duration_(),
      current_thread_data_(NULL),
-      excluded_duration_ms_(0),
+      excluded_duration_(),
      parent_(NULL) {
#if DCHECK_IS_ON()
  state_ = CREATED;
  child_ = NULL;
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
  heap_tracking_enabled_ =
      base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled();
#endif
}
(...skipping 26 matching lines...)
  if (parent_) {
    DCHECK(parent_->state_ == RUNNING);
    DCHECK(parent_->child_ == NULL);
    parent_->child_ = this;
  }
#endif
  current_thread_data_->current_stopwatch_ = this;
}

void TaskStopwatch::Stop() {
-  const TrackedTime end_time = ThreadData::Now();
+  const base::TimeTicks end_time = ThreadData::Now();
#if DCHECK_IS_ON()
  DCHECK(state_ == RUNNING);
  state_ = STOPPED;
  DCHECK(child_ == NULL);
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
  if (heap_tracking_enabled_)
    heap_usage_.Stop(true);
#endif

  if (!start_time_.is_null() && !end_time.is_null()) {
-    wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds();
+    wallclock_duration_ = end_time - start_time_;
  }

  if (!current_thread_data_)
    return;

  DCHECK(current_thread_data_->current_stopwatch_ == this);
  current_thread_data_->current_stopwatch_ = parent_;
  if (!parent_)
    return;

#if DCHECK_IS_ON()
  DCHECK(parent_->state_ == RUNNING);
  DCHECK(parent_->child_ == this);
  parent_->child_ = NULL;
#endif
-  parent_->excluded_duration_ms_ += wallclock_duration_ms_;
+  parent_->excluded_duration_ += wallclock_duration_;
  parent_ = NULL;
}
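
Stop() above rolls the child stopwatch's wall-clock time into its parent's excluded duration, so a parent's RunDuration() (further down) counts only the time the parent task spent running itself, not the nested tasks it executed. A simplified model of that bookkeeping, assuming the base time types used by this CL (MiniStopwatch is hypothetical and omits the DCHECK state tracking):

#include "base/time/time.h"

// Hypothetical, simplified nesting model: a child's wall-clock time is
// excluded from its parent's run duration.
struct MiniStopwatch {
  base::TimeTicks start;
  base::TimeDelta wallclock;
  base::TimeDelta excluded;
  MiniStopwatch* parent = nullptr;

  void Start(MiniStopwatch* enclosing) {
    parent = enclosing;
    start = base::TimeTicks::Now();
  }
  void Stop() {
    wallclock = base::TimeTicks::Now() - start;
    if (parent)
      parent->excluded += wallclock;  // The parent will not count this time.
  }
  base::TimeDelta RunDuration() const { return wallclock - excluded; }
};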

-TrackedTime TaskStopwatch::StartTime() const {
+base::TimeTicks TaskStopwatch::StartTime() const {
#if DCHECK_IS_ON()
  DCHECK(state_ != CREATED);
#endif

  return start_time_;
}

-int32_t TaskStopwatch::RunDurationMs() const {
+base::TimeDelta TaskStopwatch::RunDuration() const {
#if DCHECK_IS_ON()
  DCHECK(state_ == STOPPED);
#endif

-  return wallclock_duration_ms_ - excluded_duration_ms_;
+  return wallclock_duration_ - excluded_duration_;
}

ThreadData* TaskStopwatch::GetThreadData() const {
#if DCHECK_IS_ON()
  DCHECK(state_ != CREATED);
#endif

  return current_thread_data_;
}

(...skipping 45 matching lines...)
#endif
}

ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) =
    default;

ProcessDataSnapshot::~ProcessDataSnapshot() {
}

}  // namespace tracked_objects