Index: chrome/browser/metrics/perf_provider_chromeos.cc
diff --git a/chrome/browser/metrics/perf_provider_chromeos.cc b/chrome/browser/metrics/perf_provider_chromeos.cc
index 95ff1504fc5c843cc2827563ef152cffab9c27cf..37441451f2d9bd6cec4d1c42bd19ec59a0f49d07 100644
--- a/chrome/browser/metrics/perf_provider_chromeos.cc
+++ b/chrome/browser/metrics/perf_provider_chromeos.cc
@@ -24,26 +24,10 @@
namespace {
-// Default time in seconds between invocations of perf.
-// This period is roughly 6.5 hours.
-// This is chosen to be relatively prime with the number of seconds in:
-// - one minute (60)
-// - one hour (3600)
-// - one day (86400)
-const size_t kPerfCommandIntervalDefaultSeconds = 23093;
-
-// The first collection interval is different from the interval above. This is
-// because we want to collect the first profile quickly after Chrome is started.
-// If this period is too long, the user will log off and Chrome will be killed
-// before it is triggered. The following 2 variables determine the upper and
-// lower bound on the interval.
-// The reason we do not always want to collect the initial profile after a fixed
-// period is to not over-represent task X in the profile where task X always
-// runs at a fixed period after start-up. By selecting a period randomly between
-// a lower and upper bound, we will hopefully collect a more fair profile.
-const size_t kPerfCommandStartIntervalLowerBoundMinutes = 10;
-
-const size_t kPerfCommandStartIntervalUpperBoundMinutes = 20;
+// Partition time since the epoch into successive intervals of this size. In
+// each interval that hasn't passed, pick a random time to collect a profile.
+// This interval is four hours.
+const size_t kPerfProfilingIntervalSeconds = 14400;
tipp 2014/05/16 22:17:26:
How'd you pick four hours? You should make some e
tipp 2014/05/16 22:17:26:
It's clearer to write 4 * 60 * 60.
Simon Que 2014/05/20 20:47:35:
Done.
Simon Que 2014/05/20 20:47:35:
I'm not sure we have enough information to make th

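To make the new scheme concrete, here is a worked example with illustrative numbers: at 1,400,000,000 seconds since the epoch, the current interval starts at 1,400,000,000 - (1,400,000,000 % 14400) = 1,399,996,800, i.e. 3,200 seconds ago. A random offset of 9,000 seconds schedules a collection 5,800 seconds from now; an offset of 1,000 seconds has already passed, so it is deferred to the same position in the next interval, at 1,399,996,800 + 14,400 + 1,000.
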
// Default time in seconds perf is run for.
const size_t kPerfCommandDurationDefaultSeconds = 2;
@@ -110,16 +94,14 @@ class WindowedIncognitoObserver : public chrome::BrowserListObserver {
};
PerfProvider::PerfProvider()
- : weak_factory_(this) {
- size_t collection_interval_minutes = base::RandInt(
- kPerfCommandStartIntervalLowerBoundMinutes,
- kPerfCommandStartIntervalUpperBoundMinutes);
- ScheduleCollection(base::TimeDelta::FromMinutes(collection_interval_minutes));
+ : weak_factory_(this),
Ilya Sherman 2014/05/16 21:48:57:
The weak pointer factory should always be the last
Simon Que 2014/05/17 03:25:07:
Done.
Simon Que 2014/05/19 22:45:01:
Done.

+ login_time_(base::Time::Now()) {
+ ScheduleCollection();
}
PerfProvider::~PerfProvider() {}
-bool PerfProvider::GetPerfData(std::vector<PerfDataProto>* perf_data) {
+bool PerfProvider::GetPerfData(std::vector<SampledProfile>* perf_data) {
DCHECK(CalledOnValidThread());
if (cached_perf_data_.empty()) {
AddToPerfHistogram(NOT_READY_TO_UPLOAD);
@@ -133,11 +115,29 @@ bool PerfProvider::GetPerfData(std::vector<PerfDataProto>* perf_data) {
return true;
}
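For context, a rough sketch of how a caller might consume the new GetPerfData() signature; the caller and UploadProfile() are hypothetical, not part of this CL:

    std::vector<metrics::SampledProfile> sampled_profiles;
    if (perf_provider->GetPerfData(&sampled_profiles)) {
      // Each SampledProfile wraps one PerfDataProto plus collection metadata
      // (trigger event, time after boot, time after login).
      for (size_t i = 0; i < sampled_profiles.size(); ++i)
        UploadProfile(sampled_profiles[i]);  // Hypothetical upload hook.
    }
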
-void PerfProvider::ScheduleCollection(const base::TimeDelta& interval) {
+void PerfProvider::ScheduleCollection() {
DCHECK(CalledOnValidThread());
if (timer_.IsRunning())
return;
+ // Determine the start of the current interval, which is a multiple of
+ // |kPerfProfilingIntervalSeconds| since the epoch.
+ base::Time now = base::Time::Now();
+ base::Time epoch = base::Time::UnixEpoch();
+ int64 seconds_since_epoch = (now - epoch).InSeconds();
tipp 2014/05/17 04:18:29:
How about milliseconds everywhere instead of secon
Simon Que 2014/05/19 18:37:12:
Will do.

+ int64 interval_start_time_s = seconds_since_epoch -
+ (seconds_since_epoch % kPerfProfilingIntervalSeconds);
+
+ // Pick a random time in the current interval. If it has already passed, pick
+ // the same time in the next interval.
Ilya Sherman 2014/05/16 21:48:57:
So, the next interval might have two random times
Simon Que 2014/05/19 18:37:12:
Yes, using the time of login / initialization woul

+ int64 scheduled_time_s = interval_start_time_s +
+ base::RandGenerator(kPerfProfilingIntervalSeconds);
tipp 2014/05/16 22:17:26:
You want repeated calls to ScheduleCollection to b
Simon Que 2014/05/17 03:46:27:
I'm not sure what your first sentence means. Shoul
tipp 2014/05/17 04:18:29:
You want it to be deterministically random. For e
Simon Que 2014/05/19 22:45:01:
Why can't we seed the random generator using the t

+ if (scheduled_time_s < seconds_since_epoch) {
+ scheduled_time_s += kPerfProfilingIntervalSeconds;
+ }
+
+ base::TimeDelta interval =
+ base::TimeDelta::FromSeconds(scheduled_time_s - seconds_since_epoch);
timer_.Start(FROM_HERE, interval, this,
&PerfProvider::CollectIfNecessaryAndReschedule);
}
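The thread above about repeated ScheduleCollection() calls is truncated, but tipp's suggestion reads as wanting the pick to be deterministically random per interval, so two calls landing in the same interval cannot choose two different times. A minimal standalone sketch of one such approach, assuming a stable per-client identifier is available; all names here and the use of std::hash are illustrative, not code from this CL:

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <string>

    // Mirrors kPerfProfilingIntervalSeconds from the CL.
    const uint64_t kIntervalSeconds = 14400;

    // Hypothetical stable per-client value; in Chrome this might be derived
    // from the metrics client ID. Hardcoded here purely for illustration.
    std::string GetStableClientId() { return "example-client-id"; }

    // Returns the same offset for any two calls that fall in the same
    // interval: the hash input depends only on the client ID and the
    // interval index.
    uint64_t OffsetWithinInterval(uint64_t seconds_since_epoch) {
      const uint64_t interval_index = seconds_since_epoch / kIntervalSeconds;
      const std::size_t hash = std::hash<std::string>()(
          GetStableClientId() + "/" + std::to_string(interval_index));
      return hash % kIntervalSeconds;
    }

    int main() {
      // Two calls ten minutes apart in one interval yield identical offsets.
      std::cout << OffsetWithinInterval(1400000000) << "\n"
                << OffsetWithinInterval(1400000600) << "\n";
      return 0;
    }

Seeding a PRNG with the interval start time, as Simon Que asks, would equally make the offset repeatable within an interval; folding in a per-client value additionally spreads different machines across the interval instead of having every client collect at the same moment.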
@@ -180,8 +180,7 @@ void PerfProvider::CollectIfNecessary() {
void PerfProvider::CollectIfNecessaryAndReschedule() {
CollectIfNecessary();
- ScheduleCollection(
- base::TimeDelta::FromSeconds(kPerfCommandIntervalDefaultSeconds));
+ ScheduleCollection();
}
void PerfProvider::ParseProtoIfValid(
@@ -200,10 +199,15 @@ void PerfProvider::ParseProtoIfValid(
return;
}
- // Append a new PerfDataProto to the |cached_perf_data_| vector and swap in
- // the contents.
+ // Populate a profile collection protobuf with the collected perf data and
+ // extra metadata.
cached_perf_data_.resize(cached_perf_data_.size() + 1);
- cached_perf_data_.back().Swap(&perf_data_proto);
+ SampledProfile& collection_data = cached_perf_data_.back();
+ collection_data.set_trigger_event(SampledProfile::PERIODIC_COLLECTION);
+ collection_data.set_ms_after_boot(perf_data_proto.timestamp_sec());
+ collection_data.
+ set_ms_after_login((base::Time::Now() - login_time_).InSeconds());
Ilya Sherman 2014/05/16 21:48:57:
The field names for the time fields claim the unit
Simon Que 2014/05/17 03:25:07:
Done.

+ *collection_data.mutable_perf_data() = perf_data_proto;
}
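Per Ilya Sherman's units comment above and the "Done." reply, a later patchset presumably reconciles the ms_* field names with the stored values; a sketch of what the fixed lines likely look like (an assumption, since that patchset is not shown here):

    // Convert the seconds-granularity values to milliseconds to match the
    // ms_* field names (assumed follow-up, not part of this patchset).
    collection_data.set_ms_after_boot(perf_data_proto.timestamp_sec() * 1000);
    collection_data.set_ms_after_login(
        (base::Time::Now() - login_time_).InMilliseconds());
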
} // namespace metrics