Index: components/domain_reliability/scheduler.cc
diff --git a/components/domain_reliability/scheduler.cc b/components/domain_reliability/scheduler.cc
index fb5f8577f5b7bc7a726a964266e636facdda0530..636130c9eb0f9a4fdcf16caa1c4bff3d4a04e997 100644
--- a/components/domain_reliability/scheduler.cc
+++ b/components/domain_reliability/scheduler.cc
@@ -23,11 +23,6 @@
 const char* kMinimumUploadDelayFieldTrialName = "DomRel-MinimumUploadDelay";
 const char* kMaximumUploadDelayFieldTrialName = "DomRel-MaximumUploadDelay";
 const char* kUploadRetryIntervalFieldTrialName = "DomRel-UploadRetryInterval";
-
-// Fixed elements of backoff policy
-const double kMultiplyFactor = 2.0;
-const double kJitterFactor = 0.1;
-const int64 kMaximumBackoffMs = 60 * 1000 * 1000;
 
 unsigned GetUnsignedFieldTrialValueOrDefault(std::string field_trial_name,
                                              unsigned default_value) {
@@ -74,6 +69,7 @@
     const Params& params,
     const ScheduleUploadCallback& callback)
     : time_(time),
+      collectors_(num_collectors),
       params_(params),
       callback_(callback),
       upload_pending_(false),
@@ -81,19 +77,6 @@
       upload_running_(false),
       collector_index_(kInvalidCollectorIndex),
       last_upload_finished_(false) {
-  backoff_policy_.num_errors_to_ignore = 0;
-  backoff_policy_.initial_delay_ms =
-      params.upload_retry_interval.InMilliseconds();
-  backoff_policy_.multiply_factor = kMultiplyFactor;
-  backoff_policy_.jitter_factor = kJitterFactor;
-  backoff_policy_.maximum_backoff_ms = kMaximumBackoffMs;
-  backoff_policy_.entry_lifetime_ms = 0;
-  backoff_policy_.always_use_initial_delay = false;
-
-  for (size_t i = 0; i < num_collectors; ++i) {
-    collectors_.push_back(
-        new MockableTimeBackoffEntry(&backoff_policy_, time_));
-  }
 }
 
 DomainReliabilityScheduler::~DomainReliabilityScheduler() {}
@@ -133,20 +116,30 @@
   VLOG(1) << "Upload to collector " << collector_index_
           << (success ? " succeeded." : " failed.");
 
-  net::BackoffEntry* backoff = collectors_[collector_index_];
+  CollectorState* collector = &collectors_[collector_index_];
   collector_index_ = kInvalidCollectorIndex;
-  backoff->InformOfRequest(success);
-
-  if (!success) {
+
+  if (success) {
+    collector->failures = 0;
+  } else {
     // Restore upload_pending_ and first_beacon_time_ to pre-upload state,
     // since upload failed.
     upload_pending_ = true;
     first_beacon_time_ = old_first_beacon_time_;
-  }
-
-  last_upload_end_time_ = time_->NowTicks();
+
+    ++collector->failures;
+  }
+
+  base::TimeTicks now = time_->NowTicks();
+  base::TimeDelta retry_interval = GetUploadRetryInterval(collector->failures);
+  collector->next_upload = now + retry_interval;
+
+  last_upload_end_time_ = now;
   last_upload_success_ = success;
   last_upload_finished_ = true;
+
+  VLOG(1) << "Next upload to collector at least "
+          << retry_interval.InSeconds() << " seconds from now.";
 
   MaybeScheduleUpload();
 }
@@ -177,11 +170,10 @@
 
   base::ListValue* collectors = new base::ListValue();
   for (size_t i = 0; i < collectors_.size(); ++i) {
-    const net::BackoffEntry* backoff = collectors_[i];
+    const CollectorState* state = &collectors_[i];
     base::DictionaryValue* value = new base::DictionaryValue();
-    value->SetInteger("failures", backoff->failure_count());
-    value->SetInteger("next_upload",
-                      (backoff->GetReleaseTime() - now).InSeconds());
+    value->SetInteger("failures", state->failures);
+    value->SetInteger("next_upload", (state->next_upload - now).InSeconds());
     collectors->Append(value);
   }
   data->Set("collectors", collectors);
@@ -189,9 +181,7 @@
   return data;
 }
 
-void DomainReliabilityScheduler::MakeDeterministicForTesting() {
-  backoff_policy_.jitter_factor = 0.0;
-}
+DomainReliabilityScheduler::CollectorState::CollectorState() : failures(0) {}
 
 void DomainReliabilityScheduler::MaybeScheduleUpload() {
   if (!upload_pending_ || upload_scheduled_ || upload_running_)
@@ -238,18 +228,16 @@
   size_t min_index = kInvalidCollectorIndex;
 
   for (size_t i = 0; i < collectors_.size(); ++i) {
-    net::BackoffEntry* backoff = collectors_[i];
+    CollectorState* collector = &collectors_[i];
     // If a collector is usable, use the first one in the list.
-    if (!backoff->ShouldRejectRequest()) {
+    if (collector->failures == 0 || collector->next_upload <= now) {
       min_time = now;
       min_index = i;
       break;
-    }
-
     // If not, keep track of which will be usable soonest:
-    base::TimeTicks time = backoff->GetReleaseTime();
-    if (min_index == kInvalidCollectorIndex || time < min_time) {
-      min_time = time;
+    } else if (min_index == kInvalidCollectorIndex ||
+               collector->next_upload < min_time) {
+      min_time = collector->next_upload;
       min_index = i;
     }
   }
@@ -259,4 +247,16 @@
   *collector_index_out = min_index;
 }
 
+base::TimeDelta DomainReliabilityScheduler::GetUploadRetryInterval(
+    unsigned failures) {
+  if (failures == 0)
+    return base::TimeDelta::FromSeconds(0);
+  else {
+    // Don't back off more than 64x the original delay.
+    if (failures > 7)
+      failures = 7;
+    return params_.upload_retry_interval * (1 << (failures - 1));
+  }
+}
+
 }  // namespace domain_reliability
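Note on the new backoff behavior: with this change, each collector's retry delay is a capped exponential of its consecutive failure count instead of a net::BackoffEntry. After n failed uploads, GetUploadRetryInterval() returns upload_retry_interval * 2^(n-1), with n clamped to 7 so the delay never exceeds 64x the configured interval, and a collector with zero failures is eligible immediately. The standalone sketch below illustrates the same schedule under an assumed 60-second base interval; kBaseRetryIntervalSeconds and RetryIntervalSeconds are illustrative names, not identifiers from this patch.

#include <algorithm>
#include <cstdio>

// Sketch of the retry schedule computed by GetUploadRetryInterval() above.
// kBaseRetryIntervalSeconds stands in for params_.upload_retry_interval.
const int kBaseRetryIntervalSeconds = 60;

int RetryIntervalSeconds(unsigned failures) {
  if (failures == 0)
    return 0;  // A collector that has not failed can be used right away.
  // Clamp at 7 failures so the delay tops out at 2^6 = 64x the base interval.
  failures = std::min(failures, 7u);
  return kBaseRetryIntervalSeconds * (1 << (failures - 1));
}

int main() {
  // Prints 0, 60, 120, 240, 480, 960, 1920, 3840, 3840, 3840 seconds.
  for (unsigned failures = 0; failures <= 9; ++failures)
    std::printf("failures=%u -> %d s\n", failures, RetryIntervalSeconds(failures));
  return 0;
}

Dropping net::BackoffEntry also drops the jitter that kJitterFactor used to add, which is why MakeDeterministicForTesting() is no longer needed and is removed above.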
|