Chromium Code Reviews

Unified Diff: content/browser/download/rate_estimator.cc

Issue 14697023: downloads: Improve download rate estimation. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Improve unit tests Created 7 years, 7 months ago
Index: content/browser/download/rate_estimator.cc
diff --git a/content/browser/download/rate_estimator.cc b/content/browser/download/rate_estimator.cc
new file mode 100644
index 0000000000000000000000000000000000000000..94adec92826240eb917162a7e838b5f422466df4
--- /dev/null
+++ b/content/browser/download/rate_estimator.cc
@@ -0,0 +1,126 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/browser/download/rate_estimator.h"
+
+#include "base/logging.h"
+
+using base::Time;
asanka 2013/05/13 17:37:12 Consider using TimeTicks? It's monotonically increasing.
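A minimal sketch of the suggested direction, for illustration only and not part of this patch: the same overloads keyed on base::TimeTicks. (oldest_ticks_ is a hypothetical member name.)

  // Sketch only: TimeTicks::Now() is monotonic within a process, so
  // (now - oldest_ticks_) can never be negative and the backwards-clock
  // reset in ClearOldBuckets() would become unreachable in practice.
  void RateEstimator::AddBytes(uint32 count) {
    AddBytes(count, base::TimeTicks::Now());
  }
  uint64 RateEstimator::GetBytesPerSecond() const {
    return GetBytesPerSecond(base::TimeTicks::Now());
  }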
+using base::TimeDelta;
+
+namespace content {
+
+namespace {
+
+const int kDefaultBucketTimeSeconds = 1;
+const size_t kDefaultNumBuckets = 10;
+
+} // namespace
+
+RateEstimator::RateEstimator()
+ : byte_history_(kDefaultNumBuckets),
+ bucket_time_(TimeDelta::FromSeconds(kDefaultBucketTimeSeconds)),
+ oldest_index_(0),
+ bucket_count_(1) {
+ ResetBuckets(base::Time::Now());
+}
+
+RateEstimator::RateEstimator(base::TimeDelta bucket_time,
+ size_t num_buckets,
+ base::Time now)
+ : byte_history_(num_buckets),
+ bucket_time_(bucket_time),
+ oldest_index_(0),
+ bucket_count_(1) {
+  DCHECK_GT(bucket_time_.InSeconds(), 0);
+ ResetBuckets(now);
+}
+
+RateEstimator::~RateEstimator() {
+}
+
+void RateEstimator::AddBytes(uint32 count) {
+ AddBytes(count, Time::Now());
+}
+
+void RateEstimator::AddBytes(uint32 count, Time now) {
+ ClearOldBuckets(now);
+ int64 seconds_since_oldest = (now - oldest_time_).InSeconds();
+  DCHECK_GE(seconds_since_oldest, 0);
+  int64 delta_buckets = seconds_since_oldest / bucket_time_.InSeconds();
+  DCHECK_GE(delta_buckets, 0);
+  size_t index_offset = static_cast<size_t>(delta_buckets);
+  DCHECK_LE(index_offset, byte_history_.size());
+  size_t current_index = (oldest_index_ + delta_buckets) % byte_history_.size();
+ byte_history_[current_index] += count;
+}
+
+uint64 RateEstimator::GetBytesPerSecond() const {
+ return GetBytesPerSecond(Time::Now());
+}
+
+uint64 RateEstimator::GetBytesPerSecond(Time now) const {
+ const_cast<RateEstimator*>(this)->ClearOldBuckets(now);
+ // TODO(cbentzel): Maybe use derivative as well?
+ // TODO(cbentzel): Support fractional seconds for active bucket?
+  // We explicitly don't check for overflow here. If it happens, unsigned
+  // arithmetic at least guarantees well-defined behavior by wrapping
+  // around. The estimate will be off, but the code will still be valid.
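+  // For example, with five live 1-second buckets holding 500 bytes in
+  // total, this returns 500 / (5 * 1) = 100 bytes per second.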
+ uint64 total_bytes = 0;
+ for (size_t i = 0; i < bucket_count_; ++i) {
+ size_t index = (oldest_index_ + i) % byte_history_.size();
+ total_bytes += byte_history_[index];
+ }
+ return total_bytes / (bucket_count_ * bucket_time_.InSeconds());
+}
+
+void RateEstimator::ClearOldBuckets(Time now) {
+ int64 seconds_since_oldest = (now - oldest_time_).InSeconds();
+  // Since |now| is based on the wall clock, it's possible to go back in
+  // time. Just clear all buckets in that case.
+ if (seconds_since_oldest < 0) {
+ ResetBuckets(now);
+ return;
+ }
+
+  // Compute how many whole buckets have elapsed. If it's been a _very_
+  // long time, all of the buckets get reset below.
+  int64 delta_buckets = seconds_since_oldest / bucket_time_.InSeconds();
+  DCHECK_GE(delta_buckets, 0);
+  // Note: this cast can truncate when size_t is narrower than 64 bits and
+  // the elapsed time is enormous, which would make |delta_index| look
+  // small. A range check on |delta_buckets| before casting would be safer.
+  size_t delta_index = static_cast<size_t>(delta_buckets);
+ // If we are within the current window, keep the existing data.
+ if (delta_index < byte_history_.size()) {
+ bucket_count_ = delta_index + 1;
+ return;
+ }
+
+ // If it's been long enough to clear out all data, just do that.
+ size_t extra_buckets = delta_index - byte_history_.size() + 1;
+ if (extra_buckets > byte_history_.size()) {
+ ResetBuckets(now);
+ return;
+ }
+
+ // Advance each bucket, clearing things out.
+ bucket_count_ = byte_history_.size();
+ for (size_t i = 0; i < extra_buckets; ++i) {
+ byte_history_[oldest_index_] = 0;
+ oldest_index_ = (oldest_index_ + 1) % byte_history_.size();
+ // TODO(cbentzel): Is this prone to drift at all?
+ oldest_time_ = oldest_time_ + bucket_time_;
+ }
+}
+
+void RateEstimator::ResetBuckets(Time now) {
+ for (size_t i = 0; i < byte_history_.size(); ++i) {
+ byte_history_[i] = 0;
+ }
+ oldest_index_ = 0;
+ bucket_count_ = 1;
+ oldest_time_ = now;
+}
+
+} // namespace content
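For reference, a small usage sketch based on the signatures in this patch set. The Example() wrapper, timestamps, and byte counts are illustrative only; it assumes the three-argument constructor is public, as the accompanying unit test suggests.

  #include "base/logging.h"
  #include "content/browser/download/rate_estimator.h"

  void Example() {
    base::Time start = base::Time::Now();
    // Ten 1-second buckets, anchored at |start|.
    content::RateEstimator estimator(
        base::TimeDelta::FromSeconds(1), 10u, start);
    // Report 100 bytes in each of five consecutive seconds.
    for (int i = 0; i < 5; ++i)
      estimator.AddBytes(100, start + base::TimeDelta::FromSeconds(i));
    // Five buckets are live and hold 500 bytes in total, so the
    // estimate is 500 / (5 * 1) == 100 bytes per second.
    uint64 rate = estimator.GetBytesPerSecond(
        start + base::TimeDelta::FromSeconds(4));
    DCHECK_EQ(100u, rate);
  }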