Chromium Code Reviews| OLD | NEW |
|---|---|
| (Empty) | |
| 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "content/browser/download/rate_estimator.h" | |
| 6 | |
| 7 #include "base/logging.h" | |
| 8 | |
| 9 using base::Time; | |
|
asanka
2013/05/13 17:37:12
Consider using TimeTicks? It's monotonically increasing, unlike wall-clock Time, which can jump backwards.
| |
| 10 using base::TimeDelta; | |
| 11 | |
| 12 namespace content { | |
| 13 | |
| 14 namespace { | |
| 15 | |
| 16 static const int kDefaultBucketTimeSeconds = 1; | |
| 17 static const size_t kDefaultNumBuckets = 10; | |
| 18 | |
| 19 } // namespace | |
| 20 | |
| 21 RateEstimator::RateEstimator() | |
| 22 : byte_history_(kDefaultNumBuckets), | |
| 23 bucket_time_(TimeDelta::FromSeconds(kDefaultBucketTimeSeconds)), | |
| 24 oldest_index_(0), | |
| 25 bucket_count_(1) { | |
| 26 ResetBuckets(base::Time::Now()); | |
| 27 } | |
| 28 | |
| 29 RateEstimator::RateEstimator(base::TimeDelta bucket_time, | |
| 30 size_t num_buckets, | |
| 31 base::Time now) | |
| 32 : byte_history_(num_buckets), | |
| 33 bucket_time_(bucket_time), | |
| 34 oldest_index_(0), | |
| 35 bucket_count_(1) { | |
| 36 DCHECK(bucket_time_.InSeconds() > 0); | |
| 37 ResetBuckets(now); | |
| 38 } | |
| 39 | |
// Out-of-line empty destructor; the members clean up after themselves.
RateEstimator::~RateEstimator() {
}
| 42 | |
| 43 void RateEstimator::AddBytes(uint32 count) { | |
| 44 AddBytes(count, Time::Now()); | |
| 45 } | |
| 46 | |
| 47 void RateEstimator::AddBytes(uint32 count, Time now) { | |
| 48 ClearOldBuckets(now); | |
| 49 int64 seconds_since_oldest = (now - oldest_time_).InSeconds(); | |
| 50 DCHECK(seconds_since_oldest >= 0); | |
| 51 int64 delta_buckets = seconds_since_oldest / bucket_time_.InSeconds(); | |
| 52 DCHECK(delta_buckets >= 0); | |
| 53 size_t index_offset = static_cast<size_t>(delta_buckets); | |
| 54 DCHECK(index_offset <= byte_history_.size()); | |
| 55 int current_index = (oldest_index_ + delta_buckets) % byte_history_.size(); | |
| 56 byte_history_[current_index] += count; | |
| 57 } | |
| 58 | |
| 59 uint64 RateEstimator::GetBytesPerSecond() const { | |
| 60 return GetBytesPerSecond(Time::Now()); | |
| 61 } | |
| 62 | |
| 63 uint64 RateEstimator::GetBytesPerSecond(Time now) const { | |
| 64 const_cast<RateEstimator*>(this)->ClearOldBuckets(now); | |
| 65 // TODO(cbentzel): Maybe use derivative as well? | |
| 66 // TODO(cbentzel): Support fractional seconds for active bucket? | |
| 67 // We explicitly don't check for overflow here. If it happens, unsigned | |
| 68 // arithmetic at least guarantees behavior by wrapping around. The estimate | |
| 69 // will be off, but the code will still be valid. | |
| 70 uint64 total_bytes = 0; | |
| 71 for (size_t i = 0; i < bucket_count_; ++i) { | |
| 72 size_t index = (oldest_index_ + i) % byte_history_.size(); | |
| 73 total_bytes += byte_history_[index]; | |
| 74 } | |
| 75 return total_bytes / (bucket_count_ * bucket_time_.InSeconds()); | |
| 76 } | |
| 77 | |
// Advances the circular bucket window so that |now| falls inside it,
// zeroing any buckets that have expired. After this returns,
// oldest_time_/oldest_index_/bucket_count_ are consistent with |now|.
void RateEstimator::ClearOldBuckets(Time now) {
  int64 seconds_since_oldest = (now - oldest_time_).InSeconds();
  // Since now is based on wall clock, it's possible to go back in time.
  // Just clear all buckets in that case.
  if (seconds_since_oldest < 0) {
    ResetBuckets(now);
    return;
  }

  // If it's been a _very_ long time, reset buckets.
  int64 delta_buckets = seconds_since_oldest / bucket_time_.InSeconds();
  DCHECK(delta_buckets >= 0);
  // We probably need to do something different here, but size_t can be
  // the full 64 bit unsigned. Maybe if we know that max_buckets is reasonable
  // we can upcast and worry.
  size_t delta_index = static_cast<size_t>(delta_buckets);
  // If we are within the current window, keep the existing data; the
  // live-bucket count grows to include the bucket covering |now|.
  if (delta_index < byte_history_.size()) {
    bucket_count_ = delta_index + 1;
    return;
  }

  // If it's been long enough to clear out all data, just do that.
  // extra_buckets is how many buckets have scrolled past the window edge.
  size_t extra_buckets = delta_index - byte_history_.size() + 1;
  if (extra_buckets > byte_history_.size()) {
    ResetBuckets(now);
    return;
  }

  // Advance each bucket, clearing things out: zero the expired oldest
  // bucket, rotate the ring index, and move the window start forward one
  // bucket interval per step.
  bucket_count_ = byte_history_.size();
  for (size_t i = 0; i < extra_buckets; ++i) {
    byte_history_[oldest_index_] = 0;
    oldest_index_ = (oldest_index_ + 1) % byte_history_.size();
    // TODO(cbentzel): Is this prone to drift at all?
    oldest_time_ = oldest_time_ + bucket_time_;
  }
}
| 116 | |
| 117 void RateEstimator::ResetBuckets(Time now) { | |
| 118 for (size_t i = 0; i < byte_history_.size(); ++i) { | |
| 119 byte_history_[i] = 0; | |
| 120 } | |
| 121 oldest_index_ = 0; | |
| 122 bucket_count_ = 1; | |
| 123 oldest_time_ = now; | |
| 124 } | |
| 125 | |
| 126 } // namespace content | |
| OLD | NEW |