// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/browser/media/capture/smooth_event_sampler.h"

#include <algorithm>

#include "base/trace_event/trace_event.h"

namespace content {

namespace {

// The maximum amount of time that can elapse before considering unchanged
// content as dirty for the purposes of timer-based overdue sampling. This is
// the same value found in cc::FrameRateCounter.
const int kOverdueDirtyThresholdMillis = 250;  // 4 FPS

}  // anonymous namespace

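// Note: token_bucket_ starts at TimeDelta::Max() and is clamped to the bucket
// capacity by SetMinCapturePeriod() below, so ShouldSample() already returns
// true for the very first event considered.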
SmoothEventSampler::SmoothEventSampler(base::TimeDelta min_capture_period,
                                       int redundant_capture_goal)
    : redundant_capture_goal_(redundant_capture_goal),
      overdue_sample_count_(0),
      token_bucket_(base::TimeDelta::Max()) {
  SetMinCapturePeriod(min_capture_period);
}

void SmoothEventSampler::SetMinCapturePeriod(base::TimeDelta period) {
  DCHECK_GT(period, base::TimeDelta());
  min_capture_period_ = period;
  token_bucket_capacity_ = period + period / 2;
  token_bucket_ = std::min(token_bucket_capacity_, token_bucket_);
}

void SmoothEventSampler::ConsiderPresentationEvent(base::TimeTicks event_time) {
  DCHECK(!event_time.is_null());

  // Add tokens to the bucket based on advancement in time. Then, re-bound the
  // number of tokens in the bucket. Overflow occurs when there is too much
  // time between events (a common case), or when RecordSample() is not being
  // called often enough (a bug). On the other hand, if RecordSample() is being
  // called too often (e.g., as a reaction to IsOverdueForSamplingAt()), the
  // bucket will underflow.
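  // Illustrative example with assumed rates: with a ~33 ms min_capture_period
  // (30 FPS) and events arriving every ~16.7 ms (60 Hz), each event adds
  // ~16.7 ms of tokens while each RecordSample() drains ~33 ms, so roughly
  // every other event ends up being sampled.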
  if (!current_event_.is_null()) {
    if (current_event_ < event_time) {
      token_bucket_ += event_time - current_event_;
      if (token_bucket_ > token_bucket_capacity_)
        token_bucket_ = token_bucket_capacity_;
    }
    TRACE_COUNTER1("gpu.capture",
                   "MirroringTokenBucketUsec",
                   std::max<int64>(0, token_bucket_.InMicroseconds()));
  }
  current_event_ = event_time;
}

bool SmoothEventSampler::ShouldSample() const {
  return token_bucket_ >= min_capture_period_;
}

void SmoothEventSampler::RecordSample() {
  token_bucket_ -= min_capture_period_;
  if (token_bucket_ < base::TimeDelta())
    token_bucket_ = base::TimeDelta();
  TRACE_COUNTER1("gpu.capture",
                 "MirroringTokenBucketUsec",
                 std::max<int64>(0, token_bucket_.InMicroseconds()));

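  // Sampling an event that has not yet been recorded resets the redundant
  // capture count; otherwise this was a redundant (overdue-driven) capture of
  // unchanged content.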
  if (HasUnrecordedEvent()) {
    last_sample_ = current_event_;
    overdue_sample_count_ = 0;
  } else {
    ++overdue_sample_count_;
  }
}

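// Timer-driven "overdue" sampling: this returns true only while the content is
// still considered dirty (there is an unrecorded event, or the redundant
// capture goal has not been met) and either no sample has been recorded yet or
// the last sample is at least kOverdueDirtyThresholdMillis old.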
bool SmoothEventSampler::IsOverdueForSamplingAt(base::TimeTicks event_time)
    const {
  DCHECK(!event_time.is_null());

  if (!HasUnrecordedEvent() && overdue_sample_count_ >= redundant_capture_goal_)
    return false;  // Not dirty.

  if (last_sample_.is_null())
    return true;

  // If we're dirty but not yet old, then we've recently gotten updates, so we
  // won't request a sample just yet.
  base::TimeDelta dirty_interval = event_time - last_sample_;
  return dirty_interval >=
      base::TimeDelta::FromMilliseconds(kOverdueDirtyThresholdMillis);
}

bool SmoothEventSampler::HasUnrecordedEvent() const {
  return !current_event_.is_null() && current_event_ != last_sample_;
}

}  // namespace content
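
// A minimal usage sketch, assuming a hypothetical OnCompositorFrame() hook and
// a hypothetical CaptureFrameNow() helper (neither is defined in this file):
//
//   SmoothEventSampler sampler(base::TimeDelta::FromMilliseconds(33),  // ~30 FPS
//                              3 /* redundant_capture_goal */);
//
//   void OnCompositorFrame(base::TimeTicks frame_time) {
//     sampler.ConsiderPresentationEvent(frame_time);
//     if (sampler.ShouldSample()) {
//       sampler.RecordSample();
//       CaptureFrameNow();
//     }
//   }
//
//   // A periodic timer can poll IsOverdueForSamplingAt(now), followed by
//   // RecordSample(), to keep capturing content that has stopped changing
//   // until redundant_capture_goal is met.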