// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/scheduler/resource_dispatch_throttler.h"

#include "base/auto_reset.h"
#include "base/debug/trace_event.h"
#include "content/common/resource_messages.h"
#include "content/renderer/scheduler/renderer_scheduler.h"
#include "ipc/ipc_message_macros.h"

namespace content {
namespace {

bool IsResourceRequest(const IPC::Message& msg) {
  return msg.type() == ResourceHostMsg_RequestResource::ID;
}

}  // namespace

ResourceDispatchThrottler::ResourceDispatchThrottler(
    IPC::Sender* proxied_sender,
    RendererScheduler* scheduler,
    base::TimeDelta flush_period,
    uint32 max_requests_per_flush)
    : proxied_sender_(proxied_sender),
      scheduler_(scheduler),
      flush_period_(flush_period),
      max_requests_per_flush_(max_requests_per_flush),
      flush_timer_(
          FROM_HERE,
          flush_period_,
          base::Bind(&ResourceDispatchThrottler::Flush, base::Unretained(this)),
          false /* is_repeating */),
      sent_requests_since_last_flush_(0) {
  DCHECK(proxied_sender);
  DCHECK(scheduler);
  DCHECK_NE(flush_period_, base::TimeDelta());
  DCHECK(max_requests_per_flush_);
  flush_timer_.SetTaskRunner(scheduler->LoadingTaskRunner());
}

ResourceDispatchThrottler::~ResourceDispatchThrottler() {
  std::deque<IPC::Message*> throttled_messages;
  throttled_messages.swap(throttled_messages_);
  for (auto& message: throttled_messages)
davidben 2015/02/03 19:46:47:
  Nit: Taking a random sample from codesearch, put a
jdduke (slow) 2015/02/04 01:39:50:
  Done.
    ForwardMessage(message);
  // There shouldn't be re-entrancy issues when forwarding an IPC, but validate
  // as a safeguard.
  DCHECK(throttled_messages_.empty());
}

bool ResourceDispatchThrottler::Send(IPC::Message* msg) {
  thread_checker_.CalledOnValidThread();
davidben 2015/02/03 19:46:47:
  DCHECK(....CalledOnValidThread());
jdduke (slow) 2015/02/04 01:39:50:
  Done.
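A sketch of what the "Done" presumably amounts to, based on davidben's
suggestion (the updated patch set is not shown here, so this is an assumption
rather than the landed change):

  DCHECK(thread_checker_.CalledOnValidThread());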
  if (msg->is_sync())
    return ForwardMessage(msg);

  // Always defer message forwarding if there are pending messages, ensuring
  // message dispatch ordering consistency.
  if (!throttled_messages_.empty()) {
    TRACE_EVENT_INSTANT0("loader", "ResourceDispatchThrottler::ThrottleMessage",
                         TRACE_EVENT_SCOPE_THREAD);
    throttled_messages_.push_back(msg);
    return true;
  }

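  // Only resource requests sent while the scheduler anticipates high-priority
  // work are candidates for throttling; any other message is forwarded
  // immediately.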
  if (!IsResourceRequest(*msg))
    return ForwardMessage(msg);

  if (!scheduler_->IsHighPriorityWorkAnticipated())
    return ForwardMessage(msg);

  if (Now() > (last_sent_request_time_ + flush_period_)) {
    // If sufficient time has passed since the previous send, we can effectively
    // mark the pipeline as flushed.
    sent_requests_since_last_flush_ = 0;
    return ForwardMessage(msg);
  }

  if (sent_requests_since_last_flush_ < max_requests_per_flush_)
    return ForwardMessage(msg);

  TRACE_EVENT_INSTANT0("loader", "ResourceDispatchThrottler::ThrottleRequest",
                       TRACE_EVENT_SCOPE_THREAD);
  throttled_messages_.push_back(msg);
  ScheduleFlush();
  return true;
}

base::TimeTicks ResourceDispatchThrottler::Now() const {
  return base::TimeTicks::Now();
}

void ResourceDispatchThrottler::ScheduleFlush() {
  DCHECK(!flush_timer_.IsRunning());
  flush_timer_.Reset();
}

void ResourceDispatchThrottler::Flush() {
  TRACE_EVENT1("loader", "ResourceDispatchThrottler::Flush",
               "total_throttled_messages", throttled_messages_.size());
  sent_requests_since_last_flush_ = 0;

  // If high-priority work is no longer anticipated, dispatch can be safely
  // accelerated. Avoid a complete flush even in that case, in the event that
  // a large number of requests have been throttled.
  uint32 max_requests = scheduler_->IsHighPriorityWorkAnticipated()
                            ? max_requests_per_flush_
                            : max_requests_per_flush_ * 2;

  while (!throttled_messages_.empty() &&
         (sent_requests_since_last_flush_ < max_requests ||
          !IsResourceRequest(*throttled_messages_.front()))) {
    IPC::Message* msg = throttled_messages_.front();
    throttled_messages_.pop_front();
    ForwardMessage(msg);
  }

  if (!throttled_messages_.empty())
    ScheduleFlush();
}

bool ResourceDispatchThrottler::ForwardMessage(IPC::Message* msg) {
  if (IsResourceRequest(*msg)) {
    last_sent_request_time_ = Now();
    ++sent_requests_since_last_flush_;
  }
  return proxied_sender_->Send(msg);
}

}  // namespace content
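
A rough usage sketch, not part of this CL: how a caller might route renderer
IPC through the throttler. ResourceDispatchThrottler is treated here as an
IPC::Sender (it overrides Send()); the helper name, the 10 ms flush period and
the limit of 2 requests per flush are illustrative assumptions, not values
taken from this change.

  #include "base/time/time.h"
  #include "content/renderer/scheduler/resource_dispatch_throttler.h"
  #include "ipc/ipc_sender.h"

  namespace content {

  // Hypothetical helper: wraps the real sender so that resource requests
  // issued while high-priority work is anticipated are rate-limited. The
  // caller owns the returned throttler and must keep |real_sender| and
  // |scheduler| alive for its lifetime.
  IPC::Sender* WrapWithThrottler(IPC::Sender* real_sender,
                                 RendererScheduler* scheduler) {
    return new ResourceDispatchThrottler(
        real_sender, scheduler,
        base::TimeDelta::FromMilliseconds(10) /* flush_period */,
        2 /* max_requests_per_flush */);
  }

  }  // namespace content

Note that non-request messages still go through the same Send() path, so once
any message has been throttled, later messages queue behind it and dispatch
ordering is preserved.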