// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/resource_dispatch_throttler.h"

#include "base/auto_reset.h"
#include "base/debug/trace_event.h"
#include "content/common/resource_messages.h"
#include "content/renderer/scheduler/renderer_scheduler.h"
#include "ipc/ipc_message_macros.h"

namespace content {
namespace {

int GetRequestId(const IPC::Message& msg) {
  int request_id = -1;
  switch (msg.type()) {
    case ResourceHostMsg_RequestResource::ID: {
      PickleIterator iter(msg);
      int routing_id = -1;
      if (!iter.ReadInt(&routing_id) || !iter.ReadInt(&request_id))
        NOTREACHED() << "Invalid id for resource request message.";
    } break;

    case ResourceHostMsg_DidChangePriority::ID:
    case ResourceHostMsg_ReleaseDownloadedFile::ID:
    case ResourceHostMsg_CancelRequest::ID:
      if (!PickleIterator(msg).ReadInt(&request_id))
        NOTREACHED() << "Invalid id for resource message.";
      break;

    default:
      NOTREACHED() << "Invalid message for resource throttling.";
      break;
  }
  return request_id;
}

bool UpdateRequestPriority(const IPC::Message& priority_msg,
                           IPC::Message* request_msg) {
  ResourceHostMsg_DidChangePriority::Param priority_params;
  if (!ResourceHostMsg_DidChangePriority::Read(&priority_msg, &priority_params))
    return false;

  ResourceHostMsg_RequestResource::Param request_params;
  if (!ResourceHostMsg_RequestResource::Read(request_msg, &request_params))
    return false;

  int routing_id = get<0>(request_params);
  int request_id = get<1>(request_params);
  DCHECK_EQ(request_id, get<0>(priority_params));

  ResourceHostMsg_Request& updated_request = get<2>(request_params);
  updated_request.priority = get<1>(priority_params);
  *request_msg =
      ResourceHostMsg_RequestResource(routing_id, request_id, updated_request);
  return true;
}

bool UpdateRequestToReleaseDownloadedFile(IPC::Message* request_msg) {
  ResourceHostMsg_RequestResource::Param request_params;
  if (!ResourceHostMsg_RequestResource::Read(request_msg, &request_params))
    return false;

  int routing_id = get<0>(request_params);
  int request_id = get<1>(request_params);
  ResourceHostMsg_Request& updated_request = get<2>(request_params);
  // No need to update the request message if no file download was specified.
  if (!updated_request.download_to_file)
    return true;

  updated_request.download_to_file = false;
  *request_msg =
      ResourceHostMsg_RequestResource(routing_id, request_id, updated_request);
  return true;
}

}  // namespace

ResourceDispatchThrottler::ResourceDispatchThrottler(
    IPC::Sender* proxied_sender,
    RendererScheduler* scheduler,
    base::TimeDelta flush_period,
    uint32 max_requests_per_flush)
    : proxied_sender_(proxied_sender),
      scheduler_(scheduler),
      flush_period_(flush_period),
      max_requests_per_flush_(max_requests_per_flush),
      flush_timer_(
          FROM_HERE,
          flush_period_,
          base::Bind(&ResourceDispatchThrottler::Flush, base::Unretained(this)),
          false /* is_repeating */),
      sent_requests_since_last_flush_(0),
      is_forwarding_request_(false) {
  DCHECK(proxied_sender);
  DCHECK(scheduler);
  DCHECK(flush_period_ != base::TimeDelta());
  DCHECK(max_requests_per_flush_);
  flush_timer_.SetTaskRunner(scheduler->DefaultTaskRunner());

Sami (2015/01/27 14:06:06):
Should we make this use the loading task runner instead?

jdduke (slow) (2015/01/27 16:59:09):
Done.
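
For reference, the change agreed on above would presumably just swap the task
runner handed to the flush timer; a minimal sketch, assuming RendererScheduler
exposes a LoadingTaskRunner() accessor alongside DefaultTaskRunner():

  // Sketch of the follow-up discussed above (assumes
  // RendererScheduler::LoadingTaskRunner() is available): sequence flush
  // tasks with other loading work instead of the default task queue.
  flush_timer_.SetTaskRunner(scheduler->LoadingTaskRunner());
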
}

ResourceDispatchThrottler::~ResourceDispatchThrottler() {
  for (auto& request : throttled_requests_)
    ForwardRequest(request.second);
  throttled_requests_.clear();
}

bool ResourceDispatchThrottler::Send(IPC::Message* msg) {
  DCHECK(thread_checker_.CalledOnValidThread());
  switch (msg->type()) {
    case ResourceHostMsg_RequestResource::ID:

Sami (2015/01/27 14:06:06):
Could we use IPC_BEGIN_MESSAGE_MAP etc. here?

jdduke (slow) (2015/01/27 16:59:09):
Hmm, I don't want to deserialize the whole message…

Sami (2015/01/27 17:50:12):
Oh, right, those macros are clearly meant more for…
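
For context, the macro-based dispatch suggested above would look roughly like
the sketch below. The handler name is hypothetical and this is not the approach
taken in this patch: IPC_MESSAGE_HANDLER unpacks the message parameters before
invoking the handler, which is exactly the full deserialization being avoided,
and it would leave no serialized IPC::Message to forward downstream.

// Illustration only: an IPC_BEGIN_MESSAGE_MAP form of Send(). The handler
// (OnRequestResourceUnpacked) is hypothetical; the real implementation
// switches on msg->type() so the raw message can be forwarded as-is
// (message ownership is glossed over in this sketch).
bool ResourceDispatchThrottler::Send(IPC::Message* msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(ResourceDispatchThrottler, *msg)
    IPC_MESSAGE_HANDLER(ResourceHostMsg_RequestResource,
                        OnRequestResourceUnpacked)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  // Messages not claimed by the map would still need to be forwarded.
  return handled ? true : proxied_sender_->Send(msg);
}
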
      return OnRequestResource(msg);

    case ResourceHostMsg_DidChangePriority::ID:
      return OnDidChangePriority(msg);

    case ResourceHostMsg_ReleaseDownloadedFile::ID:
      return OnReleaseDownloadedFile(msg);

    case ResourceHostMsg_CancelRequest::ID:
      return OnCancelRequest(msg);

    default:
      return proxied_sender_->Send(msg);
  }
}

base::TimeTicks ResourceDispatchThrottler::Now() const {
  return base::TimeTicks::Now();
}

void ResourceDispatchThrottler::ScheduleFlush() {
  DCHECK(!flush_timer_.IsRunning());
  flush_timer_.Reset();
}

void ResourceDispatchThrottler::Flush() {
  TRACE_EVENT1("loader", "ResourceDispatchThrottler::Flush",
               "total_throttled_requests", throttled_requests_.size());
  sent_requests_since_last_flush_ = 0;

  // If high-priority work is no longer anticipated, dispatch can safely be
  // accelerated. Even then, avoid completely flushing, in case a large number
  // of requests have been throttled.
  uint32 max_requests = scheduler_->IsHighPriorityWorkAnticipated()
                            ? max_requests_per_flush_
                            : max_requests_per_flush_ * 2;

  while (!throttled_requests_.empty() &&
         sent_requests_since_last_flush_ < max_requests) {
    auto request_it = throttled_requests_.begin();
    scoped_ptr<IPC::Message> forwarded_msg(request_it->second);
    throttled_requests_.erase(request_it);
    ForwardRequest(forwarded_msg.release());
  }

  if (!throttled_requests_.empty())
    ScheduleFlush();
}

bool ResourceDispatchThrottler::ForwardRequest(IPC::Message* msg) {
  DCHECK(!is_forwarding_request_);
  base::AutoReset<bool> is_forwarding_request_resetter(&is_forwarding_request_,
                                                       true);
  last_sent_request_time_ = Now();
  ++sent_requests_since_last_flush_;
  return proxied_sender_->Send(msg);
}

bool ResourceDispatchThrottler::OnRequestResource(IPC::Message* msg) {
  DCHECK(!is_forwarding_request_);
  const int request_id = GetRequestId(*msg);

  // Shift responsibility for handling an invalid request ID downstream.
  if (request_id == -1)
    return ForwardRequest(msg);

  if (!throttled_requests_.empty()) {
    // Valid request ids must be monotonically increasing.

Sami (2015/01/27 14:06:06):
Do we need to worry about wrapping?

jdduke (slow) (2015/01/27 16:59:09):
Yeah, I raised this concern to mmenke@ last week.

Sami (2015/01/27 17:50:12):
Right. It seems simple enough to use an int64 here…
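
As an aside on the wrap-around question above: if 32-bit request ids can wrap,
a serial-number style comparison keeps "newer than" checks valid across the
wrap. The helper below is purely illustrative (hypothetical, not part of this
patch); the int64 key Sami mentions is the simpler fix for the ordered-map
bookkeeping itself.

// Illustration only (hypothetical helper): wrap-tolerant "a is newer than b"
// check for 32-bit request ids, in the spirit of serial-number arithmetic.
bool RequestIdIsNewer(int a, int b) {
  return static_cast<int32>(static_cast<uint32>(a) - static_cast<uint32>(b)) > 0;
}
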
    DCHECK_LT(throttled_requests_.rbegin()->first, request_id);
    throttled_requests_.insert(std::make_pair(request_id, msg));
    TRACE_EVENT_INSTANT0("loader", "ResourceDispatchThrottler::ThrottleRequest",
                         TRACE_EVENT_SCOPE_THREAD);
    return true;
  }

  if (!scheduler_->IsHighPriorityWorkAnticipated())
    return ForwardRequest(msg);

  if (Now() > (last_sent_request_time_ + flush_period_)) {
    // If sufficient time has passed since the previous send, we can effectively
    // mark the pipeline as flushed.
    sent_requests_since_last_flush_ = 0;
    return ForwardRequest(msg);
  }

  if (sent_requests_since_last_flush_ < max_requests_per_flush_)
    return ForwardRequest(msg);

  TRACE_EVENT_INSTANT0("loader", "ResourceDispatchThrottler::ThrottleRequest",
                       TRACE_EVENT_SCOPE_THREAD);
  throttled_requests_.insert(std::make_pair(request_id, msg));
  ScheduleFlush();
  return true;
}

bool ResourceDispatchThrottler::OnDidChangePriority(IPC::Message* msg) {
  scoped_ptr<IPC::Message> scoped_msg(msg);
  auto request_it = throttled_requests_.find(GetRequestId(*msg));
  if (request_it == throttled_requests_.end())
    return proxied_sender_->Send(scoped_msg.release());

  return UpdateRequestPriority(*scoped_msg, request_it->second);
}

bool ResourceDispatchThrottler::OnReleaseDownloadedFile(IPC::Message* msg) {
  scoped_ptr<IPC::Message> scoped_msg(msg);
  auto request_it = throttled_requests_.find(GetRequestId(*msg));
  if (request_it == throttled_requests_.end())
    return proxied_sender_->Send(scoped_msg.release());

  // TODO(jdduke): Should this simply cancel the outstanding request?
  return UpdateRequestToReleaseDownloadedFile(request_it->second);
}

bool ResourceDispatchThrottler::OnCancelRequest(IPC::Message* msg) {
  scoped_ptr<IPC::Message> scoped_msg(msg);
  auto request_it = throttled_requests_.find(GetRequestId(*msg));
  if (request_it == throttled_requests_.end())
    return proxied_sender_->Send(scoped_msg.release());

  scoped_ptr<IPC::Message> cancelled_msg(request_it->second);
  throttled_requests_.erase(request_it);
  return true;
}

}  // namespace content