// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/scheduler/resource_dispatch_throttler.h"

#include "base/memory/scoped_vector.h"
#include "content/common/resource_messages.h"
#include "content/test/fake_renderer_scheduler.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace content {
namespace {

const uint32 kRequestsPerFlush = 4;
const double kFlushPeriodSeconds = 1.f / 60;
const int kRoutingId = 1;

typedef ScopedVector<IPC::Message> ScopedMessages;

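// Reads the request id out of a resource message's serialized payload so
// tests can verify which request a sent message belongs to.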
int GetRequestId(const IPC::Message& msg) {
  int request_id = -1;
  switch (msg.type()) {
    case ResourceHostMsg_RequestResource::ID: {
      PickleIterator iter(msg);
      int routing_id = -1;
      if (!iter.ReadInt(&routing_id) || !iter.ReadInt(&request_id))
        NOTREACHED() << "Invalid id for resource request message.";
    } break;

    case ResourceHostMsg_DidChangePriority::ID:
    case ResourceHostMsg_ReleaseDownloadedFile::ID:
    case ResourceHostMsg_CancelRequest::ID:
      if (!PickleIterator(msg).ReadInt(&request_id))
        NOTREACHED() << "Invalid id for resource message.";
      break;

    default:
      NOTREACHED() << "Invalid message for resource throttling.";
      break;
  }
  return request_id;
}

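// Fake scheduler that lets tests control whether the renderer reports
// anticipated high-priority work, the signal the throttler keys off of.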
class RendererSchedulerForTest : public FakeRendererScheduler {
 public:
  RendererSchedulerForTest() : high_priority_work_anticipated_(false) {}
  ~RendererSchedulerForTest() override {}

  // RendererScheduler implementation:
  bool IsHighPriorityWorkAnticipated() override {
    return high_priority_work_anticipated_;
  }

  void set_high_priority_work_anticipated(bool anticipated) {
    high_priority_work_anticipated_ = anticipated;
  }

 private:
  bool high_priority_work_anticipated_;
};

}  // namespace

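// Throttler subclass that replaces the real clock and flush scheduling with
// test controls: time only advances via Advance(), and scheduled flushes are
// recorded so they can be run synchronously with RunScheduledFlush().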
class ResourceDispatchThrottlerForTest : public ResourceDispatchThrottler {
 public:
  ResourceDispatchThrottlerForTest(IPC::Sender* sender,
                                   RendererScheduler* scheduler)
      : ResourceDispatchThrottler(
            sender,
            scheduler,
            base::TimeDelta::FromSecondsD(kFlushPeriodSeconds),
            kRequestsPerFlush),
        flush_scheduled_(false) {}
  ~ResourceDispatchThrottlerForTest() override {}

  void Advance(base::TimeDelta delta) { now_ += delta; }

  bool RunScheduledFlush() {
    if (!flush_scheduled_)
      return false;

    flush_scheduled_ = false;
    Flush();
    return true;
  }

  bool flush_scheduled() const { return flush_scheduled_; }

 private:
  // ResourceDispatchThrottler overrides:
  base::TimeTicks Now() const override { return now_; }
  void ScheduleFlush() override { flush_scheduled_ = true; }

  base::TimeTicks now_;
  bool flush_scheduled_;
};

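// Test fixture that acts as the throttler's IPC::Sender, recording every
// message that is actually forwarded in |sent_messages_|.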
class ResourceDispatchThrottlerTest : public testing::Test, public IPC::Sender {
 public:
  ResourceDispatchThrottlerTest() : last_request_id_(0) {
    throttler_.reset(new ResourceDispatchThrottlerForTest(this, &scheduler_));
  }
  ~ResourceDispatchThrottlerTest() override {}

  // IPC::Sender implementation:
  bool Send(IPC::Message* msg) override {
    sent_messages_.push_back(msg);
    return true;
  }

 protected:
  void SetHighPriorityWorkAnticipated(bool anticipated) {
    scheduler_.set_high_priority_work_anticipated(anticipated);
  }

  void Advance(base::TimeDelta delta) { throttler_->Advance(delta); }

  bool RunScheduledFlush() { return throttler_->RunScheduledFlush(); }

  bool FlushScheduled() { return throttler_->flush_scheduled(); }

  bool RequestResource() {
    ResourceHostMsg_Request request;
    request.download_to_file = true;
    return throttler_->Send(new ResourceHostMsg_RequestResource(
        kRoutingId, ++last_request_id_, request));
  }

  bool RequestResourceSync() {
    SyncLoadResult result;
    return throttler_->Send(new ResourceHostMsg_SyncLoad(
        kRoutingId, ++last_request_id_, ResourceHostMsg_Request(), &result));
  }

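  // Keeps issuing requests until one is deferred rather than sent, leaving
  // the throttler in a throttled state.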
  void RequestResourcesUntilThrottled() {
    SetHighPriorityWorkAnticipated(true);
    GetAndResetSentMessageCount();
    RequestResource();
    while (GetAndResetSentMessageCount())
      RequestResource();
  }

  bool UpdateRequestPriority(int request_id, net::RequestPriority priority) {
    return throttler_->Send(
        new ResourceHostMsg_DidChangePriority(request_id, priority, 0));
  }

  bool ReleaseDownloadedFile(int request_id) {
    return throttler_->Send(
        new ResourceHostMsg_ReleaseDownloadedFile(request_id));
  }

  bool CancelRequest(int request_id) {
    return throttler_->Send(new ResourceHostMsg_CancelRequest(request_id));
  }

  size_t GetAndResetSentMessageCount() {
    size_t sent_message_count = sent_messages_.size();
    sent_messages_.clear();
    return sent_message_count;
  }

  const IPC::Message* LastSentMessage() const {
    return sent_messages_.empty() ? nullptr : sent_messages_.back();
  }

  int LastSentRequestId() const {
    const IPC::Message* msg = LastSentMessage();
    if (!msg)
      return -1;

    int routing_id = -1;
    int request_id = -1;
    PickleIterator iter(*msg);
    CHECK(IPC::ReadParam(msg, &iter, &routing_id));
    CHECK(IPC::ReadParam(msg, &iter, &request_id));
    return request_id;
  }

  int last_request_id() const { return last_request_id_; }

  ScopedMessages sent_messages_;

 private:
  scoped_ptr<ResourceDispatchThrottlerForTest> throttler_;
  RendererSchedulerForTest scheduler_;
  int last_request_id_;
  DISALLOW_COPY_AND_ASSIGN(ResourceDispatchThrottlerTest);
};

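// With no high-priority work anticipated, requests are never throttled and
// are forwarded as soon as they are sent.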
TEST_F(ResourceDispatchThrottlerTest, NotThrottledByDefault) {
  SetHighPriorityWorkAnticipated(false);
  for (size_t i = 0; i < kRequestsPerFlush * 2; ++i) {
    RequestResource();
    EXPECT_EQ(i + 1, sent_messages_.size());
  }
}

TEST_F(ResourceDispatchThrottlerTest, NotThrottledIfSendLimitNotReached) {
  SetHighPriorityWorkAnticipated(true);
  for (size_t i = 0; i < kRequestsPerFlush; ++i) {
    RequestResource();
    EXPECT_EQ(i + 1, sent_messages_.size());
  }
}

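// Once the per-flush request budget is exhausted while high-priority work is
// anticipated, further requests are deferred until the scheduled flush runs.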
TEST_F(ResourceDispatchThrottlerTest, ThrottledWhenHighPriorityWork) {
  SetHighPriorityWorkAnticipated(true);
  for (size_t i = 0; i < kRequestsPerFlush; ++i)
    RequestResource();
  ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());

  RequestResource();
  EXPECT_EQ(0U, sent_messages_.size());

  EXPECT_TRUE(RunScheduledFlush());
  EXPECT_EQ(1U, sent_messages_.size());
}

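// Once messages have been deferred, subsequent requests are also deferred,
// even if high-priority work is no longer anticipated.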
TEST_F(ResourceDispatchThrottlerTest,
       ThrottledWhenDeferredMessageQueueNonEmpty) {
  SetHighPriorityWorkAnticipated(true);
  for (size_t i = 0; i < kRequestsPerFlush; ++i)
    RequestResource();
  ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());

  RequestResource();
  EXPECT_EQ(0U, sent_messages_.size());
  SetHighPriorityWorkAnticipated(false);
  RequestResource();
  EXPECT_EQ(0U, sent_messages_.size());

  EXPECT_TRUE(RunScheduledFlush());
  EXPECT_EQ(2U, sent_messages_.size());
}

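// If at least a flush period has elapsed since the last send, requests go out
// immediately and no flush needs to be scheduled.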
TEST_F(ResourceDispatchThrottlerTest, NotThrottledIfSufficientTimePassed) {
  SetHighPriorityWorkAnticipated(true);

  for (size_t i = 0; i < kRequestsPerFlush * 2; ++i) {
    Advance(base::TimeDelta::FromSecondsD(kFlushPeriodSeconds * 2));
    RequestResource();
    EXPECT_EQ(1U, GetAndResetSentMessageCount());
    EXPECT_FALSE(FlushScheduled());
  }
}

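// Synchronous loads are never throttled; sending one also flushes any
// previously deferred messages.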
TEST_F(ResourceDispatchThrottlerTest, NotThrottledIfSyncMessage) {
  SetHighPriorityWorkAnticipated(true);

  RequestResourceSync();
  EXPECT_EQ(1U, GetAndResetSentMessageCount());

  // Saturate the queue.
  for (size_t i = 0; i < kRequestsPerFlush * 2; ++i)
    RequestResource();
  ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());

  // Synchronous messages should flush any previously throttled messages.
  RequestResourceSync();
  EXPECT_EQ(1U + kRequestsPerFlush, GetAndResetSentMessageCount());
  RequestResourceSync();
  EXPECT_EQ(1U, GetAndResetSentMessageCount());

  // Previously throttled messages should already have been flushed.
  RunScheduledFlush();
  EXPECT_EQ(0U, GetAndResetSentMessageCount());
}

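// A backlog larger than the per-flush budget is drained over several
// scheduled flushes, kRequestsPerFlush messages at a time.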
TEST_F(ResourceDispatchThrottlerTest, MultipleFlushes) {
  SetHighPriorityWorkAnticipated(true);
  for (size_t i = 0; i < kRequestsPerFlush * 4; ++i)
    RequestResource();
  ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());

  for (size_t i = 0; i < 3; ++i) {
    SCOPED_TRACE(i);
    EXPECT_TRUE(RunScheduledFlush());
    EXPECT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());
  }

  EXPECT_FALSE(FlushScheduled());
  EXPECT_EQ(0U, sent_messages_.size());
}

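// Requests sent while earlier flushes are still pending are queued behind
// them and drained by later flushes.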
TEST_F(ResourceDispatchThrottlerTest, MultipleFlushesWhileReceiving) {
  SetHighPriorityWorkAnticipated(true);
  for (size_t i = 0; i < kRequestsPerFlush * 4; ++i)
    RequestResource();
  ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());

  for (size_t i = 0; i < 3; ++i) {
    SCOPED_TRACE(i);
    EXPECT_TRUE(RunScheduledFlush());
    EXPECT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());
    for (size_t j = 0; j < kRequestsPerFlush; ++j)
      RequestResource();
    EXPECT_EQ(0U, sent_messages_.size());
  }

  for (size_t i = 0; i < 3; ++i) {
    EXPECT_TRUE(RunScheduledFlush());
    EXPECT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());
  }

  EXPECT_FALSE(FlushScheduled());
  EXPECT_EQ(0U, sent_messages_.size());
}

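// Messages other than resource requests (e.g. priority changes) do not count
// toward the per-flush request budget and never trigger throttling.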
TEST_F(ResourceDispatchThrottlerTest, NonRequestsNeverTriggerThrottling) {
  RequestResource();
  ASSERT_EQ(1U, GetAndResetSentMessageCount());

  for (size_t i = 0; i < kRequestsPerFlush * 3; ++i)
    UpdateRequestPriority(last_request_id(), net::HIGHEST);
  EXPECT_EQ(kRequestsPerFlush * 3, sent_messages_.size());

  RequestResource();
  EXPECT_EQ(1U + kRequestsPerFlush * 3, GetAndResetSentMessageCount());
}

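// Once throttling has kicked in, non-request messages (priority changes, file
// releases, cancellations) are deferred behind the queued request and sent
// together on the next flush.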
TEST_F(ResourceDispatchThrottlerTest, NonRequestsDeferredWhenThrottling) {
  RequestResource();
  ASSERT_EQ(1U, GetAndResetSentMessageCount());

  RequestResourcesUntilThrottled();
  UpdateRequestPriority(last_request_id(), net::HIGHEST);
  ReleaseDownloadedFile(last_request_id());
  CancelRequest(last_request_id());

  EXPECT_TRUE(RunScheduledFlush());
  EXPECT_EQ(4U, GetAndResetSentMessageCount());
  EXPECT_FALSE(FlushScheduled());
}

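// Deferred requests and their associated non-request messages must be flushed
// in exactly the order they were sent.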
TEST_F(ResourceDispatchThrottlerTest, MessageOrderingPreservedWhenThrottling) {
  SetHighPriorityWorkAnticipated(true);
  for (size_t i = 0; i < kRequestsPerFlush; ++i)
    RequestResource();
  ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());

  for (size_t i = 0; i < kRequestsPerFlush; ++i) {
    RequestResource();
    UpdateRequestPriority(last_request_id(), net::HIGHEST);
    CancelRequest(last_request_id() - 1);
  }
  ASSERT_EQ(0U, sent_messages_.size());

  EXPECT_TRUE(RunScheduledFlush());
  ASSERT_EQ(kRequestsPerFlush * 3, sent_messages_.size());
  for (size_t i = 0; i < sent_messages_.size(); i += 3) {
    SCOPED_TRACE(i);
    const auto& request_msg = *sent_messages_[i];
    const auto& priority_msg = *sent_messages_[i + 1];
    const auto& cancel_msg = *sent_messages_[i + 2];

    EXPECT_EQ(request_msg.type(), ResourceHostMsg_RequestResource::ID);
    EXPECT_EQ(priority_msg.type(), ResourceHostMsg_DidChangePriority::ID);
    EXPECT_EQ(cancel_msg.type(), ResourceHostMsg_CancelRequest::ID);

    EXPECT_EQ(GetRequestId(request_msg), GetRequestId(priority_msg));
    EXPECT_EQ(GetRequestId(request_msg) - 1, GetRequestId(cancel_msg));
  }
  EXPECT_FALSE(FlushScheduled());
}

}  // namespace content