Index: content/renderer/resource_dispatch_throttler_unittest.cc |
diff --git a/content/renderer/resource_dispatch_throttler_unittest.cc b/content/renderer/resource_dispatch_throttler_unittest.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..12478f7469824b38c15e4145b357ea3c2b2b3e63 |
--- /dev/null |
+++ b/content/renderer/resource_dispatch_throttler_unittest.cc |
@@ -0,0 +1,345 @@ |
+// Copyright 2014 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "content/renderer/resource_dispatch_throttler.h" |
+ |
+#include "base/memory/scoped_vector.h" |
+#include "content/common/resource_messages.h" |
+#include "content/test/fake_renderer_scheduler.h" |
+#include "testing/gtest/include/gtest/gtest.h" |
+ |
+namespace content { |
+namespace { |
+ |
+const uint32 kRequestsPerFlush = 4; |
+const double kFlushPeriodSeconds = 1.f / 60; |
+const int kRoutingId = 1; |
+ |
+typedef ScopedVector<IPC::Message> ScopedMessages; |
+ |
+class SenderSink : public IPC::Sender { |
+ public: |
+ SenderSink(ScopedMessages* sent_messages) : sent_messages_(sent_messages) {} |
+ |
+ // IPC::Sender implementation: |
+ bool Send(IPC::Message* msg) override { |
+ sent_messages_->push_back(msg); |
+ return true; |
+ } |
+ |
+ private: |
+ ScopedMessages* sent_messages_; |
+}; |
+ |
+} // namespace |
+ |
+class ResourceDispatchThrottlerTest : public testing::Test, |
+ public FakeRendererScheduler, |
+ public ResourceDispatchThrottler { |
+ public: |
+ ResourceDispatchThrottlerTest() |
+ : ResourceDispatchThrottler( |
+ &sender_sink_, |
+ this, |
+ base::TimeDelta::FromSecondsD(kFlushPeriodSeconds), |
+ kRequestsPerFlush), |
+ sender_sink_(&sent_messages_), |
+ flush_scheduled_(false), |
+ anticipate_(false), |
+ last_request_id_(0) {} |
+ ~ResourceDispatchThrottlerTest() override {} |
+ |
+ // RendererScheduler implementation: |
+ bool ShouldAnticipateHighPriorityWork() override { return anticipate_; } |
+ |
+ protected: |
+ base::TimeTicks Now() const override { return now_; } |
+ |
+ void ScheduleFlush() override { flush_scheduled_ = true; } |
+ |
+ void SetShouldAnticipateHighPriorityWork(bool anticipate) { |
+ anticipate_ = anticipate; |
+ } |
+ |
+ void Advance(base::TimeDelta delta) { now_ += delta; } |
+ |
+ bool RunScheduledFlush() { |
+ if (!flush_scheduled_) |
+ return false; |
+ |
+ flush_scheduled_ = false; |
+ Flush(); |
+ return true; |
+ } |
+ |
+ bool RequestResource() { |
+ ResourceHostMsg_Request request; |
+ request.download_to_file = true; |
+ return Send(new ResourceHostMsg_RequestResource( |
+ kRoutingId, ++last_request_id_, request)); |
+ } |
+ |
+ void RequestResourcesUntilThrottled() { |
+ SetShouldAnticipateHighPriorityWork(true); |
+ GetAndResetSentMessageCount(); |
+ for (RequestResource(); GetAndResetSentMessageCount(); RequestResource()) { |
+ } |
+ } |
+ |
+ bool UpdateRequestPriority(int request_id, net::RequestPriority priority) { |
+ return Send(new ResourceHostMsg_DidChangePriority(request_id, priority, 0)); |
+ } |
+ |
+ bool ReleaseDownloadedFile(int request_id) { |
+ return Send(new ResourceHostMsg_ReleaseDownloadedFile(request_id)); |
+ } |
+ |
+ bool CancelRequest(int request_id) { |
+ return Send(new ResourceHostMsg_CancelRequest(request_id)); |
+ } |
+ |
+ bool RequestResourceSync() { |
+ SyncLoadResult result; |
+ return Send(new ResourceHostMsg_SyncLoad( |
+ kRoutingId, ++last_request_id_, ResourceHostMsg_Request(), &result)); |
+ } |
+ |
+ size_t GetAndResetSentMessageCount() { |
+ size_t sent_message_count = sent_messages_.size(); |
+ sent_messages_.clear(); |
+ return sent_message_count; |
+ } |
+ |
+ const IPC::Message* LastSentMessage() const { |
+ return sent_messages_.empty() ? nullptr : sent_messages_.back(); |
+ } |
+ |
+ int LastSentRequestId() const { |
+ const IPC::Message* msg = LastSentMessage(); |
+ if (!msg) |
+ return -1; |
+ |
+ int routing_id = -1; |
+ int request_id = -1; |
+ PickleIterator iter(*msg); |
+ CHECK(IPC::ReadParam(msg, &iter, &routing_id)); |
+ CHECK(IPC::ReadParam(msg, &iter, &request_id)); |
+ return request_id; |
+ } |
+ |
+ int last_request_id() const { return last_request_id_; } |
+ |
+ ScopedMessages sent_messages_; |
+ |
+ private: |
+ SenderSink sender_sink_; |
+ base::MessageLoopForUI message_loop_; |
+ base::TimeTicks now_; |
+ bool flush_scheduled_; |
+ bool anticipate_; |
+ int last_request_id_; |
+ |
+ DISALLOW_COPY_AND_ASSIGN(ResourceDispatchThrottlerTest); |
+}; |
+ |
+TEST_F(ResourceDispatchThrottlerTest, NotThrottledByDefault) { |
+ SetShouldAnticipateHighPriorityWork(false); |
+ for (size_t i = 0; i < kRequestsPerFlush * 2; ++i) { |
+ RequestResource(); |
+ EXPECT_EQ(i + 1, sent_messages_.size()); |
+ } |
+} |
+ |
+TEST_F(ResourceDispatchThrottlerTest, NotThrottledIfSendLimitNotReached) { |
+ SetShouldAnticipateHighPriorityWork(true); |
+ for (size_t i = 0; i < kRequestsPerFlush; ++i) { |
+ RequestResource(); |
+ EXPECT_EQ(i + 1, sent_messages_.size()); |
+ } |
+} |
+ |
+TEST_F(ResourceDispatchThrottlerTest, ThrottledWhenHighPriorityWork) { |
+ SetShouldAnticipateHighPriorityWork(true); |
+ for (size_t i = 0; i < kRequestsPerFlush; ++i) |
+ RequestResource(); |
+ ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount()); |
+ |
+ RequestResource(); |
+ EXPECT_EQ(0U, sent_messages_.size()); |
+ |
+ EXPECT_TRUE(RunScheduledFlush()); |
+ EXPECT_EQ(1U, sent_messages_.size()); |
+} |
+ |
+TEST_F(ResourceDispatchThrottlerTest, |
+ ThrottledWhenDeferredMessageQueueNonEmpty) { |
+ SetShouldAnticipateHighPriorityWork(true); |
+ for (size_t i = 0; i < kRequestsPerFlush; ++i) |
+ RequestResource(); |
+ ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount()); |
+ |
+ RequestResource(); |
+ EXPECT_EQ(0U, sent_messages_.size()); |
+ SetShouldAnticipateHighPriorityWork(false); |
+ RequestResource(); |
+ EXPECT_EQ(0U, sent_messages_.size()); |
+ |
+ EXPECT_TRUE(RunScheduledFlush()); |
+ EXPECT_EQ(2U, sent_messages_.size()); |
+} |
+ |
+TEST_F(ResourceDispatchThrottlerTest, NotThrottledIfSufficientTimePassed) { |
+ SetShouldAnticipateHighPriorityWork(true); |
+ |
+ for (size_t i = 0; i < kRequestsPerFlush * 2; ++i) { |
+ Advance(base::TimeDelta::FromSecondsD(kFlushPeriodSeconds * 2)); |
+ RequestResource(); |
+ EXPECT_EQ(1U, GetAndResetSentMessageCount()); |
+ EXPECT_FALSE(RunScheduledFlush()); |
+ } |
+} |
+ |
+TEST_F(ResourceDispatchThrottlerTest, NotThrottledIfUnthrottledMessageType) { |
+ SetShouldAnticipateHighPriorityWork(true); |
+ |
+ RequestResourceSync(); |
+ EXPECT_EQ(1U, GetAndResetSentMessageCount()); |
+ |
+ // Saturate the queue. |
+ for (size_t i = 0; i < kRequestsPerFlush * 2; ++i) |
+ RequestResource(); |
+ ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount()); |
+ |
+ // Unthrottled message types should pass through untouched. |
+ RequestResourceSync(); |
+ EXPECT_EQ(1U, GetAndResetSentMessageCount()); |
+ RequestResourceSync(); |
+ EXPECT_EQ(1U, GetAndResetSentMessageCount()); |
+ |
+ // Deferred messages should flush as usual. |
+ EXPECT_TRUE(RunScheduledFlush()); |
+ EXPECT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount()); |
+} |
+ |
+TEST_F(ResourceDispatchThrottlerTest, MultipleFlushes) { |
+ SetShouldAnticipateHighPriorityWork(true); |
+ for (size_t i = 0; i < kRequestsPerFlush * 4; ++i) |
+ RequestResource(); |
+ ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount()); |
+ |
+ for (size_t i = 0; i < 3; ++i) { |
+ EXPECT_TRUE(RunScheduledFlush()); |
+ EXPECT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount()) << i; |
+ } |
+ |
+ EXPECT_FALSE(RunScheduledFlush()); |
+ EXPECT_EQ(0U, sent_messages_.size()); |
+} |
+ |
+TEST_F(ResourceDispatchThrottlerTest, MultipleFlushesWhileReceiving) { |
+ SetShouldAnticipateHighPriorityWork(true); |
+ for (size_t i = 0; i < kRequestsPerFlush * 4; ++i) |
+ RequestResource(); |
+ ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount()); |
+ |
+ for (size_t i = 0; i < 3; ++i) { |
+ EXPECT_TRUE(RunScheduledFlush()); |
+ EXPECT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount()) << i; |
+ for (size_t j = 0; j < kRequestsPerFlush; ++j) |
+ RequestResource(); |
+ EXPECT_EQ(0U, sent_messages_.size()) << i; |
+ } |
+ |
+ for (size_t i = 0; i < 3; ++i) { |
+ EXPECT_TRUE(RunScheduledFlush()); |
+ EXPECT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount()); |
+ } |
+ |
+ EXPECT_FALSE(RunScheduledFlush()); |
+ EXPECT_EQ(0U, sent_messages_.size()); |
+} |
+ |
+TEST_F(ResourceDispatchThrottlerTest, CancelRequest) { |
+ RequestResource(); |
+ ASSERT_EQ(1U, GetAndResetSentMessageCount()); |
+ |
+ // Cancel messages for an unthrottled request will be forwarded immediately. |
+ CancelRequest(last_request_id()); |
+ ASSERT_EQ(1U, sent_messages_.size()); |
+ EXPECT_EQ(ResourceHostMsg_CancelRequest::ID, LastSentMessage()->type()); |
+ |
+ RequestResourcesUntilThrottled(); |
+ |
+ // The cancelled request should never be sent. |
+ CancelRequest(last_request_id()); |
+ ASSERT_EQ(0U, sent_messages_.size()); |
+ EXPECT_TRUE(RunScheduledFlush()); |
+ EXPECT_EQ(0U, sent_messages_.size()); |
+} |
+ |
+TEST_F(ResourceDispatchThrottlerTest, CancelRequestOutOfOrder) { |
+ SetShouldAnticipateHighPriorityWork(true); |
+ for (size_t i = 0; i < kRequestsPerFlush; ++i) |
+ RequestResource(); |
+ ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount()); |
+ |
+ for (size_t i = 0; i < kRequestsPerFlush; ++i) |
+ RequestResource(); |
+ ASSERT_EQ(0U, sent_messages_.size()); |
+ |
+ // Cancel the first and last throttled request, ensuring they're not flushed. |
+ CancelRequest(last_request_id()); |
+ CancelRequest(last_request_id() - 2); |
+ EXPECT_TRUE(RunScheduledFlush()); |
+ ASSERT_EQ(kRequestsPerFlush - 2, sent_messages_.size()); |
+ EXPECT_EQ(LastSentRequestId(), last_request_id() - 1); |
+} |
+ |
+TEST_F(ResourceDispatchThrottlerTest, PriorityUpdate) { |
+ RequestResource(); |
+ ASSERT_EQ(1U, GetAndResetSentMessageCount()); |
+ |
+ // Priority updates for an unthrottled request will be forwarded immediately. |
+ UpdateRequestPriority(last_request_id(), net::HIGHEST); |
+ ASSERT_EQ(1U, sent_messages_.size()); |
+ EXPECT_EQ(ResourceHostMsg_DidChangePriority::ID, LastSentMessage()->type()); |
+ |
+ RequestResourcesUntilThrottled(); |
+ |
+ // The updated priority should be reflected in the flushed message. |
+ UpdateRequestPriority(last_request_id(), net::HIGHEST); |
+ ASSERT_EQ(0U, sent_messages_.size()); |
+ EXPECT_TRUE(RunScheduledFlush()); |
+ ASSERT_EQ(1U, sent_messages_.size()); |
+ ResourceHostMsg_RequestResource::Param request_params; |
+ ASSERT_TRUE(ResourceHostMsg_RequestResource::Read(LastSentMessage(), |
+ &request_params)); |
+ EXPECT_EQ(net::HIGHEST, get<2>(request_params).priority); |
+} |
+ |
+TEST_F(ResourceDispatchThrottlerTest, ReleaseDownloadFileUpdate) { |
+ RequestResource(); |
+ ASSERT_EQ(1U, GetAndResetSentMessageCount()); |
+ |
+ // Download request updates for an unthrottled request will be |
+ // immediately. |
+ ReleaseDownloadedFile(last_request_id()); |
+ ASSERT_EQ(1U, sent_messages_.size()); |
+ EXPECT_EQ(ResourceHostMsg_ReleaseDownloadedFile::ID, |
+ LastSentMessage()->type()); |
+ |
+ RequestResourcesUntilThrottled(); |
+ |
+ // The download request update should be reflected in the flushed message. |
+ ReleaseDownloadedFile(last_request_id()); |
+ ASSERT_EQ(0U, sent_messages_.size()); |
+ EXPECT_TRUE(RunScheduledFlush()); |
+ ASSERT_EQ(1U, sent_messages_.size()); |
+ ResourceHostMsg_RequestResource::Param request_params; |
+ ASSERT_TRUE(ResourceHostMsg_RequestResource::Read(LastSentMessage(), |
+ &request_params)); |
+ EXPECT_FALSE(get<2>(request_params).download_to_file); |
+} |
+ |
+} // namespace content |