Chromium Code Reviews

Unified Diff: content/renderer/scheduler/resource_dispatch_throttler_unittest.cc

Issue 847883002: Reland "Throttle resource message requests during user interaction" (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Rebase (created 5 years, 11 months ago)
Index: content/renderer/scheduler/resource_dispatch_throttler_unittest.cc
diff --git a/content/renderer/scheduler/resource_dispatch_throttler_unittest.cc b/content/renderer/scheduler/resource_dispatch_throttler_unittest.cc
new file mode 100644
index 0000000000000000000000000000000000000000..d565e28967bc3a18198b01eacd9f693dc7467f96
--- /dev/null
+++ b/content/renderer/scheduler/resource_dispatch_throttler_unittest.cc
@@ -0,0 +1,345 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/scheduler/resource_dispatch_throttler.h"
+
+#include "base/memory/scoped_vector.h"
+#include "content/common/resource_messages.h"
+#include "content/test/fake_renderer_scheduler.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace content {
+namespace {
+
+const uint32 kRequestsPerFlush = 4;
+const double kFlushPeriodSeconds = 1.f / 60;
+const int kRoutingId = 1;
+
+typedef ScopedVector<IPC::Message> ScopedMessages;
+
+int GetRequestId(const IPC::Message& msg) {
+ int request_id = -1;
+ switch (msg.type()) {
+ case ResourceHostMsg_RequestResource::ID: {
+ PickleIterator iter(msg);
+ int routing_id = -1;
+ if (!iter.ReadInt(&routing_id) || !iter.ReadInt(&request_id))
+ NOTREACHED() << "Invalid id for resource request message.";
+ } break;
+
+ case ResourceHostMsg_DidChangePriority::ID:
+ case ResourceHostMsg_ReleaseDownloadedFile::ID:
+ case ResourceHostMsg_CancelRequest::ID:
+ if (!PickleIterator(msg).ReadInt(&request_id))
+ NOTREACHED() << "Invalid id for resource message.";
+ break;
+
+ default:
+ NOTREACHED() << "Invalid message for resource throttling.";
+ break;
+ }
+ return request_id;
+}
+
+class SenderSink : public IPC::Sender {
+ public:
+ SenderSink(ScopedMessages* sent_messages) : sent_messages_(sent_messages) {}
+
+ // IPC::Sender implementation:
+ bool Send(IPC::Message* msg) override {
+ sent_messages_->push_back(msg);
+ return true;
+ }
+
+ private:
+ ScopedMessages* sent_messages_;
+};
+
+} // namespace
+
+class ResourceDispatchThrottlerTest : public testing::Test,
+ public FakeRendererScheduler,
+ public ResourceDispatchThrottler {
+ public:
+ ResourceDispatchThrottlerTest()
+ : ResourceDispatchThrottler(
+ &sender_sink_,
+ this,
davidben 2015/02/03 19:46:48 Having the FakeRendererScheduler and the ResourceD
jdduke (slow) 2015/02/04 01:39:50 Yeah, I get lazy in testing code sometimes, but th
+ base::TimeDelta::FromSecondsD(kFlushPeriodSeconds),
+ kRequestsPerFlush),
+ sender_sink_(&sent_messages_),
+ flush_scheduled_(false),
+ anticipate_(false),
+ last_request_id_(0) {}
+ ~ResourceDispatchThrottlerTest() override {}
+
+ // RendererScheduler implementation:
+ bool IsHighPriorityWorkAnticipated() override { return anticipate_; }
+
+ protected:
+ base::TimeTicks Now() const override { return now_; }
+
+ void ScheduleFlush() override { flush_scheduled_ = true; }
+
+ void SetHighPriorityWorkAnticipated(bool anticipate) {
+ anticipate_ = anticipate;
+ }
+
+ void Advance(base::TimeDelta delta) { now_ += delta; }
+
+ bool RunScheduledFlush() {
+ if (!flush_scheduled_)
+ return false;
+
+ flush_scheduled_ = false;
+ Flush();
+ return true;
+ }
+
+ bool RequestResource() {
+ ResourceHostMsg_Request request;
+ request.download_to_file = true;
+ return Send(new ResourceHostMsg_RequestResource(
+ kRoutingId, ++last_request_id_, request));
+ }
+
+ void RequestResourcesUntilThrottled() {
+ SetHighPriorityWorkAnticipated(true);
+ GetAndResetSentMessageCount();
+ for (RequestResource(); GetAndResetSentMessageCount(); RequestResource()) {
davidben 2015/02/03 19:46:48 This looks very different from a normal for loop.
jdduke (slow) 2015/02/04 01:39:50 Done.
+ }
+ }
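The exchange above concerns the unusual for loop in RequestResourcesUntilThrottled (every clause calls a helper and the body is empty). A minimal equivalent sketch in a more conventional shape, assuming the helpers above keep their current behavior (the reworked version from the later patch set is not part of this diff):

  void RequestResourcesUntilThrottled() {
    SetHighPriorityWorkAnticipated(true);
    GetAndResetSentMessageCount();
    // Issue requests until one is deferred, i.e. until a new request no
    // longer shows up in sent_messages_ immediately.
    RequestResource();
    while (GetAndResetSentMessageCount() > 0)
      RequestResource();
  }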
+
+ bool UpdateRequestPriority(int request_id, net::RequestPriority priority) {
+ return Send(new ResourceHostMsg_DidChangePriority(request_id, priority, 0));
+ }
+
+ bool ReleaseDownloadedFile(int request_id) {
+ return Send(new ResourceHostMsg_ReleaseDownloadedFile(request_id));
+ }
+
+ bool CancelRequest(int request_id) {
+ return Send(new ResourceHostMsg_CancelRequest(request_id));
+ }
+
+ bool RequestResourceSync() {
+ SyncLoadResult result;
+ return Send(new ResourceHostMsg_SyncLoad(
+ kRoutingId, ++last_request_id_, ResourceHostMsg_Request(), &result));
+ }
+
+ size_t GetAndResetSentMessageCount() {
+ size_t sent_message_count = sent_messages_.size();
+ sent_messages_.clear();
+ return sent_message_count;
+ }
+
+ const IPC::Message* LastSentMessage() const {
+ return sent_messages_.empty() ? nullptr : sent_messages_.back();
+ }
+
+ int LastSentRequestId() const {
+ const IPC::Message* msg = LastSentMessage();
+ if (!msg)
+ return -1;
+
+ int routing_id = -1;
+ int request_id = -1;
+ PickleIterator iter(*msg);
+ CHECK(IPC::ReadParam(msg, &iter, &routing_id));
+ CHECK(IPC::ReadParam(msg, &iter, &request_id));
+ return request_id;
+ }
+
+ bool flush_scheduled() const { return flush_scheduled_; }
+
+ int last_request_id() const { return last_request_id_; }
+
+ ScopedMessages sent_messages_;
+
+ private:
+ SenderSink sender_sink_;
+ base::MessageLoopForUI message_loop_;
+ base::TimeTicks now_;
+ bool flush_scheduled_;
+ bool anticipate_;
+ int last_request_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(ResourceDispatchThrottlerTest);
+};
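The truncated exchange on the constructor above appears to question the fixture inheriting from both FakeRendererScheduler and ResourceDispatchThrottler. Purely as an illustration of the composition alternative (not part of this patch; the class name below and the assumption that the throttler's second constructor argument is a RendererScheduler* are the sketch's own), the test-only overrides could live in a small throttler subclass owned by the fixture, alongside a scheduler fake and the SenderSink:

  class TestThrottler : public ResourceDispatchThrottler {
   public:
    TestThrottler(IPC::Sender* sender, RendererScheduler* scheduler)
        : ResourceDispatchThrottler(
              sender,
              scheduler,
              base::TimeDelta::FromSecondsD(kFlushPeriodSeconds),
              kRequestsPerFlush) {}

    // Test hooks replacing the real clock and flush scheduling.
    void Advance(base::TimeDelta delta) { now_ += delta; }
    bool flush_scheduled() const { return flush_scheduled_; }

   private:
    base::TimeTicks Now() const override { return now_; }
    void ScheduleFlush() override { flush_scheduled_ = true; }

    base::TimeTicks now_;
    bool flush_scheduled_ = false;
  };

The fixture would then hold this object, a scheduler fake whose IsHighPriorityWorkAnticipated() returns a test-controlled flag, and the SenderSink as plain members instead of base classes.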
+
+TEST_F(ResourceDispatchThrottlerTest, NotThrottledByDefault) {
+ SetHighPriorityWorkAnticipated(false);
+ for (size_t i = 0; i < kRequestsPerFlush * 2; ++i) {
+ RequestResource();
+ EXPECT_EQ(i + 1, sent_messages_.size());
+ }
+}
+
+TEST_F(ResourceDispatchThrottlerTest, NotThrottledIfSendLimitNotReached) {
+ SetHighPriorityWorkAnticipated(true);
+ for (size_t i = 0; i < kRequestsPerFlush; ++i) {
+ RequestResource();
+ EXPECT_EQ(i + 1, sent_messages_.size());
+ }
+}
+
+TEST_F(ResourceDispatchThrottlerTest, ThrottledWhenHighPriorityWork) {
+ SetHighPriorityWorkAnticipated(true);
+ for (size_t i = 0; i < kRequestsPerFlush; ++i)
+ RequestResource();
+ ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());
+
+ RequestResource();
+ EXPECT_EQ(0U, sent_messages_.size());
+
+ EXPECT_TRUE(RunScheduledFlush());
+ EXPECT_EQ(1U, sent_messages_.size());
+}
+
+TEST_F(ResourceDispatchThrottlerTest,
+ ThrottledWhenDeferredMessageQueueNonEmpty) {
+ SetHighPriorityWorkAnticipated(true);
+ for (size_t i = 0; i < kRequestsPerFlush; ++i)
+ RequestResource();
+ ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());
+
+ RequestResource();
+ EXPECT_EQ(0U, sent_messages_.size());
+ SetHighPriorityWorkAnticipated(false);
+ RequestResource();
+ EXPECT_EQ(0U, sent_messages_.size());
+
+ EXPECT_TRUE(RunScheduledFlush());
+ EXPECT_EQ(2U, sent_messages_.size());
+}
+
+TEST_F(ResourceDispatchThrottlerTest, NotThrottledIfSufficientTimePassed) {
+ SetHighPriorityWorkAnticipated(true);
+
+ for (size_t i = 0; i < kRequestsPerFlush * 2; ++i) {
+ Advance(base::TimeDelta::FromSecondsD(kFlushPeriodSeconds * 2));
+ RequestResource();
+ EXPECT_EQ(1U, GetAndResetSentMessageCount());
+ EXPECT_FALSE(flush_scheduled());
+ }
+}
+
+TEST_F(ResourceDispatchThrottlerTest, NotThrottledIfSyncMessage) {
+ SetHighPriorityWorkAnticipated(true);
+
+ RequestResourceSync();
+ EXPECT_EQ(1U, GetAndResetSentMessageCount());
+
+ // Saturate the queue.
+ for (size_t i = 0; i < kRequestsPerFlush * 2; ++i)
+ RequestResource();
+ ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());
+
+ // Unthrottled message types should pass through untouched.
+ RequestResourceSync();
+ EXPECT_EQ(1U, GetAndResetSentMessageCount());
+ RequestResourceSync();
+ EXPECT_EQ(1U, GetAndResetSentMessageCount());
+
+ // Deferred messages should flush as usual.
+ EXPECT_TRUE(RunScheduledFlush());
+ EXPECT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());
+}
+
+TEST_F(ResourceDispatchThrottlerTest, MultipleFlushes) {
+ SetHighPriorityWorkAnticipated(true);
+ for (size_t i = 0; i < kRequestsPerFlush * 4; ++i)
+ RequestResource();
+ ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());
+
+ for (size_t i = 0; i < 3; ++i) {
+ EXPECT_TRUE(RunScheduledFlush());
+ EXPECT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount()) << i;
davidben 2015/02/03 19:46:47 You could probably stick a SCOPED_TRACE(i) and tha
jdduke (slow) 2015/02/04 01:39:50 Done.
+ }
+
+ EXPECT_FALSE(flush_scheduled());
+ EXPECT_EQ(0U, sent_messages_.size());
+}
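As the exchange above suggests, gtest's SCOPED_TRACE can replace the "<< i" annotations: any assertion that fails inside the scope is reported together with the traced value. A minimal sketch applied to the flush loop above:

  for (size_t i = 0; i < 3; ++i) {
    SCOPED_TRACE(i);  // Failures in this iteration are reported with i.
    EXPECT_TRUE(RunScheduledFlush());
    EXPECT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());
  }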
+
+TEST_F(ResourceDispatchThrottlerTest, MultipleFlushesWhileReceiving) {
+ SetHighPriorityWorkAnticipated(true);
+ for (size_t i = 0; i < kRequestsPerFlush * 4; ++i)
+ RequestResource();
+ ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());
+
+ for (size_t i = 0; i < 3; ++i) {
+ EXPECT_TRUE(RunScheduledFlush());
+ EXPECT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount()) << i;
+ for (size_t j = 0; j < kRequestsPerFlush; ++j)
+ RequestResource();
+ EXPECT_EQ(0U, sent_messages_.size()) << i;
+ }
+
+ for (size_t i = 0; i < 3; ++i) {
+ EXPECT_TRUE(RunScheduledFlush());
+ EXPECT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());
+ }
+
+ EXPECT_FALSE(flush_scheduled());
+ EXPECT_EQ(0U, sent_messages_.size());
+}
+
+TEST_F(ResourceDispatchThrottlerTest, NonRequestsNeverTriggerThrottling) {
+ RequestResource();
+ ASSERT_EQ(1U, GetAndResetSentMessageCount());
+
+ for (size_t i = 0; i < kRequestsPerFlush * 3; ++i)
+ UpdateRequestPriority(last_request_id(), net::HIGHEST);
+ EXPECT_EQ(kRequestsPerFlush * 3, sent_messages_.size());
+
+ RequestResource();
+ EXPECT_EQ(1U + kRequestsPerFlush * 3, GetAndResetSentMessageCount());
+}
+
+TEST_F(ResourceDispatchThrottlerTest, NonRequestsDeferredWhenThrottling) {
+ RequestResource();
+ ASSERT_EQ(1U, GetAndResetSentMessageCount());
+
+ RequestResourcesUntilThrottled();
+ UpdateRequestPriority(last_request_id(), net::HIGHEST);
+ ReleaseDownloadedFile(last_request_id());
+ CancelRequest(last_request_id());
+
+ EXPECT_TRUE(RunScheduledFlush());
+ EXPECT_EQ(4U, GetAndResetSentMessageCount());
+ EXPECT_FALSE(flush_scheduled());
+}
+
+TEST_F(ResourceDispatchThrottlerTest, MessageOrderingPreservedWhenThrottling) {
+ SetHighPriorityWorkAnticipated(true);
+ for (size_t i = 0; i < kRequestsPerFlush; ++i)
+ RequestResource();
+ ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());
+
+ for (size_t i = 0; i < kRequestsPerFlush; ++i) {
+ RequestResource();
+ UpdateRequestPriority(last_request_id(), net::HIGHEST);
+ CancelRequest(last_request_id() - 1);
+ }
+ ASSERT_EQ(0U, sent_messages_.size());
+
+ EXPECT_TRUE(RunScheduledFlush());
+ ASSERT_EQ(kRequestsPerFlush * 3, sent_messages_.size());
+ for (size_t i = 0; i < sent_messages_.size(); i+=3) {
+ const auto& request_msg = *sent_messages_[i];
+ const auto& priority_msg = *sent_messages_[i + 1];
+ const auto& cancel_msg = *sent_messages_[i + 2];
+
+ EXPECT_EQ(request_msg.type(), ResourceHostMsg_RequestResource::ID) << i;
+ EXPECT_EQ(priority_msg.type(), ResourceHostMsg_DidChangePriority::ID) << i;
+ EXPECT_EQ(cancel_msg.type(), ResourceHostMsg_CancelRequest::ID) << i;
+
+ EXPECT_EQ(GetRequestId(request_msg), GetRequestId(priority_msg));
+ EXPECT_EQ(GetRequestId(request_msg) - 1, GetRequestId(cancel_msg));
+ }
+ EXPECT_FALSE(flush_scheduled());
+}
+
+} // namespace content
