Chromium Code Reviews
Index: content/renderer/scheduler/resource_dispatch_throttler.cc
| diff --git a/content/renderer/scheduler/resource_dispatch_throttler.cc b/content/renderer/scheduler/resource_dispatch_throttler.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..7aa09704245fa05d2671253da617b15ccc5700b7 |
| --- /dev/null |
| +++ b/content/renderer/scheduler/resource_dispatch_throttler.cc |
| @@ -0,0 +1,132 @@ |
| +// Copyright 2015 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "content/renderer/scheduler/resource_dispatch_throttler.h" |
| + |
| +#include "base/auto_reset.h" |
| +#include "base/debug/trace_event.h" |
| +#include "content/common/resource_messages.h" |
| +#include "content/renderer/scheduler/renderer_scheduler.h" |
| +#include "ipc/ipc_message_macros.h" |
| + |
| +namespace content { |
| +namespace { |
| + |
| +bool IsResourceRequest(const IPC::Message& msg) { |
| + return msg.type() == ResourceHostMsg_RequestResource::ID; |
| +} |
| + |
| +} // namespace |
| + |
// Throttles resource-request IPCs routed through |proxied_sender|, flushing
// any deferred messages at most every |flush_period|.
ResourceDispatchThrottler::ResourceDispatchThrottler(
    IPC::Sender* proxied_sender,
    RendererScheduler* scheduler,
    base::TimeDelta flush_period,
    uint32 max_requests_per_flush)
    : proxied_sender_(proxied_sender),
      scheduler_(scheduler),
      flush_period_(flush_period),
      max_requests_per_flush_(max_requests_per_flush),
      // base::Unretained is safe here: |flush_timer_| is a member, so the
      // timer (and its pending task) cannot outlive |this|.
      flush_timer_(
          FROM_HERE,
          flush_period_,
          base::Bind(&ResourceDispatchThrottler::Flush, base::Unretained(this)),
          false /* is_repeating */),
      sent_requests_since_last_flush_(0) {
  DCHECK(proxied_sender);
  DCHECK(scheduler);
  // A zero flush period would make the throttle a no-op (see the elapsed-time
  // check in Send()).
  DCHECK_NE(flush_period_, base::TimeDelta());
  DCHECK(max_requests_per_flush_);
  // Flush tasks run on the scheduler's loading task runner.
  flush_timer_.SetTaskRunner(scheduler->LoadingTaskRunner());
}
| + |
| +ResourceDispatchThrottler::~ResourceDispatchThrottler() { |
| + std::deque<IPC::Message*> throttled_messages; |
| + throttled_messages.swap(throttled_messages_); |
| + for (auto& message: throttled_messages) |
|
davidben
2015/02/03 19:46:47
Nit: Taking a random sample from codesearch, put a
jdduke (slow)
2015/02/04 01:39:50
Done.
|
| + ForwardMessage(message); |
| + // There shouldn't be re-entrancy issues when forwarding an IPC, but validate |
| + // as a safeguard. |
| + DCHECK(throttled_messages_.empty()); |
| +} |
| + |
| +bool ResourceDispatchThrottler::Send(IPC::Message* msg) { |
| + thread_checker_.CalledOnValidThread(); |
|
davidben
2015/02/03 19:46:47
DCHECK(....CalledOnValidThread());
jdduke (slow)
2015/02/04 01:39:50
Done.
|
| + if (msg->is_sync()) |
| + return ForwardMessage(msg); |
| + |
| + // Always defer message forwarding if there are pending messages, ensuring |
| + // message dispatch ordering consistency. |
| + if (!throttled_messages_.empty()) { |
| + TRACE_EVENT_INSTANT0("loader", "ResourceDispatchThrottler::ThrottleMessage", |
| + TRACE_EVENT_SCOPE_THREAD); |
| + throttled_messages_.push_back(msg); |
| + return true; |
| + } |
| + |
| + if (!IsResourceRequest(*msg)) |
| + return ForwardMessage(msg); |
| + |
| + if (!scheduler_->IsHighPriorityWorkAnticipated()) |
| + return ForwardMessage(msg); |
| + |
| + if (Now() > (last_sent_request_time_ + flush_period_)) { |
| + // If sufficient time has passed since the previous send, we can effectively |
| + // mark the pipeline as flushed. |
| + sent_requests_since_last_flush_ = 0; |
| + return ForwardMessage(msg); |
| + } |
| + |
| + if (sent_requests_since_last_flush_ < max_requests_per_flush_) |
| + return ForwardMessage(msg); |
| + |
| + TRACE_EVENT_INSTANT0("loader", "ResourceDispatchThrottler::ThrottleRequest", |
| + TRACE_EVENT_SCOPE_THREAD); |
| + throttled_messages_.push_back(msg); |
| + ScheduleFlush(); |
| + return true; |
| +} |
| + |
// Returns the current monotonic time. NOTE(review): presumably a virtual
// seam so tests can inject a mock clock — confirm against the header.
base::TimeTicks ResourceDispatchThrottler::Now() const {
  return base::TimeTicks::Now();
}
| + |
// Arms the one-shot |flush_timer_| to fire Flush() after |flush_period_|.
// Must not be called while a flush is already pending.
void ResourceDispatchThrottler::ScheduleFlush() {
  DCHECK(!flush_timer_.IsRunning());
  flush_timer_.Reset();
}
| + |
| +void ResourceDispatchThrottler::Flush() { |
| + TRACE_EVENT1("loader", "ResourceDispatchThrottler::Flush", |
| + "total_throttled_messages", throttled_messages_.size()); |
| + sent_requests_since_last_flush_ = 0; |
| + |
| + // If high-priority work is no longer anticipated, dispatch can be safely |
| + // accelerated. Avoid completely flushing in such case in the event that |
| + // a large number of requests have been throttled. |
| + uint32 max_requests = scheduler_->IsHighPriorityWorkAnticipated() |
| + ? max_requests_per_flush_ |
| + : max_requests_per_flush_ * 2; |
| + |
| + while (!throttled_messages_.empty() && |
| + (sent_requests_since_last_flush_ < max_requests || |
| + !IsResourceRequest(*throttled_messages_.front()))) { |
| + IPC::Message* msg = throttled_messages_.front(); |
| + throttled_messages_.pop_front(); |
| + ForwardMessage(msg); |
| + } |
| + |
| + if (!throttled_messages_.empty()) |
| + ScheduleFlush(); |
| +} |
| + |
| +bool ResourceDispatchThrottler::ForwardMessage(IPC::Message* msg) { |
| + if (IsResourceRequest(*msg)) { |
| + last_sent_request_time_ = Now(); |
| + ++sent_requests_since_last_flush_; |
| + } |
| + return proxied_sender_->Send(msg); |
| +} |
| + |
| +} // namespace content |