Index: content/renderer/scheduler/renderer_scheduler_selector.cc |
diff --git a/content/renderer/scheduler/renderer_scheduler_selector.cc b/content/renderer/scheduler/renderer_scheduler_selector.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..7d312b8a096afb4558af3c8e40240dd58cfe0f7d |
--- /dev/null |
+++ b/content/renderer/scheduler/renderer_scheduler_selector.cc |
@@ -0,0 +1,115 @@ |
+// Copyright 2014 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
#include "content/renderer/scheduler/renderer_scheduler_selector.h"

#include <stdint.h>

#include "base/logging.h"
#include "base/pending_task.h"
+ |
+namespace content { |
+ |
namespace {
// Number of bits per byte, used to bound the number of work queues that fit
// in one priority bitmask.
// NOTE(review): CHAR_BIT from <climits> expresses the same thing; kept as a
// named constant to match local style.
const int kBitsInByte = 8;
}  // namespace
+ |
// All members are default-initialized; registration of the work queues
// happens later via RegisterWorkQueues().
RendererSchedulerSelector::RendererSchedulerSelector() { }

RendererSchedulerSelector::~RendererSchedulerSelector() { }
+ |
+void RendererSchedulerSelector::RegisterWorkQueues( |
+ const std::vector<const base::TaskQueue*>& work_queues) { |
+ main_thread_checker_.CalledOnValidThread(); |
+ DCHECK(work_queues.size() < queue_priorities_[kNormalPriority] * kBitsInByte); |
Sami
2014/10/20 13:31:09
What are we checking for here? It seems like we'd
petrcermak
2014/10/20 17:43:35
I think that it should be "sizeof(queue_priorities
|
+ work_queues_ = work_queues; |
+ // By default, all work queues are set to normal priority. |
+ queue_priorities_[kControlPriority] = 0; |
+ queue_priorities_[kHighPriority] = 0; |
+ queue_priorities_[kNormalPriority] = (1 << work_queues.size()) - 1; |
+ queue_priorities_[kBestEffortPriority] = 0; |
+} |
+ |
+RendererSchedulerSelector::QueuePriority RendererSchedulerSelector::NextPriorty( |
+ QueuePriority priority) { |
+ DCHECK(priority < kQueuePriorityCount); |
+ return static_cast<QueuePriority>(static_cast<int>(priority) + 1); |
+} |
+ |
+void RendererSchedulerSelector::SetQueuePriority( |
+ size_t queue_id, QueuePriority set_priority) { |
+ main_thread_checker_.CalledOnValidThread(); |
+ DCHECK(queue_id < work_queues_.size()); |
petrcermak
2014/10/20 17:43:36
DCHECK_LT (twice)
|
+ DCHECK(set_priority < kQueuePriorityCount); |
+ int64_t queue_id_mask_ = 1 << queue_id; |
Sami
2014/10/20 13:31:09
size_t? Also the RHS has only 32 bits. Also no und
|
+ for (QueuePriority priority = kControlPriority; |
+ priority < kQueuePriorityCount; |
+ priority = NextPriorty(priority)) { |
+ if (priority == set_priority) { |
+ queue_priorities_[priority] |= queue_id_mask_; |
+ } else { |
+ queue_priorities_[priority] &= ~queue_id_mask_; |
+ } |
+ } |
+} |
+ |
// Enabling a queue is the same as assigning it a priority: a disabled queue
// is simply one whose bit is not set at any priority level.
void RendererSchedulerSelector::EnableQueue(
    size_t queue_id, QueuePriority priority) {
  SetQueuePriority(queue_id, priority);
}
+ |
+void RendererSchedulerSelector::DisableQueue(size_t queue_id) { |
+ main_thread_checker_.CalledOnValidThread(); |
+ DCHECK(queue_id < work_queues_.size()); |
+ int64_t queue_id_mask_ = 1 << queue_id; |
petrcermak
2014/10/20 17:43:35
Why don't you calculate the inverse mask straighta
|
+ for (QueuePriority priority = kControlPriority; |
+ priority < kQueuePriorityCount; |
+ priority = NextPriorty(priority)) { |
+ queue_priorities_[priority] &= ~queue_id_mask_; |
+ } |
+} |
+ |
+bool RendererSchedulerSelector::QueueEnabledWithPriority( |
+ size_t queue_id, QueuePriority priority) { |
+ DCHECK(queue_id < work_queues_.size()); |
+ DCHECK(priority < kQueuePriorityCount); |
+ return queue_priorities_[priority] & (1 << queue_id); |
+} |
+ |
+bool RendererSchedulerSelector::ChooseOldestWithPriority( |
+ QueuePriority priority, size_t* out_queue_index) { |
+ bool found_non_empty_queue = false; |
+ size_t chosen_queue = 0; |
+ for (size_t queue_id = 0; queue_id < work_queues_.size(); queue_id++) { |
+ if (!QueueEnabledWithPriority(queue_id, priority) || |
+ work_queues_[queue_id]->empty()) { |
+ continue; |
+ } |
+ if (!found_non_empty_queue || |
+ work_queues_[queue_id]->front() < work_queues_[chosen_queue]->front()) { |
+ found_non_empty_queue = true; |
+ chosen_queue = queue_id; |
+ } |
+ } |
+ |
+ if (found_non_empty_queue) { |
+ *out_queue_index = chosen_queue; |
+ } |
+ return found_non_empty_queue; |
+} |
+ |
+bool RendererSchedulerSelector::SelectWorkQueueToService( |
+ size_t* out_queue_index) { |
+ main_thread_checker_.CalledOnValidThread(); |
+ DCHECK(work_queues_.size()); |
+ for (QueuePriority priority = kControlPriority; |
Sami
2014/10/20 13:31:09
It seems like this prioritizes the high priority s
|
+ priority < kQueuePriorityCount; |
+ priority = NextPriorty(priority)) { |
+ if (ChooseOldestWithPriority(priority, out_queue_index)) { |
+ return true; |
+ } |
+ } |
+ return false; |
+} |
+ |
+ |
+} // namespace content |