Index: net/base/priority_dispatch.cc
diff --git a/net/base/priority_dispatch.cc b/net/base/priority_dispatch.cc
new file mode 100644
index 0000000000000000000000000000000000000000..d5bf2f3b297bcc3c2db1b907547c51b633b87726 |
--- /dev/null
+++ b/net/base/priority_dispatch.cc
@@ -0,0 +1,112 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/base/priority_dispatch.h"
+
+#include "base/logging.h"
+
+namespace net {
+
+// We rely on the priority enum values being sequential, starting at 0 and
+// increasing for lower priorities.
+COMPILE_ASSERT(HIGHEST == 0u &&
+               LOWEST > HIGHEST &&
+               IDLE > LOWEST &&
+               NUM_PRIORITIES > IDLE,
+               priority_indexes_incompatible);
+
+// static
+PriorityDispatch::Limits PriorityDispatch::Limits::MakeAny(
+    size_t max_idle) {
+  Limits limits = {{0, 0, 0, 0, max_idle}};
mmenke
2011/12/21 16:22:58
You're relying on NUM_PRIORITIES being 5 here. Th
szym
2011/12/28 01:24:10
Done.
+  return limits;
+}
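mmenke's comment above points out that the brace initializer bakes in NUM_PRIORITIES == 5. A sketch of one count-agnostic rewrite — an assumption about how the "Done" was resolved, since the follow-up patch is not shown here; it also assumes IDLE is the last value before NUM_PRIORITIES, as the initializer implies:

// Hypothetical rewrite of MakeAny(); no dependence on there being
// exactly five priority levels.
PriorityDispatch::Limits PriorityDispatch::Limits::MakeAny(size_t max_idle) {
  Limits limits;
  for (size_t i = 0; i < NUM_PRIORITIES; ++i)
    limits.reserved_slots[i] = 0;
  limits.reserved_slots[IDLE] = max_idle;
  return limits;
}

With every reserved count at zero except IDLE, the cumulative caps computed in the constructor come out to max_idle for every priority, which matches the "any" in the name.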
+
+size_t PriorityDispatch::Limits::Total() const {
+  size_t total = 0;
+  for (size_t i = 0; i < NUM_PRIORITIES; ++i) {
+    total+= reserved_slots[i];
mmenke
2011/12/21 16:22:58
nit: Space before the +=. Applies elsewhere in t
szym
2011/12/28 01:24:10
Done.
+  }
+  return total;
+}
+
+PriorityDispatch::PriorityDispatch(const Limits& limits, size_t max_queued)
+    : num_running_jobs_(0), max_queued_jobs_(max_queued) {
+  size_t total = 0;
+  for (size_t i = 0; i < NUM_PRIORITIES; ++i) {
+    total+= limits.reserved_slots[NUM_PRIORITIES - i - 1];
+    max_running_jobs_[NUM_PRIORITIES - i - 1] = total;
+  }
+}
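The loop above accumulates from the lowest priority (highest index) toward HIGHEST, so max_running_jobs_[p] counts every slot except those reserved for priorities strictly higher than p. A small standalone check of that arithmetic, with illustrative numbers not taken from the CL:

#include <cstddef>
#include <cstdio>

int main() {
  // Five priority levels, index 0 == HIGHEST, as in the COMPILE_ASSERT above.
  const size_t kNumPriorities = 5;
  // Illustrative limits: one slot reserved for HIGHEST, four open to all.
  size_t reserved_slots[kNumPriorities] = {1, 0, 0, 0, 4};
  size_t max_running_jobs[kNumPriorities];
  size_t total = 0;
  // Same accumulation as the PriorityDispatch constructor.
  for (size_t i = 0; i < kNumPriorities; ++i) {
    total += reserved_slots[kNumPriorities - i - 1];
    max_running_jobs[kNumPriorities - i - 1] = total;
  }
  for (size_t i = 0; i < kNumPriorities; ++i)
    printf("%zu ", max_running_jobs[i]);
  printf("\n");  // Prints "5 4 4 4 4".
  return 0;
}

HIGHEST jobs may fill all five slots, while every lower priority stops at four, leaving one slot free for HIGHEST arrivals.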
+
+PriorityDispatch::~PriorityDispatch() {}
+
+void PriorityDispatch::SetMaxQueued(size_t max_queued) {
+  DCHECK_EQ(0u, num_queued_jobs());
+  max_queued_jobs_ = max_queued;
+}
+
+PriorityDispatch::Handle PriorityDispatch::Add(Job* job,
+                                               RequestPriority priority) {
+  DCHECK(job);
+  if (num_running_jobs_ < max_running_jobs_[priority]) {
+    ++num_running_jobs_;
+    job->Start();
+    return Handle();
+  }
+  Handle handle = queue_.Insert(job, priority);
+  if (queue_.size() > max_queued_jobs_) {
+    // Evict oldest lowest-priority job.
+    Handle evicted = queue_.OldestLowest();
+    DCHECK(!evicted.is_null());
+    if (evicted.equals(handle))
+      handle = Handle();
+    Job* evicted_job = evicted.value();
+    queue_.Erase(evicted);
+    DCHECK(evicted_job);
+    evicted_job->OnEvicted();
+  }
+  return handle;
+}
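One subtlety in the overflow branch: if the job just inserted is itself the oldest lowest-priority entry, it is evicted on the spot and the caller receives a null Handle — the same value returned when a job starts synchronously. A caller-side sketch (the dispatcher and job variables are illustrative, not from the CL):

// Hypothetical caller; relies only on what Add() above guarantees.
net::PriorityDispatch::Handle handle = dispatcher.Add(job, net::IDLE);
if (handle.is_null()) {
  // Either Start() already ran (a slot was free), or the queue was full
  // and OnEvicted() already ran on this very job. Only the Job callbacks
  // distinguish the two cases.
} else {
  // The job is parked in the queue; keep the handle for Cancel()/Update().
}

The callbacks, not the return value, are thus the authoritative signal of what happened to the job.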
+
+void PriorityDispatch::Cancel(const Handle& handle) {
+  queue_.Erase(handle);
+}
+
+PriorityDispatch::Handle PriorityDispatch::Update(const Handle& handle,
+                                                  RequestPriority priority) {
+  DCHECK(!handle.is_null());
+  DCHECK_GE(num_running_jobs_, max_running_jobs_[handle.priority()]) <<
+      "Job should not be in queue when limits permit it to start.";
+  if (num_running_jobs_ < max_running_jobs_[priority]) {
mmenke
2011/12/21 16:22:58
You're pretty much duplicating half of OnJobFinish
szym
2011/12/28 01:24:10
Done.
+    Job* job = handle.value();
+    queue_.Erase(handle);
+    ++num_running_jobs_;
+    DCHECK(job);
+    job->Start();
+    return Handle();
+  }
+  return queue_.Move(handle, priority);
mmenke
2011/12/21 16:22:58
I think it might be a little simpler, and not all
szym
2011/12/28 01:24:10
Agreed. "Move" would make more sense if it preserv
+}
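mmenke's comment observes that this branch duplicates the dispatch logic in OnJobFinished() below, and szym's "Done" suggests the final patch factored it out. A guess at the shape of that refactor — the helper name and placement are assumptions, not taken from the CL:

// Hypothetical private helper shared by Update() and OnJobFinished():
// removes a queued job and starts it, consuming one running slot.
void PriorityDispatch::StartJob(const Handle& handle) {
  Job* job = handle.value();
  DCHECK(job);
  queue_.Erase(handle);
  ++num_running_jobs_;
  job->Start();
}

Update() would then reduce to StartJob(handle); return Handle(); on its fast path, and OnJobFinished() would reuse the same helper after its limit check.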
+
+void PriorityDispatch::OnJobFinished() {
+  DCHECK_GT(num_running_jobs_, 0u);
+  --num_running_jobs_;
+  Handle handle = queue_.First();
+  if (handle.is_null()) {
+    DCHECK_EQ(0u, queue_.size());
+    return;
+  }
+  if (num_running_jobs_ < max_running_jobs_[handle.priority()]) {
+    Job* job = handle.value();
+    queue_.Erase(handle);
+    ++num_running_jobs_;
+    DCHECK(job);
+    job->Start();
+  }
+}
+
+}  // namespace net
+
+
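For orientation, a sketch of how a client might drive this class. It assumes the header (not part of this diff) declares a nested Job interface with the Start() and OnEvicted() methods invoked above; all names below are illustrative:

#include "net/base/priority_dispatch.h"

namespace {

// Hypothetical job: does its work when a slot opens, and must hand the
// slot back via OnJobFinished() when done.
class PingJob : public net::PriorityDispatch::Job {
 public:
  explicit PingJob(net::PriorityDispatch* dispatcher)
      : dispatcher_(dispatcher) {}

  virtual void Start() {
    // ... perform the work, then release the slot so a queued job can run.
    dispatcher_->OnJobFinished();
  }

  virtual void OnEvicted() {
    // Dropped due to queue overflow; Start() will never be called.
  }

 private:
  net::PriorityDispatch* dispatcher_;
};

}  // namespace

void Example() {
  // Two slots usable by any priority; at most three jobs parked in queue.
  net::PriorityDispatch::Limits limits =
      net::PriorityDispatch::Limits::MakeAny(2);
  net::PriorityDispatch dispatcher(limits, 3);
  PingJob job(&dispatcher);
  dispatcher.Add(&job, net::HIGHEST);  // Starts immediately: a slot is free.
}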