Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <set> | |
| 6 | |
| 5 #include "content/browser/loader/resource_scheduler.h" | 7 #include "content/browser/loader/resource_scheduler.h" |
| 6 | 8 |
| 7 #include "base/stl_util.h" | 9 #include "base/stl_util.h" |
| 8 #include "content/common/resource_messages.h" | 10 #include "content/common/resource_messages.h" |
| 9 #include "content/browser/loader/resource_message_delegate.h" | 11 #include "content/browser/loader/resource_message_delegate.h" |
| 10 #include "content/public/browser/resource_controller.h" | 12 #include "content/public/browser/resource_controller.h" |
| 11 #include "content/public/browser/resource_request_info.h" | 13 #include "content/public/browser/resource_request_info.h" |
| 12 #include "content/public/browser/resource_throttle.h" | 14 #include "content/public/browser/resource_throttle.h" |
| 13 #include "ipc/ipc_message_macros.h" | 15 #include "ipc/ipc_message_macros.h" |
| 14 #include "net/base/host_port_pair.h" | 16 #include "net/base/host_port_pair.h" |
| 15 #include "net/base/load_flags.h" | 17 #include "net/base/load_flags.h" |
| 16 #include "net/base/request_priority.h" | 18 #include "net/base/request_priority.h" |
| 17 #include "net/http/http_server_properties.h" | 19 #include "net/http/http_server_properties.h" |
| 18 #include "net/url_request/url_request.h" | 20 #include "net/url_request/url_request.h" |
| 19 #include "net/url_request/url_request_context.h" | 21 #include "net/url_request/url_request_context.h" |
| 20 | 22 |
| 21 namespace content { | 23 namespace content { |
| 22 | 24 |
| 23 static const size_t kMaxNumDelayableRequestsPerClient = 10; | 25 static const size_t kMaxNumDelayableRequestsPerClient = 10; |
| 24 static const size_t kMaxNumDelayableRequestsPerHost = 6; | 26 static const size_t kMaxNumDelayableRequestsPerHost = 6; |
| 25 | 27 |
| 26 // A thin wrapper around net::PriorityQueue that deals with | 28 |
| 27 // ScheduledResourceRequests instead of PriorityQueue::Pointers. | |
| 28 class ResourceScheduler::RequestQueue { | 29 class ResourceScheduler::RequestQueue { |
| 29 private: | 30 public: |
| 30 typedef net::PriorityQueue<ScheduledResourceRequest*> NetQueue; | 31 typedef std::multiset<ScheduledResourceRequest*, ScheduledResourceSorter> |
|
James Simonsen
2014/02/07 03:45:09
Is multiset guaranteed to be FIFO for equivalent elements?
shatch
2014/02/10 19:55:47
Hmm, searching for this it looks like it depends on the standard version: C++11 guarantees that equivalent elements preserve insertion order, but earlier standards left it unspecified.
| |
| 32 NetQueue; | |
| 31 | 33 |
| 32 public: | 34 RequestQueue() {} |
| 33 class Iterator { | |
| 34 public: | |
| 35 Iterator(NetQueue* queue) : queue_(queue) { | |
| 36 DCHECK(queue != NULL); | |
| 37 current_pointer_ = queue_->FirstMax(); | |
| 38 } | |
| 39 | |
| 40 Iterator& operator++() { | |
| 41 current_pointer_ = queue_->GetNextTowardsLastMin(current_pointer_); | |
| 42 return *this; | |
| 43 } | |
| 44 | |
| 45 Iterator operator++(int) { | |
| 46 Iterator result(*this); | |
| 47 ++(*this); | |
| 48 return result; | |
| 49 } | |
| 50 | |
| 51 ScheduledResourceRequest* value() { | |
| 52 return current_pointer_.value(); | |
| 53 } | |
| 54 | |
| 55 bool is_null() { | |
| 56 return current_pointer_.is_null(); | |
| 57 } | |
| 58 | |
| 59 private: | |
| 60 NetQueue* queue_; | |
| 61 NetQueue::Pointer current_pointer_; | |
| 62 }; | |
| 63 | |
| 64 RequestQueue() : queue_(net::NUM_PRIORITIES) {} | |
| 65 ~RequestQueue() {} | 35 ~RequestQueue() {} |
| 66 | 36 |
| 67 // Adds |request| to the queue with given |priority|. | 37 // Adds |request| to the queue with given |priority|. |
| 68 void Insert(ScheduledResourceRequest* request, | 38 void Insert(ScheduledResourceRequest* request) { |
| 69 net::RequestPriority priority) { | |
| 70 DCHECK(!ContainsKey(pointers_, request)); | 39 DCHECK(!ContainsKey(pointers_, request)); |
| 71 NetQueue::Pointer pointer = queue_.Insert(request, priority); | 40 pointers_[request] = queue_.insert(request); |
| 72 pointers_[request] = pointer; | |
| 73 } | 41 } |
| 74 | 42 |
| 75 // Removes |request| from the queue. | 43 // Removes |request| from the queue. |
| 76 void Erase(ScheduledResourceRequest* request) { | 44 void Erase(ScheduledResourceRequest* request) { |
| 77 PointerMap::iterator it = pointers_.find(request); | 45 PointerMap::iterator it = pointers_.find(request); |
| 78 DCHECK(it != pointers_.end()); | 46 DCHECK(it != pointers_.end()); |
| 79 if (it == pointers_.end()) | 47 if (it == pointers_.end()) |
| 80 return; | 48 return; |
| 81 queue_.Erase(it->second); | 49 queue_.erase(it->second); |
| 82 pointers_.erase(it); | 50 pointers_.erase(it); |
| 83 } | 51 } |
| 84 | 52 |
| 85 // Returns the highest priority request that's queued, or NULL if none are. | 53 NetQueue::iterator GetNextHighestIterator() { |
| 86 ScheduledResourceRequest* FirstMax() { | 54 return queue_.begin(); |
| 87 return queue_.FirstMax().value(); | |
| 88 } | 55 } |
| 89 | 56 |
| 90 Iterator GetNextHighestIterator() { | 57 NetQueue::iterator End() { |
| 91 return Iterator(&queue_); | 58 return queue_.end(); |
| 92 } | 59 } |
| 93 | 60 |
| 94 // Returns true if |request| is queued. | 61 // Returns true if |request| is queued. |
| 95 bool IsQueued(ScheduledResourceRequest* request) const { | 62 bool IsQueued(ScheduledResourceRequest* request) const { |
| 96 return ContainsKey(pointers_, request); | 63 return ContainsKey(pointers_, request); |
| 97 } | 64 } |
| 98 | 65 |
| 99 // Returns true if no requests are queued. | 66 // Returns true if no requests are queued. |
| 100 bool IsEmpty() const { return queue_.size() == 0; } | 67 bool IsEmpty() const { return queue_.size() == 0; } |
| 101 | 68 |
| 102 private: | 69 private: |
| 103 typedef std::map<ScheduledResourceRequest*, NetQueue::Pointer> PointerMap; | 70 typedef std::map<ScheduledResourceRequest*, NetQueue::iterator> PointerMap; |
| 104 | 71 |
| 105 NetQueue queue_; | 72 NetQueue queue_; |
| 106 PointerMap pointers_; | 73 PointerMap pointers_; |
| 107 }; | 74 }; |
| 108 | 75 |
| 109 // This is the handle we return to the ResourceDispatcherHostImpl so it can | 76 // This is the handle we return to the ResourceDispatcherHostImpl so it can |
| 110 // interact with the request. | 77 // interact with the request. |
| 111 class ResourceScheduler::ScheduledResourceRequest | 78 class ResourceScheduler::ScheduledResourceRequest |
| 112 : public ResourceMessageDelegate, | 79 : public ResourceMessageDelegate, |
| 113 public ResourceThrottle { | 80 public ResourceThrottle { |
| 114 public: | 81 public: |
| 115 ScheduledResourceRequest(const ClientId& client_id, | 82 ScheduledResourceRequest(const ClientId& client_id, |
| 116 net::URLRequest* request, | 83 net::URLRequest* request, |
| 117 ResourceScheduler* scheduler) | 84 ResourceScheduler* scheduler) |
| 118 : ResourceMessageDelegate(request), | 85 : ResourceMessageDelegate(request), |
| 119 client_id_(client_id), | 86 client_id_(client_id), |
| 120 request_(request), | 87 request_(request), |
| 121 ready_(false), | 88 ready_(false), |
| 122 deferred_(false), | 89 deferred_(false), |
| 123 scheduler_(scheduler) { | 90 scheduler_(scheduler), |
| 91 intra_priority_value_(0) { | |
| 124 TRACE_EVENT_ASYNC_BEGIN1("net", "URLRequest", request_, | 92 TRACE_EVENT_ASYNC_BEGIN1("net", "URLRequest", request_, |
| 125 "url", request->url().spec()); | 93 "url", request->url().spec()); |
| 126 } | 94 } |
| 127 | 95 |
| 128 virtual ~ScheduledResourceRequest() { | 96 virtual ~ScheduledResourceRequest() { |
| 129 scheduler_->RemoveRequest(this); | 97 scheduler_->RemoveRequest(this); |
| 130 } | 98 } |
| 131 | 99 |
| 132 void Start() { | 100 void Start() { |
| 133 TRACE_EVENT_ASYNC_STEP_PAST0("net", "URLRequest", request_, "Queued"); | 101 TRACE_EVENT_ASYNC_STEP_PAST0("net", "URLRequest", request_, "Queued"); |
| 134 ready_ = true; | 102 ready_ = true; |
| 135 if (deferred_ && request_->status().is_success()) { | 103 if (deferred_ && request_->status().is_success()) { |
| 136 deferred_ = false; | 104 deferred_ = false; |
| 137 controller()->Resume(); | 105 controller()->Resume(); |
| 138 } | 106 } |
| 139 } | 107 } |
| 140 | 108 |
| 141 const ClientId& client_id() const { return client_id_; } | 109 const ClientId& client_id() const { return client_id_; } |
| 142 net::URLRequest* url_request() { return request_; } | 110 net::URLRequest* url_request() { return request_; } |
| 143 const net::URLRequest* url_request() const { return request_; } | 111 const net::URLRequest* url_request() const { return request_; } |
| 112 net::RequestPriority priority() const { return request_->priority(); } | |
| 113 int intraPriorityValue() const { return intra_priority_value_; } | |
|
James Simonsen
2014/02/07 03:45:09
For simple accessors, you're supposed to use intra_priority_value() / set_intra_priority_value() style naming (lower_snake_case matching the member name), per the style guide.
shatch
2014/02/10 19:55:47
Done.
| |
| 114 void setIntraPriorityValue(int intra_priority_value) { | |
| 115 intra_priority_value_ = intra_priority_value; | |
| 116 } | |
| 144 | 117 |
| 145 private: | 118 private: |
| 146 // ResourceMessageDelegate interface: | 119 // ResourceMessageDelegate interface: |
| 147 virtual bool OnMessageReceived(const IPC::Message& message, | 120 virtual bool OnMessageReceived(const IPC::Message& message, |
| 148 bool* message_was_ok) OVERRIDE { | 121 bool* message_was_ok) OVERRIDE { |
| 149 bool handled = true; | 122 bool handled = true; |
| 150 IPC_BEGIN_MESSAGE_MAP_EX(ScheduledResourceRequest, message, *message_was_ok) | 123 IPC_BEGIN_MESSAGE_MAP_EX(ScheduledResourceRequest, message, *message_was_ok) |
| 151 IPC_MESSAGE_HANDLER(ResourceHostMsg_DidChangePriority, DidChangePriority) | 124 IPC_MESSAGE_HANDLER(ResourceHostMsg_DidChangePriority, DidChangePriority) |
| 152 IPC_MESSAGE_UNHANDLED(handled = false) | 125 IPC_MESSAGE_UNHANDLED(handled = false) |
| 153 IPC_END_MESSAGE_MAP_EX() | 126 IPC_END_MESSAGE_MAP_EX() |
| 154 return handled; | 127 return handled; |
| 155 } | 128 } |
| 156 | 129 |
| 157 // ResourceThrottle interface: | 130 // ResourceThrottle interface: |
| 158 virtual void WillStartRequest(bool* defer) OVERRIDE { | 131 virtual void WillStartRequest(bool* defer) OVERRIDE { |
| 159 deferred_ = *defer = !ready_; | 132 deferred_ = *defer = !ready_; |
| 160 } | 133 } |
| 161 | 134 |
| 162 virtual const char* GetNameForLogging() const OVERRIDE { | 135 virtual const char* GetNameForLogging() const OVERRIDE { |
| 163 return "ResourceScheduler"; | 136 return "ResourceScheduler"; |
| 164 } | 137 } |
| 165 | 138 |
| 166 void DidChangePriority(int request_id, net::RequestPriority new_priority) { | 139 void DidChangePriority(int request_id, net::RequestPriority new_priority, |
| 167 scheduler_->ReprioritizeRequest(this, new_priority); | 140 int intra_priority_value) { |
| 141 scheduler_->ReprioritizeRequest(this, new_priority, intra_priority_value); | |
| 168 } | 142 } |
| 169 | 143 |
| 170 ClientId client_id_; | 144 ClientId client_id_; |
| 171 net::URLRequest* request_; | 145 net::URLRequest* request_; |
| 172 bool ready_; | 146 bool ready_; |
| 173 bool deferred_; | 147 bool deferred_; |
| 174 ResourceScheduler* scheduler_; | 148 ResourceScheduler* scheduler_; |
| 149 net::RequestPriority priority_; | |
|
James Simonsen
2014/02/07 03:45:09
This is unused.
shatch
2014/02/10 19:55:47
Done.
| |
| 150 int intra_priority_value_; | |
| 175 | 151 |
| 176 DISALLOW_COPY_AND_ASSIGN(ScheduledResourceRequest); | 152 DISALLOW_COPY_AND_ASSIGN(ScheduledResourceRequest); |
| 177 }; | 153 }; |
| 178 | 154 |
| 155 bool ResourceScheduler::ScheduledResourceSorter::operator()( | |
| 156 const ScheduledResourceRequest* a, | |
| 157 const ScheduledResourceRequest* b) const { | |
| 158 // Want the set to be ordered first by decreasing priority, then by | |
| 159 // decreasing intra_priority. | |
| 160 // ie. with (priority, intra_priority) | |
| 161 // [(1, 0), (1, 0), (0, 100), (0, 0)] | |
| 162 if (a->priority() == b->priority()) | |
| 163 return a->intraPriorityValue() > b->intraPriorityValue(); | |
| 164 return a->priority() > b->priority(); | |
| 165 } | |
| 166 | |
| 179 // Each client represents a tab. | 167 // Each client represents a tab. |
| 180 struct ResourceScheduler::Client { | 168 struct ResourceScheduler::Client { |
| 181 Client() : has_body(false), using_spdy_proxy(false) {} | 169 Client() : has_body(false), using_spdy_proxy(false) {} |
| 182 ~Client() {} | 170 ~Client() {} |
| 183 | 171 |
| 184 bool has_body; | 172 bool has_body; |
| 185 bool using_spdy_proxy; | 173 bool using_spdy_proxy; |
| 186 RequestQueue pending_requests; | 174 RequestQueue pending_requests; |
| 187 RequestSet in_flight_requests; | 175 RequestSet in_flight_requests; |
| 188 }; | 176 }; |
| (...skipping 23 matching lines...) Expand all Loading... | |
| 212 // 3. The tab is closed while a RequestResource IPC is in flight. | 200 // 3. The tab is closed while a RequestResource IPC is in flight. |
| 213 unowned_requests_.insert(request.get()); | 201 unowned_requests_.insert(request.get()); |
| 214 request->Start(); | 202 request->Start(); |
| 215 return request.PassAs<ResourceThrottle>(); | 203 return request.PassAs<ResourceThrottle>(); |
| 216 } | 204 } |
| 217 | 205 |
| 218 Client* client = it->second; | 206 Client* client = it->second; |
| 219 if (ShouldStartRequest(request.get(), client) == START_REQUEST) { | 207 if (ShouldStartRequest(request.get(), client) == START_REQUEST) { |
| 220 StartRequest(request.get(), client); | 208 StartRequest(request.get(), client); |
| 221 } else { | 209 } else { |
| 222 client->pending_requests.Insert(request.get(), url_request->priority()); | 210 client->pending_requests.Insert(request.get()); |
| 223 } | 211 } |
| 224 return request.PassAs<ResourceThrottle>(); | 212 return request.PassAs<ResourceThrottle>(); |
| 225 } | 213 } |
| 226 | 214 |
| 227 void ResourceScheduler::RemoveRequest(ScheduledResourceRequest* request) { | 215 void ResourceScheduler::RemoveRequest(ScheduledResourceRequest* request) { |
| 228 DCHECK(CalledOnValidThread()); | 216 DCHECK(CalledOnValidThread()); |
| 229 if (ContainsKey(unowned_requests_, request)) { | 217 if (ContainsKey(unowned_requests_, request)) { |
| 230 unowned_requests_.erase(request); | 218 unowned_requests_.erase(request); |
| 231 return; | 219 return; |
| 232 } | 220 } |
| (...skipping 96 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 329 } | 317 } |
| 330 } | 318 } |
| 331 | 319 |
| 332 void ResourceScheduler::StartRequest(ScheduledResourceRequest* request, | 320 void ResourceScheduler::StartRequest(ScheduledResourceRequest* request, |
| 333 Client* client) { | 321 Client* client) { |
| 334 client->in_flight_requests.insert(request); | 322 client->in_flight_requests.insert(request); |
| 335 request->Start(); | 323 request->Start(); |
| 336 } | 324 } |
| 337 | 325 |
| 338 void ResourceScheduler::ReprioritizeRequest(ScheduledResourceRequest* request, | 326 void ResourceScheduler::ReprioritizeRequest(ScheduledResourceRequest* request, |
| 339 net::RequestPriority new_priority) { | 327 net::RequestPriority new_priority, |
| 328 int new_intra_priority_value) { | |
| 340 if (request->url_request()->load_flags() & net::LOAD_IGNORE_LIMITS) { | 329 if (request->url_request()->load_flags() & net::LOAD_IGNORE_LIMITS) { |
| 341 // We should not be re-prioritizing requests with the | 330 // We should not be re-prioritizing requests with the |
| 342 // IGNORE_LIMITS flag. | 331 // IGNORE_LIMITS flag. |
| 343 NOTREACHED(); | 332 NOTREACHED(); |
| 344 return; | 333 return; |
| 345 } | 334 } |
| 346 net::RequestPriority old_priority = request->url_request()->priority(); | 335 net::RequestPriority old_priority = request->url_request()->priority(); |
| 347 DCHECK_NE(new_priority, old_priority); | 336 int old_intra_priority = request->intraPriorityValue(); |
| 337 DCHECK(new_priority != old_priority || | |
|
James Simonsen
2014/02/07 03:45:09
willchan will complain about not having operator precedence made explicit — add parentheses around the subexpressions in the DCHECK.
shatch
2014/02/10 19:55:47
Done.
| |
| 338 old_intra_priority != new_intra_priority_value); | |
| 348 request->url_request()->SetPriority(new_priority); | 339 request->url_request()->SetPriority(new_priority); |
| 340 request->setIntraPriorityValue(new_intra_priority_value); | |
| 349 ClientMap::iterator client_it = client_map_.find(request->client_id()); | 341 ClientMap::iterator client_it = client_map_.find(request->client_id()); |
| 350 if (client_it == client_map_.end()) { | 342 if (client_it == client_map_.end()) { |
| 351 // The client was likely deleted shortly before we received this IPC. | 343 // The client was likely deleted shortly before we received this IPC. |
| 352 return; | 344 return; |
| 353 } | 345 } |
| 354 | 346 |
| 355 Client *client = client_it->second; | 347 Client *client = client_it->second; |
| 356 if (!client->pending_requests.IsQueued(request)) { | 348 if (!client->pending_requests.IsQueued(request)) { |
| 357 DCHECK(ContainsKey(client->in_flight_requests, request)); | 349 DCHECK(ContainsKey(client->in_flight_requests, request)); |
| 358 // Request has already started. | 350 // Request has already started. |
| 359 return; | 351 return; |
| 360 } | 352 } |
| 361 | 353 |
| 362 client->pending_requests.Erase(request); | 354 client->pending_requests.Erase(request); |
| 363 client->pending_requests.Insert(request, | 355 client->pending_requests.Insert(request); |
| 364 request->url_request()->priority()); | |
| 365 | 356 |
| 366 if (new_priority > old_priority) { | 357 if (new_priority > old_priority) { |
| 367 // Check if this request is now able to load at its new priority. | 358 // Check if this request is now able to load at its new priority. |
| 368 LoadAnyStartablePendingRequests(client); | 359 LoadAnyStartablePendingRequests(client); |
| 369 } | 360 } |
| 370 } | 361 } |
| 371 | 362 |
| 372 void ResourceScheduler::LoadAnyStartablePendingRequests(Client* client) { | 363 void ResourceScheduler::LoadAnyStartablePendingRequests(Client* client) { |
| 373 // We iterate through all the pending requests, starting with the highest | 364 // We iterate through all the pending requests, starting with the highest |
| 374 // priority one. For each entry, one of three things can happen: | 365 // priority one. For each entry, one of three things can happen: |
| 375 // 1) We start the request, remove it from the list, and keep checking. | 366 // 1) We start the request, remove it from the list, and keep checking. |
| 376 // 2) We do NOT start the request, but ShouldStartRequest() signals us that | 367 // 2) We do NOT start the request, but ShouldStartRequest() signals us that |
| 377 // there may be room for other requests, so we keep checking and leave | 368 // there may be room for other requests, so we keep checking and leave |
| 378 // the previous request still in the list. | 369 // the previous request still in the list. |
| 379 // 3) We do not start the request, same as above, but StartRequest() tells | 370 // 3) We do not start the request, same as above, but StartRequest() tells |
| 380 // us there's no point in checking any further requests. | 371 // us there's no point in checking any further requests. |
| 381 | 372 |
| 382 RequestQueue::Iterator request_iter = | 373 for (RequestQueue::NetQueue::iterator request_iter = |
|
James Simonsen
2014/02/07 03:45:09
Is this what clang-format comes up with? I'm not really sure this wrapping is what we want.
shatch
2014/02/10 19:55:47
Done.
| |
| 383 client->pending_requests.GetNextHighestIterator(); | 374 client->pending_requests.GetNextHighestIterator(); request_iter != |
| 384 | 375 client->pending_requests.End();) { |
| 385 while (!request_iter.is_null()) { | 376 ScheduledResourceRequest* request = *request_iter; |
| 386 ScheduledResourceRequest* request = request_iter.value(); | |
| 387 ShouldStartReqResult query_result = ShouldStartRequest(request, client); | 377 ShouldStartReqResult query_result = ShouldStartRequest(request, client); |
| 388 | 378 |
| 389 if (query_result == START_REQUEST) { | 379 if (query_result == START_REQUEST) { |
| 390 client->pending_requests.Erase(request); | 380 client->pending_requests.Erase(request); |
| 391 StartRequest(request, client); | 381 StartRequest(request, client); |
| 392 | 382 |
| 393 // StartRequest can modify the pending list, so we (re)start evaluation | 383 // StartRequest can modify the pending list, so we (re)start evaluation |
| 394 // from the currently highest priority request. Avoid copying a singular | 384 // from the currently highest priority request. Avoid copying a singular |
| 395 // iterator, which would trigger undefined behavior. | 385 // iterator, which would trigger undefined behavior. |
| 396 if (client->pending_requests.GetNextHighestIterator().is_null()) | 386 if (client->pending_requests.GetNextHighestIterator() == |
| 387 client->pending_requests.End()) | |
| 397 break; | 388 break; |
| 398 request_iter = client->pending_requests.GetNextHighestIterator(); | 389 request_iter = client->pending_requests.GetNextHighestIterator(); |
| 399 } else if (query_result == DO_NOT_START_REQUEST_AND_KEEP_SEARCHING) { | 390 } else if (query_result == DO_NOT_START_REQUEST_AND_KEEP_SEARCHING) { |
| 400 ++request_iter; | 391 ++request_iter; |
| 401 continue; | 392 continue; |
| 402 } else { | 393 } else { |
| 403 DCHECK(query_result == DO_NOT_START_REQUEST_AND_STOP_SEARCHING); | 394 DCHECK(query_result == DO_NOT_START_REQUEST_AND_STOP_SEARCHING); |
| 404 break; | 395 break; |
| 405 } | 396 } |
| 406 } | 397 } |
| (...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 514 | 505 |
| 515 return START_REQUEST; | 506 return START_REQUEST; |
| 516 } | 507 } |
| 517 | 508 |
| 518 ResourceScheduler::ClientId ResourceScheduler::MakeClientId( | 509 ResourceScheduler::ClientId ResourceScheduler::MakeClientId( |
| 519 int child_id, int route_id) { | 510 int child_id, int route_id) { |
| 520 return (static_cast<ResourceScheduler::ClientId>(child_id) << 32) | route_id; | 511 return (static_cast<ResourceScheduler::ClientId>(child_id) << 32) | route_id; |
| 521 } | 512 } |
| 522 | 513 |
| 523 } // namespace content | 514 } // namespace content |
| OLD | NEW |