// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/browser/worker_host/worker_service.h"

#include <string>

#include "base/command_line.h"
#include "base/logging.h"
#include "base/sys_info.h"
#include "base/threading/thread.h"
#include "content/browser/resource_context.h"
#include "content/browser/worker_host/worker_message_filter.h"
#include "content/browser/worker_host/worker_process_host.h"
#include "content/browser/worker_host/worker_service_observer.h"
#include "content/common/view_messages.h"
#include "content/common/worker_messages.h"
#include "content/public/common/content_switches.h"
#include "content/public/common/process_type.h"
#include "net/base/registry_controlled_domain.h"

using content::BrowserThread;

const int WorkerService::kMaxWorkerProcessesWhenSharing = 10;
const int WorkerService::kMaxWorkersWhenSeparate = 64;
const int WorkerService::kMaxWorkersPerTabWhenSeparate = 16;

WorkerService* WorkerService::GetInstance() {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  return Singleton<WorkerService>::get();
}

WorkerService::WorkerService() : next_worker_route_id_(0) {
}

WorkerService::~WorkerService() {
  // The observers in observers_ can't be used here because they might be
  // gone already.
}

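// Detaches |filter| from every live, queued, and pending worker instance when
// its IPC channel goes away (either a renderer or a worker process exited),
// then tries to start any workers that were queued waiting for capacity.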
void WorkerService::OnWorkerMessageFilterClosing(WorkerMessageFilter* filter) {
  for (BrowserChildProcessHost::Iterator iter(content::PROCESS_TYPE_WORKER);
       !iter.Done(); ++iter) {
    WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
    worker->FilterShutdown(filter);
  }

  // See if that process had any queued workers.
  for (WorkerProcessHost::Instances::iterator i = queued_workers_.begin();
       i != queued_workers_.end();) {
    i->RemoveFilters(filter);
    if (i->NumFilters() == 0) {
      i = queued_workers_.erase(i);
    } else {
      ++i;
    }
  }

  // Also, see if that process had any pending shared workers.
  for (WorkerProcessHost::Instances::iterator iter =
           pending_shared_workers_.begin();
       iter != pending_shared_workers_.end(); ) {
    iter->worker_document_set()->RemoveAll(filter);
    if (iter->worker_document_set()->IsEmpty()) {
      iter = pending_shared_workers_.erase(iter);
    } else {
      ++iter;
    }
  }

  // Either a worker process has shut down, in which case we can start one of
  // the queued workers, or a renderer has shut down, in which case it doesn't
  // affect anything. We call this function in both scenarios because then we
  // don't have to keep track of which filters are from worker processes.
  TryStartingQueuedWorker();
}

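// Handles a request to create a worker: builds a WorkerInstance with a route
// id that is unique across all worker processes, registers the requesting
// filter and document with it, and hands it to CreateWorkerFromInstance().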
void WorkerService::CreateWorker(
    const ViewHostMsg_CreateWorker_Params& params,
    int route_id,
    WorkerMessageFilter* filter,
    const content::ResourceContext& resource_context) {
  // Generate a route id for the browser-worker communication that's unique
  // among all worker processes. That way when the worker process sends a
  // wrapped IPC message through us, we know which WorkerProcessHost to give
  // it to.
  WorkerProcessHost::WorkerInstance instance(
      params.url,
      params.name,
      next_worker_route_id(),
      0,
      params.script_resource_appcache_id,
      &resource_context);
  instance.AddFilter(filter, route_id);
  instance.worker_document_set()->Add(
      filter, params.document_id, filter->render_process_id(),
      params.render_view_route_id);

  CreateWorkerFromInstance(instance);
}

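// Looks up a shared worker by url/name. Sets |*exists| to true only if a
// matching instance was already known and its URL matches, and sets
// |*url_mismatch| if an instance with the same name is registered under a
// different URL. If nothing is found, a pending instance is created so that
// later lookups with a mismatched URL can be rejected.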
void WorkerService::LookupSharedWorker(
    const ViewHostMsg_CreateWorker_Params& params,
    int route_id,
    WorkerMessageFilter* filter,
    const content::ResourceContext* resource_context,
    bool* exists,
    bool* url_mismatch) {
  *exists = true;
  WorkerProcessHost::WorkerInstance* instance = FindSharedWorkerInstance(
      params.url, params.name, resource_context);

  if (!instance) {
    // If no worker instance currently exists, we need to create a pending
    // instance - this is to make sure that any subsequent lookups passing a
    // mismatched URL get the appropriate url_mismatch error at lookup time.
    // Having named shared workers was a Really Bad Idea due to details like
    // this.
    instance = CreatePendingInstance(params.url, params.name,
                                     resource_context);
    *exists = false;
  }

  // Make sure the passed-in instance matches the URL - if not, return an
  // error.
  if (params.url != instance->url()) {
    *url_mismatch = true;
    *exists = false;
  } else {
    *url_mismatch = false;
    // Add our route ID to the existing instance so we can send messages to it.
    instance->AddFilter(filter, route_id);

    // Add the passed filter/document_id to the worker instance.
    // TODO(atwilson): This won't work if the message is from a worker process.
    // We don't support that yet though (this message is only sent from
    // renderers) but when we do, we'll need to add code to pass in the current
    // worker's document set for nested workers.
    instance->worker_document_set()->Add(
        filter, params.document_id, filter->render_process_id(),
        params.render_view_route_id);
  }
}

void WorkerService::CancelCreateDedicatedWorker(
    int route_id,
    WorkerMessageFilter* filter) {
  NOTREACHED();
}

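// Routes a wrapped IPC message from a renderer to the worker process that
// owns the target route id; the message is dropped if no process claims it.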
void WorkerService::ForwardToWorker(const IPC::Message& message,
                                    WorkerMessageFilter* filter) {
  for (BrowserChildProcessHost::Iterator iter(content::PROCESS_TYPE_WORKER);
       !iter.Done(); ++iter) {
    WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
    if (worker->FilterMessage(message, filter))
      return;
  }

  // TODO(jabdelmalek): tell filter that callee is gone
}

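// Called when a document goes away. Detaches it from every live, queued, and
// pending worker instance; shared workers left with no documents can then be
// shut down, and empty queued/pending instances are discarded.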
void WorkerService::DocumentDetached(unsigned long long document_id,
                                     WorkerMessageFilter* filter) {
  // Any associated shared workers can be shut down.
  for (BrowserChildProcessHost::Iterator iter(content::PROCESS_TYPE_WORKER);
       !iter.Done(); ++iter) {
    WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
    worker->DocumentDetached(filter, document_id);
  }

  // Remove any queued shared workers for this document.
  for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin();
       iter != queued_workers_.end();) {
    iter->worker_document_set()->Remove(filter, document_id);
    if (iter->worker_document_set()->IsEmpty()) {
      iter = queued_workers_.erase(iter);
      continue;
    }
    ++iter;
  }

  // Remove the document from any pending shared workers.
  for (WorkerProcessHost::Instances::iterator iter =
           pending_shared_workers_.begin();
       iter != pending_shared_workers_.end(); ) {
    iter->worker_document_set()->Remove(filter, document_id);
    if (iter->worker_document_set()->IsEmpty()) {
      iter = pending_shared_workers_.erase(iter);
    } else {
      ++iter;
    }
  }
}

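// Places |instance| into a worker process. Depending on the process-model
// switches this reuses an existing process (per-core or per-domain sharing);
// in the default one-process-per-worker mode the instance is queued when the
// worker limits are hit. A shared worker that is already running or pending
// is coalesced with |instance| rather than started twice. Returns false if
// the worker could not be created, e.g. the requesting client already exited
// or the worker process failed to launch.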
bool WorkerService::CreateWorkerFromInstance(
    WorkerProcessHost::WorkerInstance instance) {
  // TODO(michaeln): We need to ensure that a process is working
  // on behalf of a single browser context. The process sharing logic below
  // does not ensure that. Consider making WorkerService a per browser context
  // object to help with this.
  WorkerProcessHost* worker = NULL;
  if (CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kWebWorkerProcessPerCore)) {
    worker = GetProcessToFillUpCores();
  } else if (CommandLine::ForCurrentProcess()->HasSwitch(
                 switches::kWebWorkerShareProcesses)) {
    worker = GetProcessForDomain(instance.url());
  } else {  // One process per worker.
    if (!CanCreateWorkerProcess(instance)) {
      queued_workers_.push_back(instance);
      return true;
    }
  }

  // Check to see if this shared worker is already running (two pages may have
  // tried to start up the worker simultaneously).
  // See if a worker with this name already exists.
  WorkerProcessHost::WorkerInstance* existing_instance =
      FindSharedWorkerInstance(
          instance.url(), instance.name(), instance.resource_context());
  WorkerProcessHost::WorkerInstance::FilterInfo filter_info =
      instance.GetFilter();
  // If this worker is already running, no need to create a new copy. Just
  // inform the caller that the worker has been created.
  if (existing_instance) {
    // Walk the worker's filter list to see if this client is listed. If not,
    // then it means that the worker started by the client already exited so
    // we should not attach to this new one (http://crbug.com/29243).
    if (!existing_instance->HasFilter(filter_info.first, filter_info.second))
      return false;
    filter_info.first->Send(new ViewMsg_WorkerCreated(filter_info.second));
    return true;
  }

  // Look to see if there's a pending instance.
  WorkerProcessHost::WorkerInstance* pending = FindPendingInstance(
      instance.url(), instance.name(), instance.resource_context());
  // If there's no instance *and* no pending instance (or there is a pending
  // instance but it does not contain our filter info), then it means the
  // worker started up and exited already. Log a warning because this should
  // be a very rare occurrence and is probably a bug, but it *can* happen so
  // handle it gracefully.
  if (!pending ||
      !pending->HasFilter(filter_info.first, filter_info.second)) {
    DLOG(WARNING) << "Pending worker already exited";
    return false;
  }

  // Assign the accumulated document set and filter list for this pending
  // worker to the new instance.
  DCHECK(!pending->worker_document_set()->IsEmpty());
  instance.ShareDocumentSet(*pending);
  for (WorkerProcessHost::WorkerInstance::FilterList::const_iterator i =
           pending->filters().begin();
       i != pending->filters().end(); ++i) {
    instance.AddFilter(i->first, i->second);
  }
  RemovePendingInstances(
      instance.url(), instance.name(), instance.resource_context());

  // Remove any queued instances of this worker and copy over the filter to
  // this instance.
  for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin();
       iter != queued_workers_.end();) {
    if (iter->Matches(instance.url(), instance.name(),
                      instance.resource_context())) {
      DCHECK(iter->NumFilters() == 1);
      WorkerProcessHost::WorkerInstance::FilterInfo filter_info =
          iter->GetFilter();
      instance.AddFilter(filter_info.first, filter_info.second);
      iter = queued_workers_.erase(iter);
    } else {
      ++iter;
    }
  }

  if (!worker) {
    WorkerMessageFilter* first_filter = instance.filters().begin()->first;
    worker = new WorkerProcessHost(
        instance.resource_context(),
        first_filter->resource_dispatcher_host());
    // TODO(atwilson): This won't work if the message is from a worker process.
    // We don't support that yet though (this message is only sent from
    // renderers) but when we do, we'll need to add code to pass in the current
    // worker's document set for nested workers.
    if (!worker->Init(first_filter->render_process_id())) {
      delete worker;
      return false;
    }
  }

  // TODO(michaeln): As written, test can fail per my earlier comment in
  // this method, but that's a bug.
  // DCHECK(worker->request_context() == instance.request_context());

  worker->CreateWorker(instance);
  FOR_EACH_OBSERVER(WorkerServiceObserver, observers_,
                    WorkerCreated(worker, instance));
  return true;
}

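// Used when switches::kWebWorkerShareProcesses is set. Returns a worker
// process that already hosts a worker from the same registry-controlled
// domain as |url|; if there is no such process and the
// kMaxWorkerProcessesWhenSharing cap has been reached, returns the least
// loaded process, and otherwise returns NULL so the caller creates a new one.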
WorkerProcessHost* WorkerService::GetProcessForDomain(const GURL& url) {
  int num_processes = 0;
  std::string domain =
      net::RegistryControlledDomainService::GetDomainAndRegistry(url);
  for (BrowserChildProcessHost::Iterator iter(content::PROCESS_TYPE_WORKER);
       !iter.Done(); ++iter) {
    num_processes++;
    WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
    for (WorkerProcessHost::Instances::const_iterator instance =
             worker->instances().begin();
         instance != worker->instances().end(); ++instance) {
      if (net::RegistryControlledDomainService::GetDomainAndRegistry(
              instance->url()) == domain) {
        return worker;
      }
    }
  }

  if (num_processes >= kMaxWorkerProcessesWhenSharing)
    return GetLeastLoadedWorker();

  return NULL;
}

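// Used when switches::kWebWorkerProcessPerCore is set. Returns NULL (so a new
// process is created) until there is one worker process per CPU core, then
// reuses the least loaded existing process.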
WorkerProcessHost* WorkerService::GetProcessToFillUpCores() {
  int num_processes = 0;
  BrowserChildProcessHost::Iterator iter(content::PROCESS_TYPE_WORKER);
  for (; !iter.Done(); ++iter)
    num_processes++;

  if (num_processes >= base::SysInfo::NumberOfProcessors())
    return GetLeastLoadedWorker();

  return NULL;
}

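// Returns the worker process hosting the fewest worker instances, or NULL if
// no worker processes exist.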
WorkerProcessHost* WorkerService::GetLeastLoadedWorker() {
  WorkerProcessHost* smallest = NULL;
  for (BrowserChildProcessHost::Iterator iter(content::PROCESS_TYPE_WORKER);
       !iter.Done(); ++iter) {
    WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
    if (!smallest || worker->instances().size() < smallest->instances().size())
      smallest = worker;
  }

  return smallest;
}

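// Returns true if a new worker process may be started for |instance| in the
// one-process-per-worker mode, i.e. at least one parent tab is still under
// the per-tab and global worker limits.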
bool WorkerService::CanCreateWorkerProcess(
    const WorkerProcessHost::WorkerInstance& instance) {
  // Worker can be fired off if *any* parent has room.
  const WorkerDocumentSet::DocumentInfoSet& parents =
      instance.worker_document_set()->documents();

  for (WorkerDocumentSet::DocumentInfoSet::const_iterator parent_iter =
           parents.begin();
       parent_iter != parents.end(); ++parent_iter) {
    bool hit_total_worker_limit = false;
    if (TabCanCreateWorkerProcess(parent_iter->render_process_id(),
                                  parent_iter->render_view_id(),
                                  &hit_total_worker_limit)) {
      return true;
    }
    // Return false if already at the global worker limit (no need to continue
    // checking parent tabs).
    if (hit_total_worker_limit)
      return false;
  }
  // If we've reached here, none of the parent tabs is allowed to create an
  // instance.
  return false;
}

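// Checks the given tab against kMaxWorkersPerTabWhenSeparate and the global
// kMaxWorkersWhenSeparate limit; sets |*hit_total_worker_limit| when the
// global cap has been reached so callers can stop checking other tabs.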
bool WorkerService::TabCanCreateWorkerProcess(int render_process_id,
                                              int render_view_id,
                                              bool* hit_total_worker_limit) {
  int total_workers = 0;
  int workers_per_tab = 0;
  *hit_total_worker_limit = false;
  for (BrowserChildProcessHost::Iterator iter(content::PROCESS_TYPE_WORKER);
       !iter.Done(); ++iter) {
    WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
    for (WorkerProcessHost::Instances::const_iterator cur_instance =
             worker->instances().begin();
         cur_instance != worker->instances().end(); ++cur_instance) {
      total_workers++;
      if (total_workers >= kMaxWorkersWhenSeparate) {
        *hit_total_worker_limit = true;
        return false;
      }
      if (cur_instance->RendererIsParent(render_process_id, render_view_id)) {
        workers_per_tab++;
        if (workers_per_tab >= kMaxWorkersPerTabWhenSeparate)
          return false;
      }
    }
  }

  return true;
}

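// Starts any queued workers that now fit under the worker limits; called when
// a filter closes and may have freed up capacity.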
void WorkerService::TryStartingQueuedWorker() {
  if (queued_workers_.empty())
    return;

  for (WorkerProcessHost::Instances::iterator i = queued_workers_.begin();
       i != queued_workers_.end();) {
    if (CanCreateWorkerProcess(*i)) {
      WorkerProcessHost::WorkerInstance instance = *i;
      queued_workers_.erase(i);
      CreateWorkerFromInstance(instance);

      // CreateWorkerFromInstance can modify the queued_workers_ list when it
      // coalesces queued instances after starting a shared worker, so we
      // have to rescan the list from the beginning (our iterator is now
      // invalid). This is not a big deal as having any queued workers will be
      // rare in practice so the list will be small.
      i = queued_workers_.begin();
    } else {
      ++i;
    }
  }
}

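// Fills in the renderer process/view that owns the first document of the
// given worker process's first instance. Returns false if the process is not
// found or has no instances.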
bool WorkerService::GetRendererForWorker(int worker_process_id,
                                         int* render_process_id,
                                         int* render_view_id) const {
  for (BrowserChildProcessHost::Iterator iter(content::PROCESS_TYPE_WORKER);
       !iter.Done(); ++iter) {
    if (iter->id() != worker_process_id)
      continue;

    // This code assumes one worker per process, see function comment in header!
    WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
    WorkerProcessHost::Instances::const_iterator first_instance =
        worker->instances().begin();
    if (first_instance == worker->instances().end())
      return false;

    WorkerDocumentSet::DocumentInfoSet::const_iterator info =
        first_instance->worker_document_set()->documents().begin();
    *render_process_id = info->render_process_id();
    *render_view_id = info->render_view_id();
    return true;
  }
  return false;
}

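// Returns the first worker instance hosted by the process with the given id,
// or NULL if there is no such process or it has no instances.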
const WorkerProcessHost::WorkerInstance* WorkerService::FindWorkerInstance(
    int worker_process_id) {
  for (BrowserChildProcessHost::Iterator iter(content::PROCESS_TYPE_WORKER);
       !iter.Done(); ++iter) {
    if (iter->id() != worker_process_id)
      continue;

    WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
    WorkerProcessHost::Instances::const_iterator instance =
        worker->instances().begin();
    return instance == worker->instances().end() ? NULL : &*instance;
  }
  return NULL;
}

void WorkerService::AddObserver(WorkerServiceObserver* observer) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  observers_.AddObserver(observer);
}

void WorkerService::RemoveObserver(WorkerServiceObserver* observer) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  observers_.RemoveObserver(observer);
}

void WorkerService::NotifyWorkerDestroyed(
    WorkerProcessHost* process,
    int worker_route_id) {
  FOR_EACH_OBSERVER(WorkerServiceObserver, observers_,
                    WorkerDestroyed(process, worker_route_id));
}

void WorkerService::NotifyWorkerContextStarted(WorkerProcessHost* process,
                                               int worker_route_id) {
  FOR_EACH_OBSERVER(WorkerServiceObserver, observers_,
                    WorkerContextStarted(process, worker_route_id));
}

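// Searches all live worker processes for a shared worker instance matching
// the given url/name/resource context; returns NULL if none is running.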
WorkerProcessHost::WorkerInstance*
WorkerService::FindSharedWorkerInstance(
    const GURL& url,
    const string16& name,
    const content::ResourceContext* resource_context) {
  for (BrowserChildProcessHost::Iterator iter(content::PROCESS_TYPE_WORKER);
       !iter.Done(); ++iter) {
    WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
    for (WorkerProcessHost::Instances::iterator instance_iter =
             worker->mutable_instances().begin();
         instance_iter != worker->mutable_instances().end();
         ++instance_iter) {
      if (instance_iter->Matches(url, name, resource_context))
        return &(*instance_iter);
    }
  }
  return NULL;
}

WorkerProcessHost::WorkerInstance*
WorkerService::FindPendingInstance(
    const GURL& url,
    const string16& name,
    const content::ResourceContext* resource_context) {
  // Walk the pending instances looking for a matching pending worker.
  for (WorkerProcessHost::Instances::iterator iter =
           pending_shared_workers_.begin();
       iter != pending_shared_workers_.end();
       ++iter) {
    if (iter->Matches(url, name, resource_context)) {
      return &(*iter);
    }
  }
  return NULL;
}

void WorkerService::RemovePendingInstances(
    const GURL& url,
    const string16& name,
    const content::ResourceContext* resource_context) {
  // Remove all pending instances that match the given url/name/context.
  for (WorkerProcessHost::Instances::iterator iter =
           pending_shared_workers_.begin();
       iter != pending_shared_workers_.end(); ) {
    if (iter->Matches(url, name, resource_context)) {
      iter = pending_shared_workers_.erase(iter);
    } else {
      ++iter;
    }
  }
}

WorkerProcessHost::WorkerInstance*
WorkerService::CreatePendingInstance(
    const GURL& url,
    const string16& name,
    const content::ResourceContext* resource_context) {
  // Look for an existing pending shared worker.
  WorkerProcessHost::WorkerInstance* instance =
      FindPendingInstance(url, name, resource_context);
  if (instance)
    return instance;

  // No existing pending worker - create a new one.
  WorkerProcessHost::WorkerInstance pending(url, true, name, resource_context);
  pending_shared_workers_.push_back(pending);
  return &pending_shared_workers_.back();
}