OLD | NEW |
1 // Copyright (c) 2009 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2009 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "chrome/browser/worker_host/worker_service.h" | 5 #include "chrome/browser/worker_host/worker_service.h" |
6 | 6 |
7 #include "base/command_line.h" | 7 #include "base/command_line.h" |
8 #include "base/singleton.h" | 8 #include "base/singleton.h" |
9 #include "base/sys_info.h" | 9 #include "base/sys_info.h" |
10 #include "base/thread.h" | 10 #include "base/thread.h" |
(...skipping 31 matching lines...)
42 resource_dispatcher_host_ = rdh; | 42 resource_dispatcher_host_ = rdh; |
43 } | 43 } |
44 | 44 |
45 WorkerService::~WorkerService() { | 45 WorkerService::~WorkerService() { |
46 } | 46 } |
47 | 47 |
48 bool WorkerService::CreateWorker(const GURL &url, | 48 bool WorkerService::CreateWorker(const GURL &url, |
49 bool is_shared, | 49 bool is_shared, |
50 bool off_the_record, | 50 bool off_the_record, |
51 const string16& name, | 51 const string16& name, |
| 52 unsigned long long document_id, |
52 int renderer_id, | 53 int renderer_id, |
53 int render_view_route_id, | 54 int render_view_route_id, |
54 IPC::Message::Sender* sender, | 55 IPC::Message::Sender* sender, |
55 int sender_route_id) { | 56 int sender_route_id) { |
56 // Generate a unique route id for the browser-worker communication that's | 57 // Generate a unique route id for the browser-worker communication that's |
57 // unique among all worker processes. That way when the worker process sends | 58 // unique among all worker processes. That way when the worker process sends |
58 // a wrapped IPC message through us, we know which WorkerProcessHost to give | 59 // a wrapped IPC message through us, we know which WorkerProcessHost to give |
59 // it to. | 60 // it to. |
60 WorkerProcessHost::WorkerInstance instance(url, | 61 WorkerProcessHost::WorkerInstance instance(url, |
61 is_shared, | 62 is_shared, |
62 off_the_record, | 63 off_the_record, |
63 name, | 64 name, |
64 renderer_id, | |
65 render_view_route_id, | |
66 next_worker_route_id()); | 65 next_worker_route_id()); |
67 instance.AddSender(sender, sender_route_id); | 66 instance.AddSender(sender, sender_route_id); |
| 67 instance.worker_document_set()->Add( |
| 68 sender, document_id, renderer_id, render_view_route_id); |
68 | 69 |
69 WorkerProcessHost* worker = NULL; | 70 WorkerProcessHost* worker = NULL; |
70 if (CommandLine::ForCurrentProcess()->HasSwitch( | 71 if (CommandLine::ForCurrentProcess()->HasSwitch( |
71 switches::kWebWorkerProcessPerCore)) { | 72 switches::kWebWorkerProcessPerCore)) { |
72 worker = GetProcessToFillUpCores(); | 73 worker = GetProcessToFillUpCores(); |
73 } else if (CommandLine::ForCurrentProcess()->HasSwitch( | 74 } else if (CommandLine::ForCurrentProcess()->HasSwitch( |
74 switches::kWebWorkerShareProcesses)) { | 75 switches::kWebWorkerShareProcesses)) { |
75 worker = GetProcessForDomain(url); | 76 worker = GetProcessForDomain(url); |
76 } else { // One process per worker. | 77 } else { // One process per worker. |
77 if (!CanCreateWorkerProcess(instance)) { | 78 if (!CanCreateWorkerProcess(instance)) { |
78 queued_workers_.push_back(instance); | 79 queued_workers_.push_back(instance); |
79 return true; | 80 return true; |
80 } | 81 } |
81 } | 82 } |
82 | 83 |
83 // Check to see if this shared worker is already running (two pages may have | 84 // Check to see if this shared worker is already running (two pages may have |
84 // tried to start up the worker simultaneously). | 85 // tried to start up the worker simultaneously). |
85 if (is_shared) { | 86 if (is_shared) { |
86 // See if a worker with this name already exists. | 87 // See if a worker with this name already exists. |
87 WorkerProcessHost::WorkerInstance* existing_instance = | 88 WorkerProcessHost::WorkerInstance* existing_instance = |
88 FindSharedWorkerInstance(url, name, off_the_record); | 89 FindSharedWorkerInstance(url, name, off_the_record); |
89 // If this worker is already running, no need to create a new copy. Just | 90 // If this worker is already running, no need to create a new copy. Just |
90 // inform the caller that the worker has been created. | 91 // inform the caller that the worker has been created. |
91 if (existing_instance) { | 92 if (existing_instance) { |
| 93 // TODO(atwilson): Change this to scan the sender list (crbug.com/29243). |
92 existing_instance->AddSender(sender, sender_route_id); | 94 existing_instance->AddSender(sender, sender_route_id); |
93 sender->Send(new ViewMsg_WorkerCreated(sender_route_id)); | 95 sender->Send(new ViewMsg_WorkerCreated(sender_route_id)); |
94 return true; | 96 return true; |
95 } | 97 } |
96 | 98 |
97 // Look to see if there's a pending instance. | 99 // Look to see if there's a pending instance. |
98 WorkerProcessHost::WorkerInstance* pending = FindPendingInstance( | 100 WorkerProcessHost::WorkerInstance* pending = FindPendingInstance( |
99 url, name, off_the_record); | 101 url, name, off_the_record); |
100 // If there's no instance *and* no pending instance, then it means the | 102 // If there's no instance *and* no pending instance, then it means the |
101 // worker started up and exited already. Log a warning because this should | 103 // worker started up and exited already. Log a warning because this should |
102 // be a very rare occurrence and is probably a bug, but it *can* happen so | 104 // be a very rare occurrence and is probably a bug, but it *can* happen so |
103 // handle it gracefully. | 105 // handle it gracefully. |
104 if (!pending) { | 106 if (!pending) { |
105 DLOG(WARNING) << "Pending worker already exited"; | 107 DLOG(WARNING) << "Pending worker already exited"; |
106 return false; | 108 return false; |
107 } | 109 } |
108 | 110 |
109 // Assign the accumulated document set and sender list for this pending | 111 // Assign the accumulated document set and sender list for this pending |
110 // worker to the new instance. | 112 // worker to the new instance. |
111 DCHECK(!pending->IsDocumentSetEmpty()); | 113 DCHECK(!pending->worker_document_set()->IsEmpty()); |
112 instance.CopyDocumentSet(*pending); | 114 instance.ShareDocumentSet(*pending); |
113 RemovePendingInstance(url, name, off_the_record); | 115 RemovePendingInstance(url, name, off_the_record); |
114 } | 116 } |
115 | 117 |
116 if (!worker) { | 118 if (!worker) { |
117 worker = new WorkerProcessHost(resource_dispatcher_host_); | 119 worker = new WorkerProcessHost(resource_dispatcher_host_); |
118 if (!worker->Init()) { | 120 if (!worker->Init()) { |
119 delete worker; | 121 delete worker; |
120 return false; | 122 return false; |
121 } | 123 } |
122 } | 124 } |
123 | 125 |
124 worker->CreateWorker(instance); | 126 worker->CreateWorker(instance); |
125 return true; | 127 return true; |
126 } | 128 } |
127 | 129 |
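The substance of this change is visible in the hunk above: WorkerInstance no longer carries a single (renderer_id, render_view_route_id) pair; instead each attached document is recorded in a shared worker_document_set(), keyed by sender and document ID and carrying the owning renderer/view IDs. The WorkerDocumentSet class itself is not part of this file, so the following is only a rough standalone sketch of the data structure implied by the calls above; the names, the void* sender type, and the lack of reference counting are simplifications for illustration, not the real Chromium API. (Since instance.ShareDocumentSet(*pending) later shares one set between two instances, the real class is presumably reference-counted.)

#include <set>

// Illustrative stand-in for WorkerDocumentSet; sender is typed as void* so
// the sketch compiles without the IPC headers.
class DocumentSetSketch {
 public:
  struct DocumentInfo {
    const void* sender;               // IPC::Message::Sender* in the real code
    unsigned long long document_id;
    int renderer_id;
    int render_view_route_id;
    bool operator<(const DocumentInfo& other) const {
      if (sender != other.sender)
        return sender < other.sender;
      return document_id < other.document_id;
    }
  };
  typedef std::set<DocumentInfo> DocumentInfoSet;

  void Add(const void* sender, unsigned long long document_id,
           int renderer_id, int render_view_route_id) {
    DocumentInfo info = { sender, document_id, renderer_id,
                          render_view_route_id };
    documents_.insert(info);
  }
  void Remove(const void* sender, unsigned long long document_id) {
    DocumentInfo key = { sender, document_id, 0, 0 };
    documents_.erase(key);  // ordering ignores the renderer/view fields
  }
  void RemoveAll(const void* sender) {
    for (DocumentInfoSet::iterator iter = documents_.begin();
         iter != documents_.end(); ) {
      if (iter->sender == sender)
        documents_.erase(iter++);
      else
        ++iter;
    }
  }
  bool IsEmpty() const { return documents_.empty(); }
  const DocumentInfoSet& documents() const { return documents_; }

 private:
  DocumentInfoSet documents_;
};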
128 bool WorkerService::LookupSharedWorker(const GURL &url, | 130 bool WorkerService::LookupSharedWorker(const GURL &url, |
129 const string16& name, | 131 const string16& name, |
130 bool off_the_record, | 132 bool off_the_record, |
131 unsigned long long document_id, | 133 unsigned long long document_id, |
| 134 int renderer_id, |
| 135 int render_view_route_id, |
132 IPC::Message::Sender* sender, | 136 IPC::Message::Sender* sender, |
133 int sender_route_id, | 137 int sender_route_id, |
134 bool* url_mismatch) { | 138 bool* url_mismatch) { |
135 bool found_instance = true; | 139 bool found_instance = true; |
136 WorkerProcessHost::WorkerInstance* instance = | 140 WorkerProcessHost::WorkerInstance* instance = |
137 FindSharedWorkerInstance(url, name, off_the_record); | 141 FindSharedWorkerInstance(url, name, off_the_record); |
138 | 142 |
139 if (!instance) { | 143 if (!instance) { |
140 // If no worker instance currently exists, we need to create a pending | 144 // If no worker instance currently exists, we need to create a pending |
141 // instance - this is to make sure that any subsequent lookups passing a | 145 // instance - this is to make sure that any subsequent lookups passing a |
(...skipping 11 matching lines...)
153 return false; | 157 return false; |
154 } else { | 158 } else { |
155 *url_mismatch = false; | 159 *url_mismatch = false; |
156 } | 160 } |
157 | 161 |
158 // Add our route ID to the existing instance so we can send messages to it. | 162 // Add our route ID to the existing instance so we can send messages to it. |
159 if (found_instance) | 163 if (found_instance) |
160 instance->AddSender(sender, sender_route_id); | 164 instance->AddSender(sender, sender_route_id); |
161 | 165 |
162 // Add the passed sender/document_id to the worker instance. | 166 // Add the passed sender/document_id to the worker instance. |
163 instance->AddToDocumentSet(sender, document_id); | 167 instance->worker_document_set()->Add( |
| 168 sender, document_id, renderer_id, render_view_route_id); |
164 return found_instance; | 169 return found_instance; |
165 } | 170 } |
166 | 171 |
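For reviewers tracing the new parameters: LookupSharedWorker() now also takes the caller's renderer_id and render_view_route_id so the document set records which tab owns each attaching document. The snippet below is a hypothetical call-site sketch of the contract; the real call site is elsewhere in the browser process and not part of this diff, and the helper name here is made up.

#include "chrome/browser/worker_host/worker_service.h"

// Hypothetical helper, not Chromium code: shows how the return value and the
// url_mismatch out-parameter of LookupSharedWorker() are meant to be read.
static void LookupSharedWorkerSketch(WorkerService* service,
                                     const GURL& url,
                                     const string16& name,
                                     bool off_the_record,
                                     unsigned long long document_id,
                                     int renderer_id,
                                     int render_view_route_id,
                                     IPC::Message::Sender* sender,
                                     int sender_route_id) {
  bool url_mismatch = false;
  bool exists = service->LookupSharedWorker(
      url, name, off_the_record, document_id, renderer_id,
      render_view_route_id, sender, sender_route_id, &url_mismatch);
  if (url_mismatch) {
    // A shared worker with this name exists but was created with a different
    // URL; the caller is expected to report an error to the renderer.
    return;
  }
  if (!exists) {
    // No live instance yet. The service has created (or reused) a pending
    // instance that already contains this document, so a later
    // CreateWorker() call will inherit the accumulated document set.
  }
}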
167 void WorkerService::DocumentDetached(IPC::Message::Sender* sender, | 172 void WorkerService::DocumentDetached(IPC::Message::Sender* sender, |
168 unsigned long long document_id) { | 173 unsigned long long document_id) { |
169 for (ChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); | 174 for (ChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); |
170 !iter.Done(); ++iter) { | 175 !iter.Done(); ++iter) { |
171 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); | 176 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); |
172 worker->DocumentDetached(sender, document_id); | 177 worker->DocumentDetached(sender, document_id); |
173 } | 178 } |
174 | 179 |
175 // Remove any queued shared workers for this document. | 180 // Remove any queued shared workers for this document. |
176 for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin(); | 181 for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin(); |
177 iter != queued_workers_.end();) { | 182 iter != queued_workers_.end();) { |
178 if (iter->shared()) { | 183 if (iter->shared()) { |
179 iter->RemoveFromDocumentSet(sender, document_id); | 184 iter->worker_document_set()->Remove(sender, document_id); |
180 if (iter->IsDocumentSetEmpty()) { | 185 if (iter->worker_document_set()->IsEmpty()) { |
181 iter = queued_workers_.erase(iter); | 186 iter = queued_workers_.erase(iter); |
182 continue; | 187 continue; |
183 } | 188 } |
184 } | 189 } |
185 ++iter; | 190 ++iter; |
186 } | 191 } |
187 | 192 |
188 // Remove the document from any pending shared workers. | 193 // Remove the document from any pending shared workers. |
189 for (WorkerProcessHost::Instances::iterator iter = | 194 for (WorkerProcessHost::Instances::iterator iter = |
190 pending_shared_workers_.begin(); | 195 pending_shared_workers_.begin(); |
191 iter != pending_shared_workers_.end(); ) { | 196 iter != pending_shared_workers_.end(); ) { |
192 iter->RemoveFromDocumentSet(sender, document_id); | 197 iter->worker_document_set()->Remove(sender, document_id); |
193 if (iter->IsDocumentSetEmpty()) { | 198 if (iter->worker_document_set()->IsEmpty()) { |
194 iter = pending_shared_workers_.erase(iter); | 199 iter = pending_shared_workers_.erase(iter); |
195 } else { | 200 } else { |
196 ++iter; | 201 ++iter; |
197 } | 202 } |
198 } | 203 } |
199 | 204 |
200 } | 205 } |
201 | 206 |
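Both loops in DocumentDetached() (and the similar loop in SenderShutdown() further down) rely on the standard erase-while-iterating pattern, where erase() hands back the next valid iterator and the loop only advances manually when nothing was removed. A minimal generic illustration, not tied to the worker code:

#include <list>

// Remove every zero from the list without invalidating the loop iterator.
static void RemoveZeros(std::list<int>* values) {
  for (std::list<int>::iterator iter = values->begin();
       iter != values->end(); ) {
    if (*iter == 0) {
      iter = values->erase(iter);  // erase() returns the next element
    } else {
      ++iter;
    }
  }
}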
202 void WorkerService::CancelCreateDedicatedWorker(IPC::Message::Sender* sender, | 207 void WorkerService::CancelCreateDedicatedWorker(IPC::Message::Sender* sender, |
203 int sender_route_id) { | 208 int sender_route_id) { |
(...skipping 83 matching lines...)
287 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); | 292 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); |
288 if (!smallest || worker->instances().size() < smallest->instances().size()) | 293 if (!smallest || worker->instances().size() < smallest->instances().size()) |
289 smallest = worker; | 294 smallest = worker; |
290 } | 295 } |
291 | 296 |
292 return smallest; | 297 return smallest; |
293 } | 298 } |
294 | 299 |
295 bool WorkerService::CanCreateWorkerProcess( | 300 bool WorkerService::CanCreateWorkerProcess( |
296 const WorkerProcessHost::WorkerInstance& instance) { | 301 const WorkerProcessHost::WorkerInstance& instance) { |
| 302 // Worker can be fired off if *any* parent has room. |
| 303 const WorkerDocumentSet::DocumentInfoSet& parents = |
| 304 instance.worker_document_set()->documents(); |
| 305 |
| 306 for (WorkerDocumentSet::DocumentInfoSet::const_iterator parent_iter = |
| 307 parents.begin(); |
| 308 parent_iter != parents.end(); ++parent_iter) { |
| 309 bool hit_total_worker_limit = false; |
| 310 if (TabCanCreateWorkerProcess(parent_iter->renderer_id(), |
| 311 parent_iter->render_view_route_id(), |
| 312 &hit_total_worker_limit)) { |
| 313 return true; |
| 314 } |
| 315 // Return false if already at the global worker limit (no need to continue |
| 316 // checking parent tabs). |
| 317 if (hit_total_worker_limit) |
| 318 return false; |
| 319 } |
| 320 // If we've reached here, none of the parent tabs is allowed to create an |
| 321 // instance. |
| 322 return false; |
| 323 } |
| 324 |
| 325 bool WorkerService::TabCanCreateWorkerProcess(int renderer_id, |
| 326 int render_view_route_id, |
| 327 bool* hit_total_worker_limit) { |
297 int total_workers = 0; | 328 int total_workers = 0; |
298 int workers_per_tab = 0; | 329 int workers_per_tab = 0; |
| 330 *hit_total_worker_limit = false; |
299 for (ChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); | 331 for (ChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); |
300 !iter.Done(); ++iter) { | 332 !iter.Done(); ++iter) { |
301 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); | 333 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); |
302 for (WorkerProcessHost::Instances::const_iterator cur_instance = | 334 for (WorkerProcessHost::Instances::const_iterator cur_instance = |
303 worker->instances().begin(); | 335 worker->instances().begin(); |
304 cur_instance != worker->instances().end(); ++cur_instance) { | 336 cur_instance != worker->instances().end(); ++cur_instance) { |
305 total_workers++; | 337 total_workers++; |
306 if (total_workers >= kMaxWorkersWhenSeparate) | 338 if (total_workers >= kMaxWorkersWhenSeparate) { |
| 339 *hit_total_worker_limit = true; |
307 return false; | 340 return false; |
308 if (cur_instance->renderer_id() == instance.renderer_id() && | 341 } |
309 cur_instance->render_view_route_id() == | 342 if (cur_instance->RendererIsParent(renderer_id, render_view_route_id)) { |
310 instance.render_view_route_id()) { | |
311 workers_per_tab++; | 343 workers_per_tab++; |
312 if (workers_per_tab >= kMaxWorkersPerTabWhenSeparate) | 344 if (workers_per_tab >= kMaxWorkersPerTabWhenSeparate) |
313 return false; | 345 return false; |
314 } | 346 } |
315 } | 347 } |
316 } | 348 } |
317 | 349 |
318 return true; | 350 return true; |
319 } | 351 } |
320 | 352 |
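The split above replaces the old single-parent check with a two-tier policy: TabCanCreateWorkerProcess() enforces both the global cap (kMaxWorkersWhenSeparate) and the per-tab cap (kMaxWorkersPerTabWhenSeparate), and CanCreateWorkerProcess() lets the worker start as long as any parent document's tab still has room, unless the global cap has already been hit. The standalone sketch below restates that policy with plain integers so the two cut-offs are easy to see; the struct and function are illustrative only, not part of the change.

#include <vector>

struct ParentTab {
  int workers_in_tab;  // workers already attributed to this parent tab
};

// Mirrors the decision made by CanCreateWorkerProcess(): refuse outright at
// the global cap, otherwise allow if any parent tab is under its own cap.
static bool AnyParentHasRoom(const std::vector<ParentTab>& parents,
                             int total_workers,
                             int max_total,      // kMaxWorkersWhenSeparate
                             int max_per_tab) {  // kMaxWorkersPerTabWhenSeparate
  if (total_workers >= max_total)
    return false;
  for (std::vector<ParentTab>::const_iterator iter = parents.begin();
       iter != parents.end(); ++iter) {
    if (iter->workers_in_tab < max_per_tab)
      return true;
  }
  return false;
}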
(...skipping 27 matching lines...)
348 i = queued_workers_.erase(i); | 380 i = queued_workers_.erase(i); |
349 } else { | 381 } else { |
350 ++i; | 382 ++i; |
351 } | 383 } |
352 } | 384 } |
353 | 385 |
354 // Also, see if that render process had any pending shared workers. | 386 // Also, see if that render process had any pending shared workers. |
355 for (WorkerProcessHost::Instances::iterator iter = | 387 for (WorkerProcessHost::Instances::iterator iter = |
356 pending_shared_workers_.begin(); | 388 pending_shared_workers_.begin(); |
357 iter != pending_shared_workers_.end(); ) { | 389 iter != pending_shared_workers_.end(); ) { |
358 iter->RemoveAllAssociatedDocuments(sender); | 390 iter->worker_document_set()->RemoveAll(sender); |
359 if (iter->IsDocumentSetEmpty()) { | 391 if (iter->worker_document_set()->IsEmpty()) { |
360 iter = pending_shared_workers_.erase(iter); | 392 iter = pending_shared_workers_.erase(iter); |
361 } else { | 393 } else { |
362 ++iter; | 394 ++iter; |
363 } | 395 } |
364 } | 396 } |
365 } | 397 } |
366 | 398 |
367 void WorkerService::WorkerProcessDestroyed(WorkerProcessHost* process) { | 399 void WorkerService::WorkerProcessDestroyed(WorkerProcessHost* process) { |
368 if (queued_workers_.empty()) | 400 if (queued_workers_.empty()) |
369 return; | 401 return; |
(...skipping 84 matching lines...)
454 const string16& name, | 486 const string16& name, |
455 bool off_the_record) { | 487 bool off_the_record) { |
456 // Look for an existing pending worker. | 488 // Look for an existing pending worker. |
457 WorkerProcessHost::WorkerInstance* instance = | 489 WorkerProcessHost::WorkerInstance* instance = |
458 FindPendingInstance(url, name, off_the_record); | 490 FindPendingInstance(url, name, off_the_record); |
459 if (instance) | 491 if (instance) |
460 return instance; | 492 return instance; |
461 | 493 |
462 // No existing pending worker - create a new one. | 494 // No existing pending worker - create a new one. |
463 WorkerProcessHost::WorkerInstance pending( | 495 WorkerProcessHost::WorkerInstance pending( |
464 url, true, off_the_record, name, 0, MSG_ROUTING_NONE, MSG_ROUTING_NONE); | 496 url, true, off_the_record, name, MSG_ROUTING_NONE); |
465 pending_shared_workers_.push_back(pending); | 497 pending_shared_workers_.push_back(pending); |
466 return &pending_shared_workers_.back(); | 498 return &pending_shared_workers_.back(); |
467 } | 499 } |
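The tail of this helper is a plain find-or-append over pending_shared_workers_: reuse a matching pending instance if one exists, otherwise push a placeholder (constructed with MSG_ROUTING_NONE, since it has no worker process yet) and return a pointer to it. Returning a pointer into the container is only safe if Instances is a node-based container such as std::list, which keeps element addresses stable across later insertions; with a vector the pointer could dangle. A generic illustration of the pattern, with made-up types:

#include <list>
#include <string>

struct PendingEntry {
  std::string key;
};

// Find-or-append: pointers into a std::list stay valid across push_back().
static PendingEntry* GetOrCreate(std::list<PendingEntry>* pending,
                                 const std::string& key) {
  for (std::list<PendingEntry>::iterator iter = pending->begin();
       iter != pending->end(); ++iter) {
    if (iter->key == key)
      return &*iter;
  }
  PendingEntry entry = { key };
  pending->push_back(entry);
  return &pending->back();
}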