| OLD | NEW |
| 1 // Copyright (c) 2009 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2009 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "chrome/browser/worker_host/worker_service.h" | 5 #include "chrome/browser/worker_host/worker_service.h" |
| 6 | 6 |
| 7 #include <string> | 7 #include <string> |
| 8 | 8 |
| 9 #include "base/command_line.h" | 9 #include "base/command_line.h" |
| 10 #include "base/singleton.h" | 10 #include "base/singleton.h" |
| 11 #include "base/sys_info.h" | 11 #include "base/sys_info.h" |
| 12 #include "base/thread.h" | 12 #include "base/thread.h" |
| 13 #include "chrome/browser/content_settings/host_content_settings_map.h" | 13 #include "chrome/browser/content_settings/host_content_settings_map.h" |
| 14 #include "chrome/browser/plugin_service.h" | 14 #include "chrome/browser/worker_host/worker_message_filter.h" |
| 15 #include "chrome/browser/renderer_host/render_message_filter.h" | |
| 16 #include "chrome/browser/renderer_host/render_process_host.h" | |
| 17 #include "chrome/browser/worker_host/worker_process_host.h" | 15 #include "chrome/browser/worker_host/worker_process_host.h" |
| 18 #include "chrome/common/chrome_switches.h" | 16 #include "chrome/common/chrome_switches.h" |
| 19 #include "chrome/common/notification_service.h" | |
| 20 #include "chrome/common/render_messages.h" | 17 #include "chrome/common/render_messages.h" |
| 18 #include "chrome/common/render_messages_params.h" |
| 21 #include "chrome/common/worker_messages.h" | 19 #include "chrome/common/worker_messages.h" |
| 22 #include "net/base/registry_controlled_domain.h" | 20 #include "net/base/registry_controlled_domain.h" |
| 23 | 21 |
| 24 const int WorkerService::kMaxWorkerProcessesWhenSharing = 10; | 22 const int WorkerService::kMaxWorkerProcessesWhenSharing = 10; |
| 25 const int WorkerService::kMaxWorkersWhenSeparate = 64; | 23 const int WorkerService::kMaxWorkersWhenSeparate = 64; |
| 26 const int WorkerService::kMaxWorkersPerTabWhenSeparate = 16; | 24 const int WorkerService::kMaxWorkersPerTabWhenSeparate = 16; |
| 27 | 25 |
| 28 WorkerService* WorkerService::GetInstance() { | 26 WorkerService* WorkerService::GetInstance() { |
| 29 return Singleton<WorkerService>::get(); | 27 return Singleton<WorkerService>::get(); |
| 30 } | 28 } |
| 31 | 29 |
| 32 WorkerService::WorkerService() | 30 WorkerService::WorkerService() : next_worker_route_id_(0) { |
| 33 : next_worker_route_id_(0), | |
| 34 resource_dispatcher_host_(NULL) { | |
| 35 // Receive a notification if a message filter or WorkerProcessHost is deleted. | |
| 36 registrar_.Add(this, NotificationType::RESOURCE_MESSAGE_FILTER_SHUTDOWN, | |
| 37 NotificationService::AllSources()); | |
| 38 | |
| 39 registrar_.Add(this, NotificationType::WORKER_PROCESS_HOST_SHUTDOWN, | |
| 40 NotificationService::AllSources()); | |
| 41 } | |
| 42 | |
| 43 void WorkerService::Initialize(ResourceDispatcherHost* rdh) { | |
| 44 resource_dispatcher_host_ = rdh; | |
| 45 } | 31 } |
| 46 | 32 |
| 47 WorkerService::~WorkerService() { | 33 WorkerService::~WorkerService() { |
| 48 } | 34 } |
| 49 | 35 |
| 50 bool WorkerService::CreateDedicatedWorker( | 36 void WorkerService::OnWorkerMessageFilterClosing(WorkerMessageFilter* filter) { |
| 51 const GURL& url, | 37 for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); |
| 52 bool is_off_the_record, | 38 !iter.Done(); ++iter) { |
| 53 unsigned long long document_id, | 39 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); |
| 54 int renderer_pid, | 40 worker->FilterShutdown(filter); |
| 55 int render_view_route_id, | 41 } |
| 56 IPC::Message::Sender* sender, | 42 |
| 57 int sender_route_id, | 43 // See if that process had any queued workers. |
| 58 int parent_process_id, | 44 for (WorkerProcessHost::Instances::iterator i = queued_workers_.begin(); |
| 59 int parent_appcache_host_id, | 45 i != queued_workers_.end();) { |
| 60 ChromeURLRequestContext* request_context) { | 46 i->RemoveFilters(filter); |
| 61 return CreateWorker(url, false, is_off_the_record, string16(), | 47 if (i->NumFilters() == 0) { |
| 62 document_id, renderer_pid, render_view_route_id, | 48 i = queued_workers_.erase(i); |
| 63 sender, sender_route_id, | 49 } else { |
| 64 parent_process_id, parent_appcache_host_id, 0, | 50 ++i; |
| 65 request_context); | 51 } |
| 66 } | 52 } |
| 67 | 53 |
| 68 bool WorkerService::CreateSharedWorker( | 54 // Also, see if that process had any pending shared workers. |
| 69 const GURL& url, | 55 for (WorkerProcessHost::Instances::iterator iter = |
| 70 bool is_off_the_record, | 56 pending_shared_workers_.begin(); |
| 71 const string16& name, | 57 iter != pending_shared_workers_.end(); ) { |
| 72 unsigned long long document_id, | 58 iter->worker_document_set()->RemoveAll(filter); |
| 73 int renderer_pid, | 59 if (iter->worker_document_set()->IsEmpty()) { |
| 74 int render_view_route_id, | 60 iter = pending_shared_workers_.erase(iter); |
| 75 IPC::Message::Sender* sender, | 61 } else { |
| 76 int sender_route_id, | 62 ++iter; |
| 77 int64 main_resource_appcache_id, | 63 } |
| 78 ChromeURLRequestContext* request_context) { | 64 } |
| 79 return CreateWorker(url, true, is_off_the_record, name, | 65 |
| 80 document_id, renderer_pid, render_view_route_id, | 66 // Either a worker process has shut down, in which case we can start one of |
| 81 sender, sender_route_id, | 67 // the queued workers, or a renderer has shut down, in which case it doesn't |
| 82 0, 0, main_resource_appcache_id, | 68 // affect anything. We call this function in both scenarios because then we |
| 83 request_context); | 69 // don't have to keep track which filters are from worker processes. |
| 84 } | 70 TryStartingQueuedWorker(); |
| 85 | 71 } |
| 86 bool WorkerService::CreateWorker( | 72 |
| 87 const GURL& url, | 73 void WorkerService::CreateWorker(const ViewHostMsg_CreateWorker_Params& params, |
| 88 bool is_shared, | 74 int route_id, |
| 89 bool off_the_record, | 75 WorkerMessageFilter* filter, |
| 90 const string16& name, | 76 URLRequestContextGetter* request_context) { |
| 91 unsigned long long document_id, | 77 |
| 92 int renderer_id, | 78 ChromeURLRequestContext* context = static_cast<ChromeURLRequestContext*>( |
| 93 int render_view_route_id, | 79 request_context->GetURLRequestContext()); |
| 94 IPC::Message::Sender* sender, | 80 |
| 95 int sender_route_id, | |
| 96 int parent_process_id, | |
| 97 int parent_appcache_host_id, | |
| 98 int64 main_resource_appcache_id, | |
| 99 ChromeURLRequestContext* request_context) { | |
| 100 // Generate a unique route id for the browser-worker communication that's | 81 // Generate a unique route id for the browser-worker communication that's |
| 101 // unique among all worker processes. That way when the worker process sends | 82 // unique among all worker processes. That way when the worker process sends |
| 102 // a wrapped IPC message through us, we know which WorkerProcessHost to give | 83 // a wrapped IPC message through us, we know which WorkerProcessHost to give |
| 103 // it to. | 84 // it to. |
| 104 WorkerProcessHost::WorkerInstance instance(url, | 85 WorkerProcessHost::WorkerInstance instance( |
| 105 is_shared, | 86 params.url, |
| 106 off_the_record, | 87 params.is_shared, |
| 107 name, | 88 context->is_off_the_record(), |
| 108 next_worker_route_id(), | 89 params.name, |
| 109 parent_process_id, | 90 next_worker_route_id(), |
| 110 parent_appcache_host_id, | 91 params.is_shared ? 0 : filter->render_process_id(), |
| 111 main_resource_appcache_id, | 92 params.is_shared ? 0 : params.parent_appcache_host_id, |
| 112 request_context); | 93 params.is_shared ? params.script_resource_appcache_id : 0, |
| 113 instance.AddSender(sender, sender_route_id); | 94 request_context); |
| 95 instance.AddFilter(filter, route_id); |
| 114 instance.worker_document_set()->Add( | 96 instance.worker_document_set()->Add( |
| 115 sender, document_id, renderer_id, render_view_route_id); | 97 filter, params.document_id, filter->render_process_id(), |
| 116 | 98 params.render_view_route_id); |
| 117 return CreateWorkerFromInstance(instance); | 99 |
| 100 CreateWorkerFromInstance(instance); |
| 101 } |
| 102 |
| 103 void WorkerService::LookupSharedWorker( |
| 104 const ViewHostMsg_CreateWorker_Params& params, |
| 105 int route_id, |
| 106 WorkerMessageFilter* filter, |
| 107 bool off_the_record, |
| 108 bool* exists, |
| 109 bool* url_mismatch) { |
| 110 |
| 111 *exists = true; |
| 112 WorkerProcessHost::WorkerInstance* instance = FindSharedWorkerInstance( |
| 113 params.url, params.name, off_the_record); |
| 114 |
| 115 if (!instance) { |
| 116 // If no worker instance currently exists, we need to create a pending |
| 117 // instance - this is to make sure that any subsequent lookups passing a |
| 118 // mismatched URL get the appropriate url_mismatch error at lookup time. |
| 119 // Having named shared workers was a Really Bad Idea due to details like |
| 120 // this. |
| 121 instance = CreatePendingInstance(params.url, params.name, off_the_record); |
| 122 *exists = false; |
| 123 } |
| 124 |
| 125 // Make sure the passed-in instance matches the URL - if not, return an |
| 126 // error. |
| 127 if (params.url != instance->url()) { |
| 128 *url_mismatch = true; |
| 129 *exists = false; |
| 130 } else { |
| 131 *url_mismatch = false; |
| 132 // Add our route ID to the existing instance so we can send messages to it. |
| 133 instance->AddFilter(filter, route_id); |
| 134 |
| 135 // Add the passed filter/document_id to the worker instance. |
| 136 // TODO(atwilson): This won't work if the message is from a worker process. |
| 137 // We don't support that yet though (this message is only sent from |
| 138 // renderers) but when we do, we'll need to add code to pass in the current |
| 139 // worker's document set for nested workers. |
| 140 instance->worker_document_set()->Add( |
| 141 filter, params.document_id, filter->render_process_id(), |
| 142 params.render_view_route_id); |
| 143 } |
| 144 } |
| 145 |
| 146 void WorkerService::CancelCreateDedicatedWorker( |
| 147 int route_id, |
| 148 WorkerMessageFilter* filter) { |
| 149 for (WorkerProcessHost::Instances::iterator i = queued_workers_.begin(); |
| 150 i != queued_workers_.end(); ++i) { |
| 151 if (i->HasFilter(filter, route_id)) { |
| 152 DCHECK(!i->shared()); |
| 153 queued_workers_.erase(i); |
| 154 return; |
| 155 } |
| 156 } |
| 157 |
| 158 // There could be a race condition where the WebWorkerProxy told us to cancel |
| 159 // the worker right as we sent it a message saying it's been created. Look at |
| 160 // the running workers. |
| 161 for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); |
| 162 !iter.Done(); ++iter) { |
| 163 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); |
| 164 for (WorkerProcessHost::Instances::const_iterator instance = |
| 165 worker->instances().begin(); |
| 166 instance != worker->instances().end(); ++instance) { |
| 167 if (instance->HasFilter(filter, route_id)) { |
| 168 // Fake a worker destroyed message so that WorkerProcessHost cleans up |
| 169 // properly. |
| 170 WorkerHostMsg_WorkerContextDestroyed message(route_id); |
| 171 ForwardToWorker(message, filter); |
| 172 return; |
| 173 } |
| 174 } |
| 175 } |
| 176 |
| 177 DCHECK(false) << "Couldn't find worker to cancel"; |
| 178 } |
| 179 |
| 180 void WorkerService::ForwardToWorker(const IPC::Message& message, |
| 181 WorkerMessageFilter* filter) { |
| 182 for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); |
| 183 !iter.Done(); ++iter) { |
| 184 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); |
| 185 if (worker->FilterMessage(message, filter)) |
| 186 return; |
| 187 } |
| 188 |
| 189 // TODO(jabdelmalek): tell filter that callee is gone |
| 190 } |
| 191 |
| 192 void WorkerService::DocumentDetached(unsigned long long document_id, |
| 193 WorkerMessageFilter* filter) { |
| 194 // Any associated shared workers can be shut down. |
| 195 for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); |
| 196 !iter.Done(); ++iter) { |
| 197 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); |
| 198 worker->DocumentDetached(filter, document_id); |
| 199 } |
| 200 |
| 201 // Remove any queued shared workers for this document. |
| 202 for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin(); |
| 203 iter != queued_workers_.end();) { |
| 204 if (iter->shared()) { |
| 205 iter->worker_document_set()->Remove(filter, document_id); |
| 206 if (iter->worker_document_set()->IsEmpty()) { |
| 207 iter = queued_workers_.erase(iter); |
| 208 continue; |
| 209 } |
| 210 } |
| 211 ++iter; |
| 212 } |
| 213 |
| 214 // Remove the document from any pending shared workers. |
| 215 for (WorkerProcessHost::Instances::iterator iter = |
| 216 pending_shared_workers_.begin(); |
| 217 iter != pending_shared_workers_.end(); ) { |
| 218 iter->worker_document_set()->Remove(filter, document_id); |
| 219 if (iter->worker_document_set()->IsEmpty()) { |
| 220 iter = pending_shared_workers_.erase(iter); |
| 221 } else { |
| 222 ++iter; |
| 223 } |
| 224 } |
| 118 } | 225 } |
| 119 | 226 |
| 120 bool WorkerService::CreateWorkerFromInstance( | 227 bool WorkerService::CreateWorkerFromInstance( |
| 121 WorkerProcessHost::WorkerInstance instance) { | 228 WorkerProcessHost::WorkerInstance instance) { |
| 122 | |
| 123 // TODO(michaeln): We need to ensure that a process is working | 229 // TODO(michaeln): We need to ensure that a process is working |
| 124 // on behalf of a single profile. The process sharing logic below | 230 // on behalf of a single profile. The process sharing logic below |
| 125 // does not ensure that. Consider making WorkerService a per profile | 231 // does not ensure that. Consider making WorkerService a per profile |
| 126 // object to help with this. | 232 // object to help with this. |
| 127 WorkerProcessHost* worker = NULL; | 233 WorkerProcessHost* worker = NULL; |
| 128 if (CommandLine::ForCurrentProcess()->HasSwitch( | 234 if (CommandLine::ForCurrentProcess()->HasSwitch( |
| 129 switches::kWebWorkerProcessPerCore)) { | 235 switches::kWebWorkerProcessPerCore)) { |
| 130 worker = GetProcessToFillUpCores(); | 236 worker = GetProcessToFillUpCores(); |
| 131 } else if (CommandLine::ForCurrentProcess()->HasSwitch( | 237 } else if (CommandLine::ForCurrentProcess()->HasSwitch( |
| 132 switches::kWebWorkerShareProcesses)) { | 238 switches::kWebWorkerShareProcesses)) { |
| 133 worker = GetProcessForDomain(instance.url()); | 239 worker = GetProcessForDomain(instance.url()); |
| 134 } else { // One process per worker. | 240 } else { // One process per worker. |
| 135 if (!CanCreateWorkerProcess(instance)) { | 241 if (!CanCreateWorkerProcess(instance)) { |
| 136 queued_workers_.push_back(instance); | 242 queued_workers_.push_back(instance); |
| 137 return true; | 243 return true; |
| 138 } | 244 } |
| 139 } | 245 } |
| 140 | 246 |
| 141 // Check to see if this shared worker is already running (two pages may have | 247 // Check to see if this shared worker is already running (two pages may have |
| 142 // tried to start up the worker simultaneously). | 248 // tried to start up the worker simultaneously). |
| 143 if (instance.shared()) { | 249 if (instance.shared()) { |
| 144 // See if a worker with this name already exists. | 250 // See if a worker with this name already exists. |
| 145 WorkerProcessHost::WorkerInstance* existing_instance = | 251 WorkerProcessHost::WorkerInstance* existing_instance = |
| 146 FindSharedWorkerInstance( | 252 FindSharedWorkerInstance( |
| 147 instance.url(), instance.name(), instance.off_the_record()); | 253 instance.url(), instance.name(), instance.off_the_record()); |
| 148 WorkerProcessHost::WorkerInstance::SenderInfo sender_info = | 254 WorkerProcessHost::WorkerInstance::FilterInfo filter_info = |
| 149 instance.GetSender(); | 255 instance.GetFilter(); |
| 150 // If this worker is already running, no need to create a new copy. Just | 256 // If this worker is already running, no need to create a new copy. Just |
| 151 // inform the caller that the worker has been created. | 257 // inform the caller that the worker has been created. |
| 152 if (existing_instance) { | 258 if (existing_instance) { |
| 153 // Walk the worker's sender list to see if this client is listed. If not, | 259 // Walk the worker's filter list to see if this client is listed. If not, |
| 154 // then it means that the worker started by the client already exited so | 260 // then it means that the worker started by the client already exited so |
| 155 // we should not attach to this new one (http://crbug.com/29243). | 261 // we should not attach to this new one (http://crbug.com/29243). |
| 156 if (!existing_instance->HasSender(sender_info.first, sender_info.second)) | 262 if (!existing_instance->HasFilter(filter_info.first, filter_info.second)) |
| 157 return false; | 263 return false; |
| 158 sender_info.first->Send(new ViewMsg_WorkerCreated(sender_info.second)); | 264 filter_info.first->Send(new ViewMsg_WorkerCreated(filter_info.second)); |
| 159 return true; | 265 return true; |
| 160 } | 266 } |
| 161 | 267 |
| 162 // Look to see if there's a pending instance. | 268 // Look to see if there's a pending instance. |
| 163 WorkerProcessHost::WorkerInstance* pending = FindPendingInstance( | 269 WorkerProcessHost::WorkerInstance* pending = FindPendingInstance( |
| 164 instance.url(), instance.name(), instance.off_the_record()); | 270 instance.url(), instance.name(), instance.off_the_record()); |
| 165 // If there's no instance *and* no pending instance (or there is a pending | 271 // If there's no instance *and* no pending instance (or there is a pending |
| 166 // instance but it does not contain our sender info), then it means the | 272 // instance but it does not contain our filter info), then it means the |
| 167 // worker started up and exited already. Log a warning because this should | 273 // worker started up and exited already. Log a warning because this should |
| 168 // be a very rare occurrence and is probably a bug, but it *can* happen so | 274 // be a very rare occurrence and is probably a bug, but it *can* happen so |
| 169 // handle it gracefully. | 275 // handle it gracefully. |
| 170 if (!pending || | 276 if (!pending || |
| 171 !pending->HasSender(sender_info.first, sender_info.second)) { | 277 !pending->HasFilter(filter_info.first, filter_info.second)) { |
| 172 DLOG(WARNING) << "Pending worker already exited"; | 278 DLOG(WARNING) << "Pending worker already exited"; |
| 173 return false; | 279 return false; |
| 174 } | 280 } |
| 175 | 281 |
| 176 // Assign the accumulated document set and sender list for this pending | 282 // Assign the accumulated document set and filter list for this pending |
| 177 // worker to the new instance. | 283 // worker to the new instance. |
| 178 DCHECK(!pending->worker_document_set()->IsEmpty()); | 284 DCHECK(!pending->worker_document_set()->IsEmpty()); |
| 179 instance.ShareDocumentSet(*pending); | 285 instance.ShareDocumentSet(*pending); |
| 180 for (WorkerProcessHost::WorkerInstance::SenderList::const_iterator i = | 286 for (WorkerProcessHost::WorkerInstance::FilterList::const_iterator i = |
| 181 pending->senders().begin(); | 287 pending->filters().begin(); |
| 182 i != pending->senders().end(); ++i) { | 288 i != pending->filters().end(); ++i) { |
| 183 instance.AddSender(i->first, i->second); | 289 instance.AddFilter(i->first, i->second); |
| 184 } | 290 } |
| 185 RemovePendingInstances( | 291 RemovePendingInstances( |
| 186 instance.url(), instance.name(), instance.off_the_record()); | 292 instance.url(), instance.name(), instance.off_the_record()); |
| 187 | 293 |
| 188 // Remove any queued instances of this worker and copy over the sender to | 294 // Remove any queued instances of this worker and copy over the filter to |
| 189 // this instance. | 295 // this instance. |
| 190 for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin(); | 296 for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin(); |
| 191 iter != queued_workers_.end();) { | 297 iter != queued_workers_.end();) { |
| 192 if (iter->Matches(instance.url(), instance.name(), | 298 if (iter->Matches(instance.url(), instance.name(), |
| 193 instance.off_the_record())) { | 299 instance.off_the_record())) { |
| 194 DCHECK(iter->NumSenders() == 1); | 300 DCHECK(iter->NumFilters() == 1); |
| 195 WorkerProcessHost::WorkerInstance::SenderInfo sender_info = | 301 WorkerProcessHost::WorkerInstance::FilterInfo filter_info = |
| 196 iter->GetSender(); | 302 iter->GetFilter(); |
| 197 instance.AddSender(sender_info.first, sender_info.second); | 303 instance.AddFilter(filter_info.first, filter_info.second); |
| 198 iter = queued_workers_.erase(iter); | 304 iter = queued_workers_.erase(iter); |
| 199 } else { | 305 } else { |
| 200 ++iter; | 306 ++iter; |
| 201 } | 307 } |
| 202 } | 308 } |
| 203 } | 309 } |
| 204 | 310 |
| 205 if (!worker) { | 311 if (!worker) { |
| 206 worker = new WorkerProcessHost(resource_dispatcher_host_, | 312 WorkerMessageFilter* first_filter = instance.filters().begin()->first; |
| 207 instance.request_context()); | 313 worker = new WorkerProcessHost( |
| 208 if (!worker->Init()) { | 314 first_filter->resource_dispatcher_host(), |
| 315 instance.request_context()); |
| 316 // TODO(atwilson): This won't work if the message is from a worker process. |
| 317 // We don't support that yet though (this message is only sent from |
| 318 // renderers) but when we do, we'll need to add code to pass in the current |
| 319 // worker's document set for nested workers. |
| 320 if (!worker->Init(first_filter->render_process_id())) { |
| 209 delete worker; | 321 delete worker; |
| 210 return false; | 322 return false; |
| 211 } | 323 } |
| 212 } | 324 } |
| 213 | 325 |
| 214 // TODO(michaeln): As written, test can fail per my earlier comment in | 326 // TODO(michaeln): As written, test can fail per my earlier comment in |
| 215 // this method, but that's a bug. | 327 // this method, but that's a bug. |
| 216 // DCHECK(worker->request_context() == instance.request_context()); | 328 // DCHECK(worker->request_context() == instance.request_context()); |
| 217 | 329 |
| 218 worker->CreateWorker(instance); | 330 worker->CreateWorker(instance); |
| 219 return true; | 331 return true; |
| 220 } | 332 } |
| 221 | 333 |
| 222 bool WorkerService::LookupSharedWorker( | |
| 223 const GURL &url, | |
| 224 const string16& name, | |
| 225 bool off_the_record, | |
| 226 unsigned long long document_id, | |
| 227 int renderer_id, | |
| 228 int render_view_route_id, | |
| 229 IPC::Message::Sender* sender, | |
| 230 int sender_route_id, | |
| 231 bool* url_mismatch) { | |
| 232 bool found_instance = true; | |
| 233 WorkerProcessHost::WorkerInstance* instance = | |
| 234 FindSharedWorkerInstance(url, name, off_the_record); | |
| 235 | |
| 236 if (!instance) { | |
| 237 // If no worker instance currently exists, we need to create a pending | |
| 238 // instance - this is to make sure that any subsequent lookups passing a | |
| 239 // mismatched URL get the appropriate url_mismatch error at lookup time. | |
| 240 // Having named shared workers was a Really Bad Idea due to details like | |
| 241 // this. | |
| 242 instance = CreatePendingInstance(url, name, off_the_record); | |
| 243 found_instance = false; | |
| 244 } | |
| 245 | |
| 246 // Make sure the passed-in instance matches the URL - if not, return an | |
| 247 // error. | |
| 248 if (url != instance->url()) { | |
| 249 *url_mismatch = true; | |
| 250 return false; | |
| 251 } else { | |
| 252 *url_mismatch = false; | |
| 253 } | |
| 254 | |
| 255 // Add our route ID to the existing instance so we can send messages to it. | |
| 256 instance->AddSender(sender, sender_route_id); | |
| 257 | |
| 258 // Add the passed sender/document_id to the worker instance. | |
| 259 instance->worker_document_set()->Add( | |
| 260 sender, document_id, renderer_id, render_view_route_id); | |
| 261 return found_instance; | |
| 262 } | |
| 263 | |
| 264 void WorkerService::DocumentDetached(IPC::Message::Sender* sender, | |
| 265 unsigned long long document_id) { | |
| 266 for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); | |
| 267 !iter.Done(); ++iter) { | |
| 268 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); | |
| 269 worker->DocumentDetached(sender, document_id); | |
| 270 } | |
| 271 | |
| 272 // Remove any queued shared workers for this document. | |
| 273 for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin(); | |
| 274 iter != queued_workers_.end();) { | |
| 275 if (iter->shared()) { | |
| 276 iter->worker_document_set()->Remove(sender, document_id); | |
| 277 if (iter->worker_document_set()->IsEmpty()) { | |
| 278 iter = queued_workers_.erase(iter); | |
| 279 continue; | |
| 280 } | |
| 281 } | |
| 282 ++iter; | |
| 283 } | |
| 284 | |
| 285 // Remove the document from any pending shared workers. | |
| 286 for (WorkerProcessHost::Instances::iterator iter = | |
| 287 pending_shared_workers_.begin(); | |
| 288 iter != pending_shared_workers_.end(); ) { | |
| 289 iter->worker_document_set()->Remove(sender, document_id); | |
| 290 if (iter->worker_document_set()->IsEmpty()) { | |
| 291 iter = pending_shared_workers_.erase(iter); | |
| 292 } else { | |
| 293 ++iter; | |
| 294 } | |
| 295 } | |
| 296 } | |
| 297 | |
| 298 void WorkerService::CancelCreateDedicatedWorker(IPC::Message::Sender* sender, | |
| 299 int sender_route_id) { | |
| 300 for (WorkerProcessHost::Instances::iterator i = queued_workers_.begin(); | |
| 301 i != queued_workers_.end(); ++i) { | |
| 302 if (i->HasSender(sender, sender_route_id)) { | |
| 303 DCHECK(!i->shared()); | |
| 304 queued_workers_.erase(i); | |
| 305 return; | |
| 306 } | |
| 307 } | |
| 308 | |
| 309 // There could be a race condition where the WebWorkerProxy told us to cancel | |
| 310 // the worker right as we sent it a message saying it's been created. Look at | |
| 311 // the running workers. | |
| 312 for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); | |
| 313 !iter.Done(); ++iter) { | |
| 314 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); | |
| 315 for (WorkerProcessHost::Instances::const_iterator instance = | |
| 316 worker->instances().begin(); | |
| 317 instance != worker->instances().end(); ++instance) { | |
| 318 if (instance->HasSender(sender, sender_route_id)) { | |
| 319 // Fake a worker destroyed message so that WorkerProcessHost cleans up | |
| 320 // properly. | |
| 321 WorkerHostMsg_WorkerContextDestroyed msg(sender_route_id); | |
| 322 ForwardMessage(msg, sender); | |
| 323 return; | |
| 324 } | |
| 325 } | |
| 326 } | |
| 327 | |
| 328 DCHECK(false) << "Couldn't find worker to cancel"; | |
| 329 } | |
| 330 | |
| 331 void WorkerService::ForwardMessage(const IPC::Message& message, | |
| 332 IPC::Message::Sender* sender) { | |
| 333 for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); | |
| 334 !iter.Done(); ++iter) { | |
| 335 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); | |
| 336 if (worker->FilterMessage(message, sender)) | |
| 337 return; | |
| 338 } | |
| 339 | |
| 340 // TODO(jabdelmalek): tell sender that callee is gone | |
| 341 } | |
| 342 | |
| 343 WorkerProcessHost* WorkerService::GetProcessForDomain(const GURL& url) { | 334 WorkerProcessHost* WorkerService::GetProcessForDomain(const GURL& url) { |
| 344 int num_processes = 0; | 335 int num_processes = 0; |
| 345 std::string domain = | 336 std::string domain = |
| 346 net::RegistryControlledDomainService::GetDomainAndRegistry(url); | 337 net::RegistryControlledDomainService::GetDomainAndRegistry(url); |
| 347 for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); | 338 for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); |
| 348 !iter.Done(); ++iter) { | 339 !iter.Done(); ++iter) { |
| 349 num_processes++; | 340 num_processes++; |
| 350 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); | 341 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); |
| 351 for (WorkerProcessHost::Instances::const_iterator instance = | 342 for (WorkerProcessHost::Instances::const_iterator instance = |
| 352 worker->instances().begin(); | 343 worker->instances().begin(); |
| (...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 391 bool WorkerService::CanCreateWorkerProcess( | 382 bool WorkerService::CanCreateWorkerProcess( |
| 392 const WorkerProcessHost::WorkerInstance& instance) { | 383 const WorkerProcessHost::WorkerInstance& instance) { |
| 393 // Worker can be fired off if *any* parent has room. | 384 // Worker can be fired off if *any* parent has room. |
| 394 const WorkerDocumentSet::DocumentInfoSet& parents = | 385 const WorkerDocumentSet::DocumentInfoSet& parents = |
| 395 instance.worker_document_set()->documents(); | 386 instance.worker_document_set()->documents(); |
| 396 | 387 |
| 397 for (WorkerDocumentSet::DocumentInfoSet::const_iterator parent_iter = | 388 for (WorkerDocumentSet::DocumentInfoSet::const_iterator parent_iter = |
| 398 parents.begin(); | 389 parents.begin(); |
| 399 parent_iter != parents.end(); ++parent_iter) { | 390 parent_iter != parents.end(); ++parent_iter) { |
| 400 bool hit_total_worker_limit = false; | 391 bool hit_total_worker_limit = false; |
| 401 if (TabCanCreateWorkerProcess(parent_iter->renderer_id(), | 392 if (TabCanCreateWorkerProcess(parent_iter->render_process_id(), |
| 402 parent_iter->render_view_route_id(), | 393 parent_iter->render_view_id(), |
| 403 &hit_total_worker_limit)) { | 394 &hit_total_worker_limit)) { |
| 404 return true; | 395 return true; |
| 405 } | 396 } |
| 406 // Return false if already at the global worker limit (no need to continue | 397 // Return false if already at the global worker limit (no need to continue |
| 407 // checking parent tabs). | 398 // checking parent tabs). |
| 408 if (hit_total_worker_limit) | 399 if (hit_total_worker_limit) |
| 409 return false; | 400 return false; |
| 410 } | 401 } |
| 411 // If we've reached here, none of the parent tabs is allowed to create an | 402 // If we've reached here, none of the parent tabs is allowed to create an |
| 412 // instance. | 403 // instance. |
| 413 return false; | 404 return false; |
| 414 } | 405 } |
| 415 | 406 |
| 416 bool WorkerService::TabCanCreateWorkerProcess(int renderer_id, | 407 bool WorkerService::TabCanCreateWorkerProcess(int render_process_id, |
| 417 int render_view_route_id, | 408 int render_view_id, |
| 418 bool* hit_total_worker_limit) { | 409 bool* hit_total_worker_limit) { |
| 419 int total_workers = 0; | 410 int total_workers = 0; |
| 420 int workers_per_tab = 0; | 411 int workers_per_tab = 0; |
| 421 *hit_total_worker_limit = false; | 412 *hit_total_worker_limit = false; |
| 422 for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); | 413 for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); |
| 423 !iter.Done(); ++iter) { | 414 !iter.Done(); ++iter) { |
| 424 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); | 415 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); |
| 425 for (WorkerProcessHost::Instances::const_iterator cur_instance = | 416 for (WorkerProcessHost::Instances::const_iterator cur_instance = |
| 426 worker->instances().begin(); | 417 worker->instances().begin(); |
| 427 cur_instance != worker->instances().end(); ++cur_instance) { | 418 cur_instance != worker->instances().end(); ++cur_instance) { |
| 428 total_workers++; | 419 total_workers++; |
| 429 if (total_workers >= kMaxWorkersWhenSeparate) { | 420 if (total_workers >= kMaxWorkersWhenSeparate) { |
| 430 *hit_total_worker_limit = true; | 421 *hit_total_worker_limit = true; |
| 431 return false; | 422 return false; |
| 432 } | 423 } |
| 433 if (cur_instance->RendererIsParent(renderer_id, render_view_route_id)) { | 424 if (cur_instance->RendererIsParent(render_process_id, render_view_id)) { |
| 434 workers_per_tab++; | 425 workers_per_tab++; |
| 435 if (workers_per_tab >= kMaxWorkersPerTabWhenSeparate) | 426 if (workers_per_tab >= kMaxWorkersPerTabWhenSeparate) |
| 436 return false; | 427 return false; |
| 437 } | 428 } |
| 438 } | 429 } |
| 439 } | 430 } |
| 440 | 431 |
| 441 return true; | 432 return true; |
| 442 } | 433 } |
| 443 | 434 |
| 444 void WorkerService::Observe(NotificationType type, | 435 void WorkerService::TryStartingQueuedWorker() { |
| 445 const NotificationSource& source, | |
| 446 const NotificationDetails& details) { | |
| 447 if (type.value == NotificationType::RESOURCE_MESSAGE_FILTER_SHUTDOWN) { | |
| 448 RenderMessageFilter* sender = Source<RenderMessageFilter>(source).ptr(); | |
| 449 SenderShutdown(sender); | |
| 450 } else if (type.value == NotificationType::WORKER_PROCESS_HOST_SHUTDOWN) { | |
| 451 WorkerProcessHost* sender = Source<WorkerProcessHost>(source).ptr(); | |
| 452 SenderShutdown(sender); | |
| 453 WorkerProcessDestroyed(sender); | |
| 454 } else { | |
| 455 NOTREACHED(); | |
| 456 } | |
| 457 } | |
| 458 | |
| 459 void WorkerService::SenderShutdown(IPC::Message::Sender* sender) { | |
| 460 for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); | |
| 461 !iter.Done(); ++iter) { | |
| 462 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); | |
| 463 worker->SenderShutdown(sender); | |
| 464 } | |
| 465 | |
| 466 // See if that render process had any queued workers. | |
| 467 for (WorkerProcessHost::Instances::iterator i = queued_workers_.begin(); | |
| 468 i != queued_workers_.end();) { | |
| 469 i->RemoveSenders(sender); | |
| 470 if (i->NumSenders() == 0) { | |
| 471 i = queued_workers_.erase(i); | |
| 472 } else { | |
| 473 ++i; | |
| 474 } | |
| 475 } | |
| 476 | |
| 477 // Also, see if that render process had any pending shared workers. | |
| 478 for (WorkerProcessHost::Instances::iterator iter = | |
| 479 pending_shared_workers_.begin(); | |
| 480 iter != pending_shared_workers_.end(); ) { | |
| 481 iter->worker_document_set()->RemoveAll(sender); | |
| 482 if (iter->worker_document_set()->IsEmpty()) { | |
| 483 iter = pending_shared_workers_.erase(iter); | |
| 484 } else { | |
| 485 ++iter; | |
| 486 } | |
| 487 } | |
| 488 } | |
| 489 | |
| 490 void WorkerService::WorkerProcessDestroyed(WorkerProcessHost* process) { | |
| 491 if (queued_workers_.empty()) | 436 if (queued_workers_.empty()) |
| 492 return; | 437 return; |
| 493 | 438 |
| 494 for (WorkerProcessHost::Instances::iterator i = queued_workers_.begin(); | 439 for (WorkerProcessHost::Instances::iterator i = queued_workers_.begin(); |
| 495 i != queued_workers_.end();) { | 440 i != queued_workers_.end();) { |
| 496 if (CanCreateWorkerProcess(*i)) { | 441 if (CanCreateWorkerProcess(*i)) { |
| 497 WorkerProcessHost::WorkerInstance instance = *i; | 442 WorkerProcessHost::WorkerInstance instance = *i; |
| 498 queued_workers_.erase(i); | 443 queued_workers_.erase(i); |
| 499 CreateWorkerFromInstance(instance); | 444 CreateWorkerFromInstance(instance); |
| 500 | 445 |
| 501 // CreateWorkerFromInstance can modify the queued_workers_ list when it | 446 // CreateWorkerFromInstance can modify the queued_workers_ list when it |
| 502 // coalesces queued instances after starting a shared worker, so we | 447 // coalesces queued instances after starting a shared worker, so we |
| 503 // have to rescan the list from the beginning (our iterator is now | 448 // have to rescan the list from the beginning (our iterator is now |
| 504 // invalid). This is not a big deal as having any queued workers will be | 449 // invalid). This is not a big deal as having any queued workers will be |
| 505 // rare in practice so the list will be small. | 450 // rare in practice so the list will be small. |
| 506 i = queued_workers_.begin(); | 451 i = queued_workers_.begin(); |
| 507 } else { | 452 } else { |
| 508 ++i; | 453 ++i; |
| 509 } | 454 } |
| 510 } | 455 } |
| 511 } | 456 } |
| 512 | 457 |
| 458 bool WorkerService::GetRendererForWorker(int worker_process_id, |
| 459 int* render_process_id, |
| 460 int* render_view_id) const { |
| 461 for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); |
| 462 !iter.Done(); ++iter) { |
| 463 if (iter->id() != worker_process_id) |
| 464 continue; |
| 465 |
| 466 // This code assumes one worker per process, see function comment in header! |
| 467 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); |
| 468 WorkerProcessHost::Instances::const_iterator first_instance = |
| 469 worker->instances().begin(); |
| 470 if (first_instance == worker->instances().end()) |
| 471 return false; |
| 472 |
| 473 WorkerDocumentSet::DocumentInfoSet::const_iterator info = |
| 474 first_instance->worker_document_set()->documents().begin(); |
| 475 *render_process_id = info->render_process_id(); |
| 476 *render_view_id = info->render_view_id(); |
| 477 return true; |
| 478 } |
| 479 return false; |
| 480 } |
| 481 |
| 513 const WorkerProcessHost::WorkerInstance* WorkerService::FindWorkerInstance( | 482 const WorkerProcessHost::WorkerInstance* WorkerService::FindWorkerInstance( |
| 514 int worker_process_id) { | 483 int worker_process_id) { |
| 515 for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); | 484 for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS); |
| 516 !iter.Done(); ++iter) { | 485 !iter.Done(); ++iter) { |
| 517 if (iter->id() != worker_process_id) | 486 if (iter->id() != worker_process_id) |
| 518 continue; | 487 continue; |
| 519 | 488 |
| 520 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); | 489 WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter); |
| 521 WorkerProcessHost::Instances::const_iterator instance = | 490 WorkerProcessHost::Instances::const_iterator instance = |
| 522 worker->instances().begin(); | 491 worker->instances().begin(); |
| (...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 582 FindPendingInstance(url, name, off_the_record); | 551 FindPendingInstance(url, name, off_the_record); |
| 583 if (instance) | 552 if (instance) |
| 584 return instance; | 553 return instance; |
| 585 | 554 |
| 586 // No existing pending worker - create a new one. | 555 // No existing pending worker - create a new one. |
| 587 WorkerProcessHost::WorkerInstance pending( | 556 WorkerProcessHost::WorkerInstance pending( |
| 588 url, true, off_the_record, name, MSG_ROUTING_NONE, 0, 0, 0, NULL); | 557 url, true, off_the_record, name, MSG_ROUTING_NONE, 0, 0, 0, NULL); |
| 589 pending_shared_workers_.push_back(pending); | 558 pending_shared_workers_.push_back(pending); |
| 590 return &pending_shared_workers_.back(); | 559 return &pending_shared_workers_.back(); |
| 591 } | 560 } |
| OLD | NEW |