Chromium Code Reviews

Index: chrome/browser/worker_host/worker_process_host.cc
diff --git a/chrome/browser/worker_host/worker_process_host.cc b/chrome/browser/worker_host/worker_process_host.cc
index a5cbbb82f32ea09a862b89815e818f574fc67ea0..b87b620624d4a6fef1cf5ca09f2d74cc0e6cb6c9 100644
--- a/chrome/browser/worker_host/worker_process_host.cc
+++ b/chrome/browser/worker_host/worker_process_host.cc
@@ -150,15 +150,15 @@ void WorkerProcessHost::CreateWorker(const WorkerInstance& instance) {
       instance.worker_route_id));
   UpdateTitle();
-  instances_.back().sender->Send(
-      new ViewMsg_WorkerCreated(instance.sender_route_id));
+  DCHECK(instance.senders.size() == 1);
+  WorkerInstance::SenderInfo info = *(instances_.back().senders.begin());
+  info.first->Send(new ViewMsg_WorkerCreated(info.second));
 }
 
 bool WorkerProcessHost::FilterMessage(const IPC::Message& message,
-                                      int sender_pid) {
+                                      IPC::Message::Sender* sender) {
   for (Instances::iterator i = instances_.begin(); i != instances_.end(); ++i) {
-    if (i->sender_id == sender_pid &&
-        i->sender_route_id == message.routing_id()) {
+    if (!i->closed && i->HasSender(sender, message.routing_id())) {
       RelayMessage(
           message, this, i->worker_route_id, next_route_id_callback_.get());
       return true;
@@ -174,6 +174,19 @@ URLRequestContext* WorkerProcessHost::GetRequestContext(
   return NULL;
 }
 
+// Sent to notify the browser process when a worker context invokes close(), so
+// no new connections are sent to shared workers.
+void WorkerProcessHost::OnWorkerContextClosed(int worker_route_id) {
+  for (Instances::iterator i = instances_.begin(); i != instances_.end(); ++i) {
+    if (i->worker_route_id == worker_route_id) {
+      // Set the closed flag - this will stop any further connections from
+      // being sent to the shared worker (ignored for dedicated workers).
jam  2009/11/12 20:11:23
nit: the comment says it's ignored for dedicated workers […]
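
Note: the closed flag set below is read in two places in this patch. FilterMessage() stops relaying renderer messages for closed instances, and WorkerInstance::Matches() (further down) refuses to match them, so a later SharedWorker lookup creates a fresh instance rather than reusing the closed one. A toy model of that semantics, using stand-in names rather than Chromium types:

    #include <cassert>

    // Minimal model of the close() behavior this hunk implements.
    struct InstanceModel {
      bool is_shared;
      bool closed;
      // Mirrors WorkerInstance::Matches() later in this patch: a closed
      // shared worker is never handed to new clients.
      bool MatchableByNewClients() const { return is_shared && !closed; }
    };

    int main() {
      InstanceModel worker = { true, false };  // open shared worker
      assert(worker.MatchableByNewClients());
      worker.closed = true;                    // worker script called close()
      assert(!worker.MatchableByNewClients());
      return 0;
    }
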
+      i->closed = true;
+      break;
+    }
+  }
+}
+
 void WorkerProcessHost::OnMessageReceived(const IPC::Message& message) {
   bool msg_is_ok = true;
   bool handled = MessagePortDispatcher::GetInstance()->OnMessageReceived(
@@ -185,6 +198,8 @@ void WorkerProcessHost::OnMessageReceived(const IPC::Message& message) {
     IPC_MESSAGE_HANDLER(ViewHostMsg_CreateWorker, OnCreateWorker)
     IPC_MESSAGE_HANDLER(ViewHostMsg_CancelCreateDedicatedWorker,
                         OnCancelCreateDedicatedWorker)
+    IPC_MESSAGE_HANDLER(WorkerHostMsg_WorkerContextClosed,
+                        OnWorkerContextClosed)
     IPC_MESSAGE_HANDLER(ViewHostMsg_ForwardToWorker,
                         OnForwardToWorker)
jam  2009/11/12 20:11:23
can you add the new messages that you receive in R[…]
     IPC_MESSAGE_UNHANDLED(handled = false)
@@ -201,9 +216,15 @@ void WorkerProcessHost::OnMessageReceived(const IPC::Message& message) {
   for (Instances::iterator i = instances_.begin(); i != instances_.end(); ++i) {
     if (i->worker_route_id == message.routing_id()) {
-      CallbackWithReturnValue<int>::Type* next_route_id =
-          GetNextRouteIdCallback(i->sender);
-      RelayMessage(message, i->sender, i->sender_route_id, next_route_id);
+      if (!i->is_shared) {
+        // Don't relay messages from shared workers (all communication is via
+        // the message port).
+        DCHECK(i->senders.size() == 1);
+        WorkerInstance::SenderInfo info = *(i->senders.begin());
+        CallbackWithReturnValue<int>::Type* next_route_id =
+            GetNextRouteIdCallback(info.first);
+        RelayMessage(message, info.first, info.second, next_route_id);
+      }
       if (message.type() == WorkerHostMsg_WorkerContextDestroyed::ID) {
         instances_.erase(i);
@@ -293,7 +314,16 @@ void WorkerProcessHost::RelayMessage(
 
 void WorkerProcessHost::SenderShutdown(IPC::Message::Sender* sender) {
   for (Instances::iterator i = instances_.begin(); i != instances_.end();) {
-    if (i->sender == sender) {
+    bool shutdown = false;
+    i->RemoveSenders(sender);
+    if (i->is_shared) {
+      i->RemoveAllAssociatedDocuments(sender);
+      if (i->document_set.empty())
+        shutdown = true;
+    } else if (i->senders.empty()) {
+      shutdown = true;
+    }
+    if (shutdown) {
       Send(new WorkerMsg_TerminateWorkerContext(i->worker_route_id));
       i = instances_.erase(i);
     } else {
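
Note: restated compactly, the rule this hunk implements is that a dedicated worker's lifetime tracks its single sender, while a shared worker's lifetime tracks the set of documents still referencing it. A minimal sketch of that decision, with a stand-in signature rather than the actual Chromium code:

    // Shutdown decision mirrored from SenderShutdown() above.
    bool ShouldShutdown(bool is_shared,
                        size_t remaining_senders,
                        size_t remaining_documents) {
      if (is_shared)
        return remaining_documents == 0;  // shared: all documents gone
      return remaining_senders == 0;      // dedicated: its sender went away
    }
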
@@ -336,13 +366,111 @@ void WorkerProcessHost::OnCreateWorker(const GURL& url,
   *route_id = WorkerService::GetInstance()->next_worker_route_id();
   WorkerService::GetInstance()->CreateWorker(
       url, is_shared, name, instances_.front().renderer_id,
-      instances_.front().render_view_route_id, this, id(), *route_id);
+      instances_.front().render_view_route_id, this, *route_id);
 }
 
 void WorkerProcessHost::OnCancelCreateDedicatedWorker(int route_id) {
-  WorkerService::GetInstance()->CancelCreateDedicatedWorker(id(), route_id);
+  WorkerService::GetInstance()->CancelCreateDedicatedWorker(this, route_id);
 }
 
 void WorkerProcessHost::OnForwardToWorker(const IPC::Message& message) {
-  WorkerService::GetInstance()->ForwardMessage(message, id());
+  WorkerService::GetInstance()->ForwardMessage(message, this);
+}
+
+void WorkerProcessHost::DocumentDetached(IPC::Message::Sender* parent,
+                                         unsigned long long document_id)
+{
+  // Walk all instances and remove the document from their document set
jam  2009/11/12 20:11:23
very nitty nit: period at end of comment
+  for (Instances::iterator i = instances_.begin(); i != instances_.end();) {
+    if (!i->is_shared) {
+      ++i;
+    } else {
+      i->RemoveFromDocumentSet(parent, document_id);
+      if (i->document_set.empty()) {
+        // This worker has no more associated documents - shut it down.
+        Send(new WorkerMsg_TerminateWorkerContext(i->worker_route_id));
+        i = instances_.erase(i);
+      } else {
+        ++i;
+      }
+    }
+  }
+}
+
+// Compares an instance based on the algorithm in the WebWorkers spec - an
+// instance matches if the origins of the URLs match, and:
+// a) the names are non-empty and equal
+// -or-
+// b) the names are both empty, and the urls are equal
+bool WorkerProcessHost::WorkerInstance::Matches(
+    const GURL& match_url, const string16& match_name) const {
+  // Only match open shared workers.
+  if (!is_shared || closed)
+    return false;
+
+  if (url.GetOrigin() != match_url.GetOrigin())
+    return false;
+
+  if (name.empty() && match_name.empty())
+    return url == match_url;
+
+  return name == match_name;
+}
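
Note: a self-contained restatement of the matching rule above, with std::string stand-ins for GURL/string16 and pre-computed origins (an illustrative sketch; the real code compares GURL::GetOrigin()):

    #include <string>

    bool MatchesSketch(const std::string& origin, const std::string& url,
                       const std::string& name,
                       const std::string& match_origin,
                       const std::string& match_url,
                       const std::string& match_name) {
      if (origin != match_origin)
        return false;                 // cross-origin never matches
      if (name.empty() && match_name.empty())
        return url == match_url;      // anonymous workers match by exact URL
      return name == match_name;      // named workers match by name alone
    }

For example, a lookup for ("http://a.com/other.js", "chat") matches an instance created from ("http://a.com/worker.js", "chat"): named shared workers are keyed by origin plus name, not by script URL.
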
+
+void WorkerProcessHost::WorkerInstance::AddToDocumentSet(
+    IPC::Message::Sender* parent, unsigned long long document_id) {
+  DocumentInfo info(parent, document_id);
+  document_set.insert(info);
+}
+
+bool WorkerProcessHost::WorkerInstance::IsInDocumentSet(
+    IPC::Message::Sender* parent, unsigned long long document_id) const {
+  DocumentInfo info(parent, document_id);
+  return document_set.find(info) != document_set.end();
+}
+
+void WorkerProcessHost::WorkerInstance::RemoveFromDocumentSet(
+    IPC::Message::Sender* parent, unsigned long long document_id) {
+  DocumentInfo info(parent, document_id);
+  document_set.erase(info);
+}
+
+void WorkerProcessHost::WorkerInstance::RemoveAllAssociatedDocuments(
+    IPC::Message::Sender* parent) {
+  for (DocumentSet::iterator i = document_set.begin();
+       i != document_set.end();
+       ++i) {
+    if (i->first == parent)
+      document_set.erase(i);
jam  2009/11/12 20:11:23
this isn't safe, need to increment i only if you don't erase
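
Note: the idiom being asked for here. std::set::erase() invalidates only the erased iterator, and in C++03 it returns void, so the usual fix is the post-increment erase pattern (a sketch, not part of the patch):

    // Safe removal while iterating: move past the element before erasing it.
    for (DocumentSet::iterator i = document_set.begin();
         i != document_set.end(); /* no increment here */) {
      if (i->first == parent)
        document_set.erase(i++);  // i already points at the next element
      else
        ++i;
    }
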
+  }
+}
+
+void WorkerProcessHost::WorkerInstance::AddSender(IPC::Message::Sender* sender,
+                                                  int sender_route_id) {
+  SenderInfo info(sender, sender_route_id);
+  senders.insert(info);
+  // Only shared workers can have more than one associated sender.
+  DCHECK(is_shared || senders.size() == 1);
+}
+
+void WorkerProcessHost::WorkerInstance::RemoveSender(
+    IPC::Message::Sender* sender, int sender_route_id) {
+  SenderInfo info(sender, sender_route_id);
+  senders.erase(info);
 }
+
+void WorkerProcessHost::WorkerInstance::RemoveSenders(
+    IPC::Message::Sender* sender) {
+  for (SenderList::iterator i = senders.begin(); i != senders.end(); ++i) {
+    if (i->first == sender)
+      senders.erase(i);
+  }
+}
+
+bool WorkerProcessHost::WorkerInstance::HasSender(
+    IPC::Message::Sender* sender, int sender_route_id) const {
+  SenderInfo info(sender, sender_route_id);
+  return senders.find(info) != senders.end();
+}
+
+
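
Note: the .find()/.erase()/->first usage above is consistent with SenderInfo being a std::pair held in a std::set; that is an assumption here, since the header is not part of this diff. A self-contained sketch on that assumption, including the iterator-safety fix that RemoveSenders() above also needs (its loop has the same bug jam flagged in RemoveAllAssociatedDocuments()):

    #include <set>
    #include <utility>

    // Assumed shapes (not shown in this diff); void* stands in for
    // IPC::Message::Sender*.
    typedef std::pair<void*, int> SenderInfo;
    typedef std::set<SenderInfo> SenderList;

    // Exact-pair membership test, mirroring HasSender(): O(log n).
    bool HasSenderSketch(const SenderList& senders, void* sender,
                         int route_id) {
      return senders.find(SenderInfo(sender, route_id)) != senders.end();
    }

    // Remove every entry for one sender, mirroring RemoveSenders() but with
    // the post-increment erase idiom so iteration stays valid.
    void RemoveSendersSketch(SenderList* senders, void* sender) {
      for (SenderList::iterator i = senders->begin(); i != senders->end();) {
        if (i->first == sender)
          senders->erase(i++);
        else
          ++i;
      }
    }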