Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(2873)

Unified Diff: chrome/browser/worker_host/worker_service.cc

Issue 6055002: Create a message filter for message port messages. This allows a nice cleanu... (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: '' Created 10 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: chrome/browser/worker_host/worker_service.cc
===================================================================
--- chrome/browser/worker_host/worker_service.cc (revision 69724)
+++ chrome/browser/worker_host/worker_service.cc (working copy)
@@ -11,13 +11,11 @@
#include "base/sys_info.h"
#include "base/thread.h"
#include "chrome/browser/content_settings/host_content_settings_map.h"
-#include "chrome/browser/plugin_service.h"
-#include "chrome/browser/renderer_host/render_message_filter.h"
-#include "chrome/browser/renderer_host/render_process_host.h"
+#include "chrome/browser/worker_host/worker_message_filter.h"
#include "chrome/browser/worker_host/worker_process_host.h"
#include "chrome/common/chrome_switches.h"
-#include "chrome/common/notification_service.h"
#include "chrome/common/render_messages.h"
+#include "chrome/common/render_messages_params.h"
#include "chrome/common/worker_messages.h"
#include "net/base/registry_controlled_domain.h"
@@ -29,97 +27,205 @@
return Singleton<WorkerService>::get();
}
-WorkerService::WorkerService()
- : next_worker_route_id_(0),
- resource_dispatcher_host_(NULL) {
- // Receive a notification if a message filter or WorkerProcessHost is deleted.
- registrar_.Add(this, NotificationType::RESOURCE_MESSAGE_FILTER_SHUTDOWN,
- NotificationService::AllSources());
-
- registrar_.Add(this, NotificationType::WORKER_PROCESS_HOST_SHUTDOWN,
- NotificationService::AllSources());
+WorkerService::WorkerService() : next_worker_route_id_(0) {
}
-void WorkerService::Initialize(ResourceDispatcherHost* rdh) {
- resource_dispatcher_host_ = rdh;
-}
-
WorkerService::~WorkerService() {
}
-bool WorkerService::CreateDedicatedWorker(
- const GURL& url,
- bool is_off_the_record,
- unsigned long long document_id,
- int renderer_pid,
- int render_view_route_id,
- IPC::Message::Sender* sender,
- int sender_route_id,
- int parent_process_id,
- int parent_appcache_host_id,
- ChromeURLRequestContext* request_context) {
- return CreateWorker(url, false, is_off_the_record, string16(),
- document_id, renderer_pid, render_view_route_id,
- sender, sender_route_id,
- parent_process_id, parent_appcache_host_id, 0,
- request_context);
-}
+void WorkerService::OnWorkerMessageFilterClosing(WorkerMessageFilter* filter) {
+ for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS);
+ !iter.Done(); ++iter) {
+ WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
+ worker->FilterShutdown(filter);
+ }
-bool WorkerService::CreateSharedWorker(
- const GURL& url,
- bool is_off_the_record,
- const string16& name,
- unsigned long long document_id,
- int renderer_pid,
- int render_view_route_id,
- IPC::Message::Sender* sender,
- int sender_route_id,
- int64 main_resource_appcache_id,
- ChromeURLRequestContext* request_context) {
- return CreateWorker(url, true, is_off_the_record, name,
- document_id, renderer_pid, render_view_route_id,
- sender, sender_route_id,
- 0, 0, main_resource_appcache_id,
- request_context);
+ // See if that process had any queued workers.
+ for (WorkerProcessHost::Instances::iterator i = queued_workers_.begin();
+ i != queued_workers_.end();) {
+ i->RemoveFilters(filter);
+ if (i->NumFilters() == 0) {
+ i = queued_workers_.erase(i);
+ } else {
+ ++i;
+ }
+ }
+
+ // Also, see if that process had any pending shared workers.
+ for (WorkerProcessHost::Instances::iterator iter =
+ pending_shared_workers_.begin();
+ iter != pending_shared_workers_.end(); ) {
+ iter->worker_document_set()->RemoveAll(filter);
+ if (iter->worker_document_set()->IsEmpty()) {
+ iter = pending_shared_workers_.erase(iter);
+ } else {
+ ++iter;
+ }
+ }
+
+ // Either a worker process has shut down, in which case we can start one of
+ // the queued workers, or a renderer has shut down, in which case it doesn't
+ // affect anything. We call this function in both scenarios because then we
+ // don't have to keep track of which filters are from worker processes.
+ TryStartingQueuedWorker();
}
-bool WorkerService::CreateWorker(
- const GURL& url,
- bool is_shared,
- bool off_the_record,
- const string16& name,
- unsigned long long document_id,
- int renderer_id,
- int render_view_route_id,
- IPC::Message::Sender* sender,
- int sender_route_id,
- int parent_process_id,
- int parent_appcache_host_id,
- int64 main_resource_appcache_id,
- ChromeURLRequestContext* request_context) {
+void WorkerService::CreateWorker(const ViewHostMsg_CreateWorker_Params& params,
+ int route_id,
+ WorkerMessageFilter* filter,
+ URLRequestContextGetter* request_context) {
+
+ ChromeURLRequestContext* context = static_cast<ChromeURLRequestContext*>(
+ request_context->GetURLRequestContext());
+
// Generate a unique route id for the browser-worker communication that's
// unique among all worker processes. That way when the worker process sends
// a wrapped IPC message through us, we know which WorkerProcessHost to give
// it to.
- WorkerProcessHost::WorkerInstance instance(url,
- is_shared,
- off_the_record,
- name,
- next_worker_route_id(),
- parent_process_id,
- parent_appcache_host_id,
- main_resource_appcache_id,
- request_context);
- instance.AddSender(sender, sender_route_id);
+ WorkerProcessHost::WorkerInstance instance(
+ params.url,
+ params.is_shared,
+ context->is_off_the_record(),
+ params.name,
+ next_worker_route_id(),
+ params.is_shared ? 0 : filter->render_process_id(),
+ params.is_shared ? 0 : params.parent_appcache_host_id,
+ params.is_shared ? params.script_resource_appcache_id : 0,
+ request_context);
+ instance.AddFilter(filter, route_id);
instance.worker_document_set()->Add(
- sender, document_id, renderer_id, render_view_route_id);
+ filter, params.document_id, filter->render_process_id(),
+ params.render_view_route_id);
- return CreateWorkerFromInstance(instance);
+ CreateWorkerFromInstance(instance);
}
+void WorkerService::LookupSharedWorker(
+ const ViewHostMsg_CreateWorker_Params& params,
+ int route_id,
+ WorkerMessageFilter* filter,
+ bool off_the_record,
+ bool* exists,
+ bool* url_mismatch) {
+
+ *exists = true;
+ WorkerProcessHost::WorkerInstance* instance = FindSharedWorkerInstance(
+ params.url, params.name, off_the_record);
+
+ if (!instance) {
+ // If no worker instance currently exists, we need to create a pending
+ // instance - this is to make sure that any subsequent lookups passing a
+ // mismatched URL get the appropriate url_mismatch error at lookup time.
+ // Having named shared workers was a Really Bad Idea due to details like
+ // this.
+ instance = CreatePendingInstance(params.url, params.name, off_the_record);
+ *exists = false;
+ }
+
+ // Make sure the passed-in instance matches the URL - if not, return an
+ // error.
+ if (params.url != instance->url()) {
+ *url_mismatch = true;
+ *exists = false;
+ } else {
+ *url_mismatch = false;
+ // Add our route ID to the existing instance so we can send messages to it.
+ instance->AddFilter(filter, route_id);
+
+ // Add the passed filter/document_id to the worker instance.
+ // TODO(atwilson): This won't work if the message is from a worker process.
+ // We don't support that yet though (this message is only sent from
+ // renderers) but when we do, we'll need to add code to pass in the current
+ // worker's document set for nested workers.
+ instance->worker_document_set()->Add(
+ filter, params.document_id, filter->render_process_id(),
+ params.render_view_route_id);
+ }
+}
+
+void WorkerService::CancelCreateDedicatedWorker(
+ int route_id,
+ WorkerMessageFilter* filter) {
+ for (WorkerProcessHost::Instances::iterator i = queued_workers_.begin();
+ i != queued_workers_.end(); ++i) {
+ if (i->HasFilter(filter, route_id)) {
+ DCHECK(!i->shared());
+ queued_workers_.erase(i);
+ return;
+ }
+ }
+
+ // There could be a race condition where the WebWorkerProxy told us to cancel
+ // the worker right as we sent it a message saying it's been created. Look at
+ // the running workers.
+ for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS);
+ !iter.Done(); ++iter) {
+ WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
+ for (WorkerProcessHost::Instances::const_iterator instance =
+ worker->instances().begin();
+ instance != worker->instances().end(); ++instance) {
+ if (instance->HasFilter(filter, route_id)) {
+ // Fake a worker destroyed message so that WorkerProcessHost cleans up
+ // properly.
+ WorkerHostMsg_WorkerContextDestroyed message(route_id);
+ ForwardToWorker(message, filter);
+ return;
+ }
+ }
+ }
+
+ DCHECK(false) << "Couldn't find worker to cancel";
+}
+
+void WorkerService::ForwardToWorker(const IPC::Message& message,
+ WorkerMessageFilter* filter) {
+ for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS);
+ !iter.Done(); ++iter) {
+ WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
+ if (worker->FilterMessage(message, filter))
+ return;
+ }
+
+ // TODO(jabdelmalek): tell filter that callee is gone
+}
+
+void WorkerService::DocumentDetached(unsigned long long document_id,
+ WorkerMessageFilter* filter) {
+ // Any associated shared workers can be shut down.
+ for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS);
+ !iter.Done(); ++iter) {
+ WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
+ worker->DocumentDetached(filter, document_id);
+ }
+
+ // Remove any queued shared workers for this document.
+ for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin();
+ iter != queued_workers_.end();) {
+ if (iter->shared()) {
+ iter->worker_document_set()->Remove(filter, document_id);
+ if (iter->worker_document_set()->IsEmpty()) {
+ iter = queued_workers_.erase(iter);
+ continue;
+ }
+ }
+ ++iter;
+ }
+
+ // Remove the document from any pending shared workers.
+ for (WorkerProcessHost::Instances::iterator iter =
+ pending_shared_workers_.begin();
+ iter != pending_shared_workers_.end(); ) {
+ iter->worker_document_set()->Remove(filter, document_id);
+ if (iter->worker_document_set()->IsEmpty()) {
+ iter = pending_shared_workers_.erase(iter);
+ } else {
+ ++iter;
+ }
+ }
+}
+
bool WorkerService::CreateWorkerFromInstance(
WorkerProcessHost::WorkerInstance instance) {
-
// TODO(michaeln): We need to ensure that a process is working
// on behalf of a single profile. The process sharing logic below
// does not ensure that. Consider making WorkerService a per profile
@@ -145,17 +251,17 @@
WorkerProcessHost::WorkerInstance* existing_instance =
FindSharedWorkerInstance(
instance.url(), instance.name(), instance.off_the_record());
- WorkerProcessHost::WorkerInstance::SenderInfo sender_info =
- instance.GetSender();
+ WorkerProcessHost::WorkerInstance::FilterInfo filter_info =
+ instance.GetFilter();
// If this worker is already running, no need to create a new copy. Just
// inform the caller that the worker has been created.
if (existing_instance) {
- // Walk the worker's sender list to see if this client is listed. If not,
+ // Walk the worker's filter list to see if this client is listed. If not,
// then it means that the worker started by the client already exited so
// we should not attach to this new one (http://crbug.com/29243).
- if (!existing_instance->HasSender(sender_info.first, sender_info.second))
+ if (!existing_instance->HasFilter(filter_info.first, filter_info.second))
return false;
- sender_info.first->Send(new ViewMsg_WorkerCreated(sender_info.second));
+ filter_info.first->Send(new ViewMsg_WorkerCreated(filter_info.second));
return true;
}
@@ -163,38 +269,38 @@
WorkerProcessHost::WorkerInstance* pending = FindPendingInstance(
instance.url(), instance.name(), instance.off_the_record());
// If there's no instance *and* no pending instance (or there is a pending
- // instance but it does not contain our sender info), then it means the
+ // instance but it does not contain our filter info), then it means the
// worker started up and exited already. Log a warning because this should
// be a very rare occurrence and is probably a bug, but it *can* happen so
// handle it gracefully.
if (!pending ||
- !pending->HasSender(sender_info.first, sender_info.second)) {
+ !pending->HasFilter(filter_info.first, filter_info.second)) {
DLOG(WARNING) << "Pending worker already exited";
return false;
}
- // Assign the accumulated document set and sender list for this pending
+ // Assign the accumulated document set and filter list for this pending
// worker to the new instance.
DCHECK(!pending->worker_document_set()->IsEmpty());
instance.ShareDocumentSet(*pending);
- for (WorkerProcessHost::WorkerInstance::SenderList::const_iterator i =
- pending->senders().begin();
- i != pending->senders().end(); ++i) {
- instance.AddSender(i->first, i->second);
+ for (WorkerProcessHost::WorkerInstance::FilterList::const_iterator i =
+ pending->filters().begin();
+ i != pending->filters().end(); ++i) {
+ instance.AddFilter(i->first, i->second);
}
RemovePendingInstances(
instance.url(), instance.name(), instance.off_the_record());
- // Remove any queued instances of this worker and copy over the sender to
+ // Remove any queued instances of this worker and copy over the filter to
// this instance.
for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin();
iter != queued_workers_.end();) {
if (iter->Matches(instance.url(), instance.name(),
instance.off_the_record())) {
- DCHECK(iter->NumSenders() == 1);
- WorkerProcessHost::WorkerInstance::SenderInfo sender_info =
- iter->GetSender();
- instance.AddSender(sender_info.first, sender_info.second);
+ DCHECK(iter->NumFilters() == 1);
+ WorkerProcessHost::WorkerInstance::FilterInfo filter_info =
+ iter->GetFilter();
+ instance.AddFilter(filter_info.first, filter_info.second);
iter = queued_workers_.erase(iter);
} else {
++iter;
@@ -203,9 +309,15 @@
}
if (!worker) {
- worker = new WorkerProcessHost(resource_dispatcher_host_,
- instance.request_context());
- if (!worker->Init()) {
+ WorkerMessageFilter* first_filter = instance.filters().begin()->first;
+ worker = new WorkerProcessHost(
+ first_filter->resource_dispatcher_host(),
+ instance.request_context());
+ // TODO(atwilson): This won't work if the message is from a worker process.
+ // We don't support that yet though (this message is only sent from
+ // renderers) but when we do, we'll need to add code to pass in the current
+ // worker's document set for nested workers.
+ if (!worker->Init(first_filter->render_process_id())) {
delete worker;
return false;
}
@@ -219,127 +331,6 @@
return true;
}
-bool WorkerService::LookupSharedWorker(
- const GURL &url,
- const string16& name,
- bool off_the_record,
- unsigned long long document_id,
- int renderer_id,
- int render_view_route_id,
- IPC::Message::Sender* sender,
- int sender_route_id,
- bool* url_mismatch) {
- bool found_instance = true;
- WorkerProcessHost::WorkerInstance* instance =
- FindSharedWorkerInstance(url, name, off_the_record);
-
- if (!instance) {
- // If no worker instance currently exists, we need to create a pending
- // instance - this is to make sure that any subsequent lookups passing a
- // mismatched URL get the appropriate url_mismatch error at lookup time.
- // Having named shared workers was a Really Bad Idea due to details like
- // this.
- instance = CreatePendingInstance(url, name, off_the_record);
- found_instance = false;
- }
-
- // Make sure the passed-in instance matches the URL - if not, return an
- // error.
- if (url != instance->url()) {
- *url_mismatch = true;
- return false;
- } else {
- *url_mismatch = false;
- }
-
- // Add our route ID to the existing instance so we can send messages to it.
- instance->AddSender(sender, sender_route_id);
-
- // Add the passed sender/document_id to the worker instance.
- instance->worker_document_set()->Add(
- sender, document_id, renderer_id, render_view_route_id);
- return found_instance;
-}
-
-void WorkerService::DocumentDetached(IPC::Message::Sender* sender,
- unsigned long long document_id) {
- for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS);
- !iter.Done(); ++iter) {
- WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
- worker->DocumentDetached(sender, document_id);
- }
-
- // Remove any queued shared workers for this document.
- for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin();
- iter != queued_workers_.end();) {
- if (iter->shared()) {
- iter->worker_document_set()->Remove(sender, document_id);
- if (iter->worker_document_set()->IsEmpty()) {
- iter = queued_workers_.erase(iter);
- continue;
- }
- }
- ++iter;
- }
-
- // Remove the document from any pending shared workers.
- for (WorkerProcessHost::Instances::iterator iter =
- pending_shared_workers_.begin();
- iter != pending_shared_workers_.end(); ) {
- iter->worker_document_set()->Remove(sender, document_id);
- if (iter->worker_document_set()->IsEmpty()) {
- iter = pending_shared_workers_.erase(iter);
- } else {
- ++iter;
- }
- }
-}
-
-void WorkerService::CancelCreateDedicatedWorker(IPC::Message::Sender* sender,
- int sender_route_id) {
- for (WorkerProcessHost::Instances::iterator i = queued_workers_.begin();
- i != queued_workers_.end(); ++i) {
- if (i->HasSender(sender, sender_route_id)) {
- DCHECK(!i->shared());
- queued_workers_.erase(i);
- return;
- }
- }
-
- // There could be a race condition where the WebWorkerProxy told us to cancel
- // the worker right as we sent it a message say it's been created. Look at
- // the running workers.
- for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS);
- !iter.Done(); ++iter) {
- WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
- for (WorkerProcessHost::Instances::const_iterator instance =
- worker->instances().begin();
- instance != worker->instances().end(); ++instance) {
- if (instance->HasSender(sender, sender_route_id)) {
- // Fake a worker destroyed message so that WorkerProcessHost cleans up
- // properly.
- WorkerHostMsg_WorkerContextDestroyed msg(sender_route_id);
- ForwardMessage(msg, sender);
- return;
- }
- }
- }
-
- DCHECK(false) << "Couldn't find worker to cancel";
-}
-
-void WorkerService::ForwardMessage(const IPC::Message& message,
- IPC::Message::Sender* sender) {
- for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS);
- !iter.Done(); ++iter) {
- WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
- if (worker->FilterMessage(message, sender))
- return;
- }
-
- // TODO(jabdelmalek): tell sender that callee is gone
-}
-
WorkerProcessHost* WorkerService::GetProcessForDomain(const GURL& url) {
int num_processes = 0;
std::string domain =
@@ -398,8 +389,8 @@
parents.begin();
parent_iter != parents.end(); ++parent_iter) {
bool hit_total_worker_limit = false;
- if (TabCanCreateWorkerProcess(parent_iter->renderer_id(),
- parent_iter->render_view_route_id(),
+ if (TabCanCreateWorkerProcess(parent_iter->render_process_id(),
+ parent_iter->render_view_id(),
&hit_total_worker_limit)) {
return true;
}
@@ -413,8 +404,8 @@
return false;
}
-bool WorkerService::TabCanCreateWorkerProcess(int renderer_id,
- int render_view_route_id,
+bool WorkerService::TabCanCreateWorkerProcess(int render_process_id,
+ int render_view_id,
bool* hit_total_worker_limit) {
int total_workers = 0;
int workers_per_tab = 0;
@@ -430,7 +421,7 @@
*hit_total_worker_limit = true;
return false;
}
- if (cur_instance->RendererIsParent(renderer_id, render_view_route_id)) {
+ if (cur_instance->RendererIsParent(render_process_id, render_view_id)) {
workers_per_tab++;
if (workers_per_tab >= kMaxWorkersPerTabWhenSeparate)
return false;
@@ -441,53 +432,7 @@
return true;
}
-void WorkerService::Observe(NotificationType type,
- const NotificationSource& source,
- const NotificationDetails& details) {
- if (type.value == NotificationType::RESOURCE_MESSAGE_FILTER_SHUTDOWN) {
- RenderMessageFilter* sender = Source<RenderMessageFilter>(source).ptr();
- SenderShutdown(sender);
- } else if (type.value == NotificationType::WORKER_PROCESS_HOST_SHUTDOWN) {
- WorkerProcessHost* sender = Source<WorkerProcessHost>(source).ptr();
- SenderShutdown(sender);
- WorkerProcessDestroyed(sender);
- } else {
- NOTREACHED();
- }
-}
-
-void WorkerService::SenderShutdown(IPC::Message::Sender* sender) {
- for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS);
- !iter.Done(); ++iter) {
- WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
- worker->SenderShutdown(sender);
- }
-
- // See if that render process had any queued workers.
- for (WorkerProcessHost::Instances::iterator i = queued_workers_.begin();
- i != queued_workers_.end();) {
- i->RemoveSenders(sender);
- if (i->NumSenders() == 0) {
- i = queued_workers_.erase(i);
- } else {
- ++i;
- }
- }
-
- // Also, see if that render process had any pending shared workers.
- for (WorkerProcessHost::Instances::iterator iter =
- pending_shared_workers_.begin();
- iter != pending_shared_workers_.end(); ) {
- iter->worker_document_set()->RemoveAll(sender);
- if (iter->worker_document_set()->IsEmpty()) {
- iter = pending_shared_workers_.erase(iter);
- } else {
- ++iter;
- }
- }
-}
-
-void WorkerService::WorkerProcessDestroyed(WorkerProcessHost* process) {
+void WorkerService::TryStartingQueuedWorker() {
if (queued_workers_.empty())
return;
@@ -510,6 +455,30 @@
}
}
+bool WorkerService::GetRendererForWorker(int worker_process_id,
+ int* render_process_id,
+ int* render_view_id) const {
+ for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS);
+ !iter.Done(); ++iter) {
+ if (iter->id() != worker_process_id)
+ continue;
+
+ // This code assumes one worker per process, see function comment in header!
+ WorkerProcessHost* worker = static_cast<WorkerProcessHost*>(*iter);
+ WorkerProcessHost::Instances::const_iterator first_instance =
+ worker->instances().begin();
+ if (first_instance == worker->instances().end())
+ return false;
+
+ WorkerDocumentSet::DocumentInfoSet::const_iterator info =
+ first_instance->worker_document_set()->documents().begin();
+ *render_process_id = info->render_process_id();
+ *render_view_id = info->render_view_id();
+ return true;
+ }
+ return false;
+}
+
const WorkerProcessHost::WorkerInstance* WorkerService::FindWorkerInstance(
int worker_process_id) {
for (BrowserChildProcessHost::Iterator iter(ChildProcessInfo::WORKER_PROCESS);

Powered by Google App Engine
This is Rietveld 408576698