Chromium Code Reviews

Unified Diff: content/common/gpu/client/gpu_channel_host.cc

Issue 1656433002: Sample code: IPC Transport object for GPU
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: GpuMemoryBufferService + Transport object. TODO: Eliminate ChildThreadImpl dependency
Created 4 years, 10 months ago
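
The calls this file makes on transport_ imply a channel-level transport interface roughly like the one sketched below. This is a reconstruction from the call sites in the diff, not the actual header added under content/common/gpu/client/ipc/; names, signatures, and the forward-declared per-object transport classes are assumptions made for illustration.

// Sketch only: inferred from the transport_->... call sites in
// gpu_channel_host.cc below. Chromium types (base::SharedMemoryHandle,
// gfx::Size, ui::LatencyInfo, CreateCommandBufferResult,
// GpuCreateCommandBufferConfig) come from their usual headers.
class CommandBufferIPCTransport;                 // per-command-buffer identity, replaces route ids
class GpuJpegDecodeAcceleratorHostIPCTransport;  // per-decoder identity

class GpuChannelHostIPCTransport {
 public:
  virtual ~GpuChannelHostIPCTransport() {}

  // Replaces GpuCommandBufferMsg_AsyncFlush: the target command buffer is
  // named by its transport object rather than a route id.
  virtual void AsyncFlush(CommandBufferIPCTransport* transport,
                          int32_t put_offset,
                          uint32_t flush_count,
                          const std::vector<ui::LatencyInfo>& latency_info) = 0;

  // Replace GpuChannelHostFactory::CreateViewCommandBuffer and
  // GpuChannelMsg_CreateOffscreenCommandBuffer.
  virtual void CreateViewCommandBuffer(
      int32_t surface_id,
      const GpuCreateCommandBufferConfig& init_params,
      CommandBufferIPCTransport* transport,
      CreateCommandBufferResult* result) = 0;
  virtual bool CreateOffscreenCommandBuffer(
      const gfx::Size& size,
      const GpuCreateCommandBufferConfig& init_params,
      CommandBufferIPCTransport* transport,
      bool* succeeded) = 0;
  virtual void DestroyCommandBuffer(CommandBufferIPCTransport* transport) = 0;

  // Replaces the route-id based GpuJpegDecodeAcceleratorHost setup.
  virtual bool CreateJpegDecoder(
      GpuJpegDecodeAcceleratorHostIPCTransport* transport,
      bool* succeeded) = 0;

  // Replace the explicit shared-memory handle duplication and
  // GpuChannelMsg_Nop respectively.
  virtual base::SharedMemoryHandle ShareToGpuProcess(
      base::SharedMemoryHandle source_handle) = 0;
  virtual bool Nop() = 0;
};

The diff additionally relies on CommandBufferProxyImpl::transport() and on CommandBufferIPCTransport exposing GetShareGroupID(), whose value fills init_params.share_group_id (with -2, matching the old MSG_ROUTING_NONE sentinel, meaning no share group).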
Index: content/common/gpu/client/gpu_channel_host.cc
diff --git a/content/common/gpu/client/gpu_channel_host.cc b/content/common/gpu/client/gpu_channel_host.cc
index 25328f4539931e008c4d02a347425dac41c3013a..763c1fe63f24dda8d178ed66ac62b88a8d989cb7 100644
--- a/content/common/gpu/client/gpu_channel_host.cc
+++ b/content/common/gpu/client/gpu_channel_host.cc
@@ -17,9 +17,10 @@
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "content/common/gpu/client/command_buffer_proxy_impl.h"
+#include "content/common/gpu/client/gpu_channel_host_factory.h"
#include "content/common/gpu/client/gpu_jpeg_decode_accelerator_host.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "ipc/ipc_sync_message_filter.h"
+#include "content/common/gpu/client/ipc/gpu_host_ipc_transport_factory.h"
+#include "content/common/gpu/gpu_create_command_buffer_config.h"
#include "url/gurl.h"
#if defined(OS_WIN) || defined(OS_MACOSX)
@@ -41,7 +42,7 @@ GpuChannelHost::StreamFlushInfo::StreamFlushInfo()
flushed_stream_flush_id(0),
verified_stream_flush_id(0),
flush_pending(false),
- route_id(MSG_ROUTING_NONE),
+ transport(nullptr),
put_offset(0),
flush_count(0),
flush_id(0) {}
@@ -50,91 +51,26 @@ GpuChannelHost::StreamFlushInfo::~StreamFlushInfo() {}
// static
scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
- GpuChannelHostFactory* factory,
- int channel_id,
+ scoped_ptr<GpuChannelHostIPCTransport> transport,
const gpu::GPUInfo& gpu_info,
- const IPC::ChannelHandle& channel_handle,
- base::WaitableEvent* shutdown_event,
gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) {
- DCHECK(factory->IsMainThread());
- scoped_refptr<GpuChannelHost> host =
- new GpuChannelHost(factory, channel_id, gpu_info,
- gpu_memory_buffer_manager);
- host->Connect(channel_handle, shutdown_event);
- return host;
+ return new GpuChannelHost(std::move(transport), gpu_info,
+ gpu_memory_buffer_manager);
}
GpuChannelHost::GpuChannelHost(
- GpuChannelHostFactory* factory,
- int channel_id,
+ scoped_ptr<GpuChannelHostIPCTransport> transport,
const gpu::GPUInfo& gpu_info,
gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager)
- : factory_(factory),
- channel_id_(channel_id),
- gpu_info_(gpu_info),
- gpu_memory_buffer_manager_(gpu_memory_buffer_manager) {
+ : gpu_info_(gpu_info),
+ gpu_memory_buffer_manager_(gpu_memory_buffer_manager),
+ transport_(std::move(transport)) {
next_image_id_.GetNext();
- next_route_id_.GetNext();
next_stream_id_.GetNext();
}
-void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle,
- base::WaitableEvent* shutdown_event) {
- DCHECK(factory_->IsMainThread());
- // Open a channel to the GPU process. We pass NULL as the main listener here
- // since we need to filter everything to route it to the right thread.
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
- factory_->GetIOThreadTaskRunner();
- channel_ =
- IPC::SyncChannel::Create(channel_handle, IPC::Channel::MODE_CLIENT, NULL,
- io_task_runner.get(), true, shutdown_event);
-
- sync_filter_ = channel_->CreateSyncMessageFilter();
-
- channel_filter_ = new MessageFilter();
-
- // Install the filter last, because we intercept all leftover
- // messages.
- channel_->AddFilter(channel_filter_.get());
-}
-
-bool GpuChannelHost::Send(IPC::Message* msg) {
- // Callee takes ownership of message, regardless of whether Send is
- // successful. See IPC::Sender.
- scoped_ptr<IPC::Message> message(msg);
- // The GPU process never sends synchronous IPCs so clear the unblock flag to
- // preserve order.
- message->set_unblock(false);
-
- // Currently we need to choose between two different mechanisms for sending.
- // On the main thread we use the regular channel Send() method, on another
- // thread we use SyncMessageFilter. We also have to be careful interpreting
- // IsMainThread() since it might return false during shutdown,
- // impl we are actually calling from the main thread (discard message then).
- //
- // TODO: Can we just always use sync_filter_ since we setup the channel
- // without a main listener?
- if (factory_->IsMainThread()) {
- // channel_ is only modified on the main thread, so we don't need to take a
- // lock here.
- if (!channel_) {
- DVLOG(1) << "GpuChannelHost::Send failed: Channel already destroyed";
- return false;
- }
- // http://crbug.com/125264
- base::ThreadRestrictions::ScopedAllowWait allow_wait;
- bool result = channel_->Send(message.release());
- if (!result)
- DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
- return result;
- }
-
- bool result = sync_filter_->Send(message.release());
- return result;
-}
-
uint32_t GpuChannelHost::OrderingBarrier(
- int32_t route_id,
+ CommandBufferIPCTransport* transport,
int32_t stream_id,
int32_t put_offset,
uint32_t flush_count,
@@ -143,13 +79,13 @@ uint32_t GpuChannelHost::OrderingBarrier(
bool do_flush) {
AutoLock lock(context_lock_);
StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
- if (flush_info.flush_pending && flush_info.route_id != route_id)
+ if (flush_info.flush_pending && flush_info.transport != transport)
InternalFlush(&flush_info);
if (put_offset_changed) {
const uint32_t flush_id = flush_info.next_stream_flush_id++;
flush_info.flush_pending = true;
- flush_info.route_id = route_id;
+ flush_info.transport = transport;
flush_info.put_offset = put_offset;
flush_info.flush_count = flush_count;
flush_info.flush_id = flush_id;
@@ -180,9 +116,8 @@ void GpuChannelHost::InternalFlush(StreamFlushInfo* flush_info) {
DCHECK(flush_info);
DCHECK(flush_info->flush_pending);
DCHECK_LT(flush_info->flushed_stream_flush_id, flush_info->flush_id);
- Send(new GpuCommandBufferMsg_AsyncFlush(
- flush_info->route_id, flush_info->put_offset, flush_info->flush_count,
- flush_info->latency_info));
+ transport_->AsyncFlush(flush_info->transport, flush_info->put_offset,
+ flush_info->flush_count, flush_info->latency_info);
flush_info->latency_info.clear();
flush_info->flush_pending = false;
@@ -203,40 +138,29 @@ scoped_ptr<CommandBufferProxyImpl> GpuChannelHost::CreateViewCommandBuffer(
"surface_id",
surface_id);
- GPUCreateCommandBufferConfig init_params;
+ content::GpuCreateCommandBufferConfig init_params;
init_params.share_group_id =
- share_group ? share_group->route_id() : MSG_ROUTING_NONE;
+ share_group ? share_group->transport()->GetShareGroupID() : -2;
init_params.stream_id = stream_id;
init_params.stream_priority = stream_priority;
init_params.attribs = attribs;
init_params.active_url = active_url;
init_params.gpu_preference = gpu_preference;
- int32_t route_id = GenerateRouteID();
-
- CreateCommandBufferResult result = factory_->CreateViewCommandBuffer(
- surface_id, init_params, route_id);
+ CreateCommandBufferResult result =
+ CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST;
+ scoped_ptr<CommandBufferIPCTransport> command_buffer_transport(
+ GpuHostIPCTransportFactory::Get()->CreateCommandBufferIPCTransport());
+ transport_->CreateViewCommandBuffer(surface_id, init_params,
+ command_buffer_transport.get(), &result);
if (result != CREATE_COMMAND_BUFFER_SUCCEEDED) {
LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed.";
-
- if (result == CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST) {
- // The GPU channel needs to be considered lost. The caller will
- // then set up a new connection, and the GPU channel and any
- // view command buffers will all be associated with the same GPU
- // process.
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
- factory_->GetIOThreadTaskRunner();
- io_task_runner->PostTask(
- FROM_HERE, base::Bind(&GpuChannelHost::MessageFilter::OnChannelError,
- channel_filter_.get()));
- }
-
- return NULL;
+ return nullptr;
}
scoped_ptr<CommandBufferProxyImpl> command_buffer =
- make_scoped_ptr(new CommandBufferProxyImpl(this, route_id, stream_id));
- AddRoute(route_id, command_buffer->AsWeakPtr());
+ make_scoped_ptr(new CommandBufferProxyImpl(
+ this, std::move(command_buffer_transport), stream_id));
return command_buffer;
}
@@ -252,123 +176,85 @@ scoped_ptr<CommandBufferProxyImpl> GpuChannelHost::CreateOffscreenCommandBuffer(
DCHECK(!share_group || (stream_id == share_group->stream_id()));
TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer");
- GPUCreateCommandBufferConfig init_params;
+ GpuCreateCommandBufferConfig init_params;
init_params.share_group_id =
- share_group ? share_group->route_id() : MSG_ROUTING_NONE;
+ share_group ? share_group->transport()->GetShareGroupID() : -2;
init_params.stream_id = stream_id;
init_params.stream_priority = stream_priority;
init_params.attribs = attribs;
init_params.active_url = active_url;
init_params.gpu_preference = gpu_preference;
- int32_t route_id = GenerateRouteID();
+ scoped_ptr<CommandBufferIPCTransport> command_buffer_transport(
+ GpuHostIPCTransportFactory::Get()->CreateCommandBufferIPCTransport());
bool succeeded = false;
- if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(
- size, init_params, route_id, &succeeded))) {
- LOG(ERROR) << "Failed to send GpuChannelMsg_CreateOffscreenCommandBuffer.";
- return NULL;
+ if (!transport_->CreateOffscreenCommandBuffer(
+ size, init_params, command_buffer_transport.get(), &succeeded)) {
+ LOG(ERROR) << "Failed to send CreateOffscreenCommandBuffer.";
+ return nullptr;
}
if (!succeeded) {
- LOG(ERROR)
- << "GpuChannelMsg_CreateOffscreenCommandBuffer returned failure.";
- return NULL;
+ LOG(ERROR) << "CreateOffscreenCommandBuffer returned failure.";
+ return nullptr;
}
scoped_ptr<CommandBufferProxyImpl> command_buffer =
- make_scoped_ptr(new CommandBufferProxyImpl(this, route_id, stream_id));
- AddRoute(route_id, command_buffer->AsWeakPtr());
+ make_scoped_ptr(new CommandBufferProxyImpl(
+ this, std::move(command_buffer_transport), stream_id));
return command_buffer;
}
scoped_ptr<media::JpegDecodeAccelerator> GpuChannelHost::CreateJpegDecoder(
media::JpegDecodeAccelerator::Client* client) {
- TRACE_EVENT0("gpu", "GpuChannelHost::CreateJpegDecoder");
-
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
- factory_->GetIOThreadTaskRunner();
- int32_t route_id = GenerateRouteID();
- scoped_ptr<GpuJpegDecodeAcceleratorHost> decoder(
- new GpuJpegDecodeAcceleratorHost(this, route_id, io_task_runner));
- if (!decoder->Initialize(client)) {
+ scoped_ptr<GpuJpegDecodeAcceleratorHostIPCTransport> jpeg_decode_transport(
+ GpuHostIPCTransportFactory::Get()
+ ->CreateJpegDecodeAcceleratorHostIPCTransport());
+
+ bool succeeded = false;
+ if (!transport_->CreateJpegDecoder(jpeg_decode_transport.get(), &succeeded)) {
+ LOG(ERROR) << "Failed to send CreateJpegDecoder";
+ return nullptr;
+ }
+
+ if (!succeeded) {
+ LOG(ERROR) << "CreateJpegDecoder returned failure.";
return nullptr;
}
- // The reply message of jpeg decoder should run on IO thread.
- io_task_runner->PostTask(FROM_HERE,
- base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
- channel_filter_.get(), route_id,
- decoder->GetReceiver(), io_task_runner));
+ scoped_ptr<media::JpegDecodeAccelerator> decoder(
+ new GpuJpegDecodeAcceleratorHost(this, std::move(jpeg_decode_transport)));
+ if (!decoder->Initialize(client))
+ return nullptr;
- return std::move(decoder);
+ return decoder;
}
void GpuChannelHost::DestroyCommandBuffer(
CommandBufferProxyImpl* command_buffer) {
TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");
- int32_t route_id = command_buffer->route_id();
+ transport_->DestroyCommandBuffer(command_buffer->transport());
int32_t stream_id = command_buffer->stream_id();
- Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
- RemoveRoute(route_id);
AutoLock lock(context_lock_);
StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
- if (flush_info.flush_pending && flush_info.route_id == route_id)
+ if (flush_info.flush_pending &&
+ flush_info.transport == command_buffer->transport())
flush_info.flush_pending = false;
}
void GpuChannelHost::DestroyChannel() {
- DCHECK(factory_->IsMainThread());
AutoLock lock(context_lock_);
- channel_.reset();
-}
-
-void GpuChannelHost::AddRoute(
- int route_id, base::WeakPtr<IPC::Listener> listener) {
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
- factory_->GetIOThreadTaskRunner();
- io_task_runner->PostTask(FROM_HERE,
- base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
- channel_filter_.get(), route_id, listener,
- base::ThreadTaskRunnerHandle::Get()));
-}
-
-void GpuChannelHost::RemoveRoute(int route_id) {
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
- factory_->GetIOThreadTaskRunner();
- io_task_runner->PostTask(
- FROM_HERE, base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
- channel_filter_.get(), route_id));
+ transport_.reset();
}
base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
base::SharedMemoryHandle source_handle) {
- if (IsLost())
- return base::SharedMemory::NULLHandle();
-
-#if defined(OS_WIN) || defined(OS_MACOSX)
- // Windows and Mac need to explicitly duplicate the handle out to another
- // process.
- base::SharedMemoryHandle target_handle;
- base::ProcessId peer_pid;
- {
- AutoLock lock(context_lock_);
- if (!channel_)
- return base::SharedMemory::NULLHandle();
- peer_pid = channel_->GetPeerPID();
- }
- bool success = BrokerDuplicateSharedMemoryHandle(source_handle, peer_pid,
- &target_handle);
- if (!success)
- return base::SharedMemory::NULLHandle();
-
- return target_handle;
-#else
- return base::SharedMemory::DuplicateHandle(source_handle);
-#endif // defined(OS_WIN) || defined(OS_MACOSX)
+ AutoLock lock(context_lock_);
+ return transport_->ShareToGpuProcess(source_handle);
}
int32_t GpuChannelHost::ReserveTransferBufferId() {
@@ -404,10 +290,6 @@ int32_t GpuChannelHost::ReserveImageId() {
return next_image_id_.GetNext();
}
-int32_t GpuChannelHost::GenerateRouteID() {
- return next_route_id_.GetNext();
-}
-
int32_t GpuChannelHost::GenerateStreamID() {
const int32_t stream_id = next_stream_id_.GetNext();
DCHECK_NE(0, stream_id);
@@ -444,7 +326,7 @@ uint32_t GpuChannelHost::ValidateFlushIDReachedServer(int32_t stream_id,
return verified_stream_flush_id;
}
- if (Send(new GpuChannelMsg_Nop())) {
+ if (transport_->Nop()) {
// Update verified flush id for all streams.
uint32_t highest_flush_id = 0;
AutoLock lock(context_lock_);
@@ -475,78 +357,9 @@ uint32_t GpuChannelHost::GetHighestValidatedFlushID(int32_t stream_id) {
GpuChannelHost::~GpuChannelHost() {
#if DCHECK_IS_ON()
AutoLock lock(context_lock_);
- DCHECK(!channel_)
+ DCHECK(!transport_)
<< "GpuChannelHost::DestroyChannel must be called before destruction.";
#endif
}
-GpuChannelHost::MessageFilter::ListenerInfo::ListenerInfo() {}
-
-GpuChannelHost::MessageFilter::ListenerInfo::~ListenerInfo() {}
-
-GpuChannelHost::MessageFilter::MessageFilter()
- : lost_(false) {
-}
-
-GpuChannelHost::MessageFilter::~MessageFilter() {}
-
-void GpuChannelHost::MessageFilter::AddRoute(
- int32_t route_id,
- base::WeakPtr<IPC::Listener> listener,
- scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
- DCHECK(listeners_.find(route_id) == listeners_.end());
- DCHECK(task_runner);
- ListenerInfo info;
- info.listener = listener;
- info.task_runner = task_runner;
- listeners_[route_id] = info;
-}
-
-void GpuChannelHost::MessageFilter::RemoveRoute(int32_t route_id) {
- listeners_.erase(route_id);
-}
-
-bool GpuChannelHost::MessageFilter::OnMessageReceived(
- const IPC::Message& message) {
- // Never handle sync message replies or we will deadlock here.
- if (message.is_reply())
- return false;
-
- auto it = listeners_.find(message.routing_id());
- if (it == listeners_.end())
- return false;
-
- const ListenerInfo& info = it->second;
- info.task_runner->PostTask(
- FROM_HERE,
- base::Bind(base::IgnoreResult(&IPC::Listener::OnMessageReceived),
- info.listener, message));
- return true;
-}
-
-void GpuChannelHost::MessageFilter::OnChannelError() {
- // Set the lost state before signalling the proxies. That way, if they
- // themselves post a task to recreate the context, they will not try to re-use
- // this channel host.
- {
- AutoLock lock(lock_);
- lost_ = true;
- }
-
- // Inform all the proxies that an error has occurred. This will be reported
- // via OpenGL as a lost context.
- for (const auto& kv : listeners_) {
- const ListenerInfo& info = kv.second;
- info.task_runner->PostTask(
- FROM_HERE, base::Bind(&IPC::Listener::OnChannelError, info.listener));
- }
-
- listeners_.clear();
-}
-
-bool GpuChannelHost::MessageFilter::IsLost() const {
- AutoLock lock(lock_);
- return lost_;
-}
-
} // namespace content
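
For comparison with the Connect()-based setup this patch removes, a caller of the new Create() would look roughly like the sketch below. Only the command-buffer and JPEG-decoder factory methods appear in this file, so CreateGpuChannelHostIPCTransport() is a hypothetical name used purely to illustrate the ownership transfer.

// Hypothetical call site under this patch; CreateGpuChannelHostIPCTransport()
// is an assumed factory method, not one shown in the diff.
scoped_ptr<GpuChannelHostIPCTransport> transport(
    GpuHostIPCTransportFactory::Get()->CreateGpuChannelHostIPCTransport());
scoped_refptr<GpuChannelHost> host = GpuChannelHost::Create(
    std::move(transport), gpu_info, gpu_memory_buffer_manager);

// Previously the host opened the IPC::SyncChannel itself:
//   scoped_refptr<GpuChannelHost> host = GpuChannelHost::Create(
//       factory, channel_id, gpu_info, channel_handle, shutdown_event,
//       gpu_memory_buffer_manager);

Moving channel setup out of GpuChannelHost is also what makes the patch description's TODO about eliminating the ChildThreadImpl dependency feasible: the host no longer needs the factory's main-thread checks or its IO-thread task runner.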