Chromium Code Reviews

Unified Diff: content/common/gpu/client/command_buffer_proxy_impl.cc

Issue 1656433002: Sample code: IPC Transport object for GPU
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: GpuMemoryBufferService + Transport object. TODO: Eliminate ChildThreadImpl dependency
Created 4 years, 10 months ago
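
The patch replaces CommandBufferProxyImpl's direct GpuCommandBufferMsg_* sends (keyed by route_id_) with calls on a CommandBufferIPCTransport that the proxy now owns. The transport interface itself is declared in other files of this patch set and is not visible on this page; the sketch below is reconstructed only from the call sites in this file, so the method names mirror those calls, but the exact signatures, header location, and shape of the client interface are assumptions.

// Hypothetical sketch, inferred from the call sites in this file only; the
// real declaration lives elsewhere in the patch set and may differ.
class CommandBufferIPCTransport {
 public:
  virtual ~CommandBufferIPCTransport() {}

  // The proxy registers itself to receive decoded messages (console messages,
  // context-destroyed notifications, signal acks, vsync updates) plus the
  // OnWillHandleMessage()/OnDidHandleMessage() bracketing calls that take and
  // release the proxy's lock around dispatch. The real patch may use a
  // narrower client interface instead of the proxy type itself.
  virtual void SetClient(CommandBufferProxyImpl* client) = 0;

  // Identity previously derived from (channel_id << 32) | route_id.
  virtual uint64_t GetCommandBufferID() = 0;

  // Replaces channel_->factory()->AllocateSharedMemory().
  virtual scoped_ptr<base::SharedMemory> AllocateSharedMemory(size_t size) = 0;

  // Calls that previously went through Send(new GpuCommandBufferMsg_*(
  // route_id_, ...)).
  virtual bool Initialize(base::SharedMemoryHandle shared_state,
                          bool* result,
                          gpu::Capabilities* capabilities) = 0;
  virtual bool WaitForTokenInRange(int32_t start,
                                   int32_t end,
                                   gpu::CommandBuffer::State* state) = 0;
  virtual bool WaitForGetOffsetInRange(int32_t start,
                                       int32_t end,
                                       gpu::CommandBuffer::State* state) = 0;
  virtual void SetGetBuffer(int32_t shm_id) = 0;
  virtual bool RegisterTransferBuffer(int32_t id,
                                      const base::SharedMemoryHandle& handle,
                                      size_t size) = 0;
  virtual void DestroyTransferBuffer(int32_t id) = 0;
  virtual bool CreateImage(const CreateImageParams& params) = 0;
  virtual void DestroyImage(int32_t id) = 0;
  virtual void CreateStreamTexture(uint32_t texture_id,
                                   int32_t* stream_id,
                                   bool* succeeded) = 0;
  virtual bool SignalSyncToken(const gpu::SyncToken& sync_token,
                               uint32_t signal_id) = 0;
  virtual bool SignalQuery(uint32_t query, uint32_t signal_id) = 0;
  virtual bool ProduceFrontBuffer(const gpu::Mailbox& mailbox) = 0;
};

This is also why Send() and OnMessageReceived() disappear from this file in the diff below: message serialization and dispatch now happen behind the transport, with OnWillHandleMessage()/OnDidHandleMessage() bracketing dispatch so the proxy's optional lock is still honored.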
Index: content/common/gpu/client/command_buffer_proxy_impl.cc
diff --git a/content/common/gpu/client/command_buffer_proxy_impl.cc b/content/common/gpu/client/command_buffer_proxy_impl.cc
index 027936fd1d1d355f1cb50ae9003487dc3cb9cf6e..88eb485b5b2a83f9fbbb6fcbdc6d3e0a1ac4bd25 100644
--- a/content/common/gpu/client/command_buffer_proxy_impl.cc
+++ b/content/common/gpu/client/command_buffer_proxy_impl.cc
@@ -14,10 +14,12 @@
#include "base/trace_event/trace_event.h"
#include "content/common/child_process_messages.h"
#include "content/common/gpu/client/gpu_channel_host.h"
+#include "content/common/gpu/client/gpu_channel_host_factory.h"
#include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
#include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "content/common/view_messages.h"
+#include "content/common/gpu/client/ipc/gpu_host_ipc_transport_factory.h"
+#include "content/common/gpu/command_buffer_console_message.h"
+#include "content/common/gpu/create_image_params.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer_shared.h"
@@ -29,21 +31,13 @@
namespace content {
-namespace {
-
-uint64_t CommandBufferProxyID(int channel_id, int32_t route_id) {
- return (static_cast<uint64_t>(channel_id) << 32) | route_id;
-}
-
-} // namespace
-
-CommandBufferProxyImpl::CommandBufferProxyImpl(GpuChannelHost* channel,
- int32_t route_id,
- int32_t stream_id)
+CommandBufferProxyImpl::CommandBufferProxyImpl(
+ GpuChannelHost* channel,
+ scoped_ptr<CommandBufferIPCTransport> transport,
+ int32_t stream_id)
: lock_(nullptr),
channel_(channel),
- command_buffer_id_(CommandBufferProxyID(channel->channel_id(), route_id)),
- route_id_(route_id),
+ transport_(std::move(transport)),
stream_id_(stream_id),
flush_count_(0),
last_put_offset_(-1),
@@ -54,44 +48,19 @@ CommandBufferProxyImpl::CommandBufferProxyImpl(GpuChannelHost* channel,
next_signal_id_(0),
weak_this_(AsWeakPtr()),
callback_thread_(base::ThreadTaskRunnerHandle::Get()) {
+ transport_->SetClient(this);
DCHECK(channel);
DCHECK(stream_id);
}
CommandBufferProxyImpl::~CommandBufferProxyImpl() {
- FOR_EACH_OBSERVER(DeletionObserver,
- deletion_observers_,
- OnWillDeleteImpl());
+ FOR_EACH_OBSERVER(DeletionObserver, deletion_observers_, OnWillDeleteImpl());
if (channel_) {
channel_->DestroyCommandBuffer(this);
channel_ = nullptr;
}
}
-bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
- scoped_ptr<base::AutoLock> lock;
- if (lock_)
- lock.reset(new base::AutoLock(*lock_));
- bool handled = true;
- IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalAck,
- OnSignalAck);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SwapBuffersCompleted,
- OnSwapBuffersCompleted);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_UpdateVSyncParameters,
- OnUpdateVSyncParameters);
- IPC_MESSAGE_UNHANDLED(handled = false)
- IPC_END_MESSAGE_MAP()
-
- if (!handled) {
- DLOG(ERROR) << "Gpu process sent invalid message.";
- InvalidGpuMessage();
- }
- return handled;
-}
-
void CommandBufferProxyImpl::OnChannelError() {
scoped_ptr<base::AutoLock> lock;
if (lock_)
@@ -109,6 +78,13 @@ void CommandBufferProxyImpl::OnChannelError() {
OnDestroyed(context_lost_reason, gpu::error::kLostContext);
}
+void CommandBufferProxyImpl::OnConsoleMessage(
+ const CommandBufferConsoleMessage& message) {
+ if (!console_message_callback_.is_null()) {
+ console_message_callback_.Run(message.message, message.id);
+ }
+}
+
void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason,
gpu::error::Error error) {
CheckLock();
@@ -130,10 +106,10 @@ void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason,
}
}
-void CommandBufferProxyImpl::OnConsoleMessage(
- const GPUCommandBufferConsoleMessage& message) {
- if (!console_message_callback_.is_null()) {
- console_message_callback_.Run(message.message, message.id);
+void CommandBufferProxyImpl::OnDidHandleMessage() {
+ if (lock_) {
+ lock_->AssertAcquired();
+ lock_->Release();
}
}
@@ -172,8 +148,8 @@ void CommandBufferProxyImpl::SetContextLostCallback(
bool CommandBufferProxyImpl::Initialize() {
TRACE_EVENT0("gpu", "CommandBufferProxyImpl::Initialize");
- shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
- sizeof(*shared_state())).release());
+ shared_state_shm_.reset(
+ transport_->AllocateSharedMemory(sizeof(*shared_state())).release());
if (!shared_state_shm_)
return false;
@@ -191,8 +167,7 @@ bool CommandBufferProxyImpl::Initialize() {
return false;
bool result = false;
- if (!Send(new GpuCommandBufferMsg_Initialize(
- route_id_, handle, &result, &capabilities_))) {
+ if (!transport_->Initialize(handle, &result, &capabilities_)) {
LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
return false;
}
@@ -221,9 +196,7 @@ void CommandBufferProxyImpl::Flush(int32_t put_offset) {
if (last_state_.error != gpu::error::kNoError)
return;
- TRACE_EVENT1("gpu",
- "CommandBufferProxyImpl::Flush",
- "put_offset",
+ TRACE_EVENT1("gpu", "CommandBufferProxyImpl::Flush", "put_offset",
put_offset);
bool put_offset_changed = last_put_offset_ != put_offset;
@@ -232,7 +205,7 @@ void CommandBufferProxyImpl::Flush(int32_t put_offset) {
if (channel_) {
const uint32_t flush_id = channel_->OrderingBarrier(
- route_id_, stream_id_, put_offset, ++flush_count_, latency_info_,
+ transport_.get(), stream_id_, put_offset, ++flush_count_, latency_info_,
put_offset_changed, true);
if (put_offset_changed) {
DCHECK(flush_id);
@@ -261,7 +234,7 @@ void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) {
if (channel_) {
const uint32_t flush_id = channel_->OrderingBarrier(
- route_id_, stream_id_, put_offset, ++flush_count_, latency_info_,
+ transport_.get(), stream_id_, put_offset, ++flush_count_, latency_info_,
put_offset_changed, false);
if (put_offset_changed) {
DCHECK(flush_id);
@@ -299,18 +272,13 @@ void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback(
void CommandBufferProxyImpl::WaitForTokenInRange(int32_t start, int32_t end) {
CheckLock();
- TRACE_EVENT2("gpu",
- "CommandBufferProxyImpl::WaitForToken",
- "start",
- start,
- "end",
- end);
+ TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForToken", "start", start,
+ "end", end);
TryUpdateState();
if (!InRange(start, end, last_state_.token) &&
last_state_.error == gpu::error::kNoError) {
gpu::CommandBuffer::State state;
- if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(
- route_id_, start, end, &state)))
+ if (transport_->WaitForTokenInRange(start, end, &state))
OnUpdateState(state);
}
if (!InRange(start, end, last_state_.token) &&
@@ -323,18 +291,13 @@ void CommandBufferProxyImpl::WaitForTokenInRange(int32_t start, int32_t end) {
void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32_t start,
int32_t end) {
CheckLock();
- TRACE_EVENT2("gpu",
- "CommandBufferProxyImpl::WaitForGetOffset",
- "start",
- start,
- "end",
- end);
+ TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForGetOffset", "start",
+ start, "end", end);
TryUpdateState();
if (!InRange(start, end, last_state_.get_offset) &&
last_state_.error == gpu::error::kNoError) {
gpu::CommandBuffer::State state;
- if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(
- route_id_, start, end, &state)))
+ if (transport_->WaitForGetOffsetInRange(start, end, &state))
OnUpdateState(state);
}
if (!InRange(start, end, last_state_.get_offset) &&
@@ -349,7 +312,7 @@ void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) {
if (last_state_.error != gpu::error::kNoError)
return;
- Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
+ transport_->SetGetBuffer(shm_id);
last_put_offset_ = -1;
}
@@ -365,7 +328,7 @@ scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
int32_t new_id = channel_->ReserveTransferBufferId();
scoped_ptr<base::SharedMemory> shared_memory(
- channel_->factory()->AllocateSharedMemory(size));
+ transport_->AllocateSharedMemory(size));
if (!shared_memory) {
if (last_state_.error == gpu::error::kNoError)
last_state_.error = gpu::error::kOutOfBounds;
@@ -390,10 +353,7 @@ scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
return NULL;
}
- if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
- new_id,
- handle,
- size))) {
+ if (!transport_->RegisterTransferBuffer(new_id, handle, size)) {
return NULL;
}
@@ -408,7 +368,7 @@ void CommandBufferProxyImpl::DestroyTransferBuffer(int32_t id) {
if (last_state_.error != gpu::error::kNoError)
return;
- Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
+ transport_->DestroyTransferBuffer(id);
}
gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() {
@@ -454,7 +414,7 @@ int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
internal_format, gpu_memory_buffer->GetFormat()));
- GpuCommandBufferMsg_CreateImage_Params params;
+ CreateImageParams params;
params.id = new_id;
params.gpu_memory_buffer = handle;
params.size = gfx::Size(width, height);
@@ -462,7 +422,7 @@ int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
params.internal_format = internal_format;
params.image_release_count = image_fence_sync;
- if (!Send(new GpuCommandBufferMsg_CreateImage(route_id_, params)))
+ if (!transport_->CreateImage(params))
return -1;
if (image_fence_sync) {
@@ -485,7 +445,7 @@ void CommandBufferProxyImpl::DestroyImage(int32_t id) {
if (last_state_.error != gpu::error::kNoError)
return;
- Send(new GpuCommandBufferMsg_DestroyImage(route_id_, id));
+ transport_->DestroyImage(id);
}
int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage(
@@ -510,10 +470,9 @@ uint32_t CommandBufferProxyImpl::CreateStreamTexture(uint32_t texture_id) {
if (last_state_.error != gpu::error::kNoError)
return 0;
- int32_t stream_id = channel_->GenerateRouteID();
+ int32_t stream_id = MSG_ROUTING_NONE;
bool succeeded = false;
- Send(new GpuCommandBufferMsg_CreateStreamTexture(
- route_id_, texture_id, stream_id, &succeeded));
+ transport_->CreateStreamTexture(texture_id, &stream_id, &succeeded);
if (!succeeded) {
DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure";
return 0;
@@ -539,7 +498,7 @@ gpu::CommandBufferNamespace CommandBufferProxyImpl::GetNamespaceID() const {
}
uint64_t CommandBufferProxyImpl::GetCommandBufferID() const {
- return command_buffer_id_;
+ return transport_->GetCommandBufferID();
}
int32_t CommandBufferProxyImpl::GetExtraCommandBufferData() const {
@@ -590,9 +549,7 @@ void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token,
return;
uint32_t signal_id = next_signal_id_++;
- if (!Send(new GpuCommandBufferMsg_SignalSyncToken(route_id_,
- sync_token,
- signal_id))) {
+ if (!transport_->SignalSyncToken(sync_token, signal_id)) {
return;
}
@@ -603,7 +560,7 @@ bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken(
const gpu::SyncToken* sync_token) {
// Can only wait on an unverified sync token if it is from the same channel.
const uint64_t token_channel = sync_token->command_buffer_id() >> 32;
- const uint64_t channel = command_buffer_id_ >> 32;
+ const uint64_t channel = transport_->GetCommandBufferID() >> 32;
if (sync_token->namespace_id() != gpu::CommandBufferNamespace::GPU_IO ||
token_channel != channel) {
return false;
@@ -635,9 +592,7 @@ void CommandBufferProxyImpl::SignalQuery(uint32_t query,
// could do that, all they would do is to prevent some callbacks from getting
// called, leading to stalled threads and/or memory leaks.
uint32_t signal_id = next_signal_id_++;
- if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_,
- query,
- signal_id))) {
+ if (!transport_->SignalQuery(query, signal_id)) {
return;
}
@@ -649,60 +604,35 @@ bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
if (last_state_.error != gpu::error::kNoError)
return false;
- return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
+ return transport_->ProduceFrontBuffer(mailbox);
}
scoped_ptr<media::VideoDecodeAccelerator>
CommandBufferProxyImpl::CreateVideoDecoder() {
if (!channel_)
return scoped_ptr<media::VideoDecodeAccelerator>();
+ scoped_ptr<GpuVideoDecodeAcceleratorHostIPCTransport> transport(
+ GpuHostIPCTransportFactory::Get()
+ ->CreateVideoDecodeAcceleratorHostIPCTransport());
return scoped_ptr<media::VideoDecodeAccelerator>(
- new GpuVideoDecodeAcceleratorHost(channel_, this));
+ new GpuVideoDecodeAcceleratorHost(channel_, std::move(transport), this));
}
scoped_ptr<media::VideoEncodeAccelerator>
CommandBufferProxyImpl::CreateVideoEncoder() {
if (!channel_)
return scoped_ptr<media::VideoEncodeAccelerator>();
+ scoped_ptr<GpuVideoEncodeAcceleratorHostIPCTransport> transport(
+ GpuHostIPCTransportFactory::Get()
+ ->CreateVideoEncodeAcceleratorHostIPCTransport());
return scoped_ptr<media::VideoEncodeAccelerator>(
- new GpuVideoEncodeAcceleratorHost(channel_, this));
+ new GpuVideoEncodeAcceleratorHost(channel_, std::move(transport), this));
}
gpu::error::Error CommandBufferProxyImpl::GetLastError() {
return last_state_.error;
}
-bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
- // Caller should not intentionally send a message if the context is lost.
- DCHECK(last_state_.error == gpu::error::kNoError);
-
- if (channel_) {
- if (channel_->Send(msg)) {
- return true;
- } else {
- // Flag the command buffer as lost. Defer deleting the channel until
- // OnChannelError is called after returning to the message loop in case
- // it is referenced elsewhere.
- DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
- last_state_.error = gpu::error::kLostContext;
- return false;
- }
- }
-
- // Callee takes ownership of message, regardless of whether Send is
- // successful. See IPC::Sender.
- delete msg;
- return false;
-}
-
-void CommandBufferProxyImpl::OnUpdateState(
- const gpu::CommandBuffer::State& state) {
- // Handle wraparound. It works as long as we don't have more than 2B state
- // updates in flight across which reordering occurs.
- if (state.generation - last_state_.generation < 0x80000000U)
- last_state_ = state;
-}
-
void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
const GpuConsoleMessageCallback& callback) {
CheckLock();
@@ -744,12 +674,25 @@ void CommandBufferProxyImpl::OnSwapBuffersCompleted(
}
}
+void CommandBufferProxyImpl::OnUpdateState(
+ const gpu::CommandBuffer::State& state) {
+ // Handle wraparound. It works as long as we don't have more than 2B state
+ // updates in flight across which reordering occurs.
+ if (state.generation - last_state_.generation < 0x80000000U)
+ last_state_ = state;
+}
+
void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase,
base::TimeDelta interval) {
if (!update_vsync_parameters_completion_callback_.is_null())
update_vsync_parameters_completion_callback_.Run(timebase, interval);
}
+void CommandBufferProxyImpl::OnWillHandleMessage() {
+ if (lock_)
+ lock_->Acquire();
+}
+
void CommandBufferProxyImpl::InvalidGpuMessage() {
LOG(ERROR) << "Received invalid message from the GPU process.";
OnDestroyed(gpu::error::kInvalidGpuMessage, gpu::error::kLostContext);