| Index: gpu/command_buffer/service/in_process_command_buffer.cc
|
| diff --git a/gpu/command_buffer/service/in_process_command_buffer.cc b/gpu/command_buffer/service/in_process_command_buffer.cc
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..c1c2acfe7af27f7bbac5e731a663be13112fb3c1
|
| --- /dev/null
|
| +++ b/gpu/command_buffer/service/in_process_command_buffer.cc
|
| @@ -0,0 +1,610 @@
|
| +// Copyright (c) 2012 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#include "gpu/command_buffer/service/in_process_command_buffer.h"
|
| +
|
| +#include <queue>
|
| +#include <set>
|
| +#include <utility>
|
| +
|
| +#include <GLES2/gl2.h>
|
| +#ifndef GL_GLEXT_PROTOTYPES
|
| +#define GL_GLEXT_PROTOTYPES 1
|
| +#endif
|
| +#include <GLES2/gl2ext.h>
|
| +#include <GLES2/gl2extchromium.h>
|
| +
|
| +#include "base/bind.h"
|
| +#include "base/bind_helpers.h"
|
| +#include "base/lazy_instance.h"
|
| +#include "base/logging.h"
|
| +#include "base/memory/scoped_ptr.h"
|
| +#include "base/memory/weak_ptr.h"
|
| +#include "base/message_loop/message_loop.h"
|
| +#include "base/message_loop/message_loop_proxy.h"
|
| +#include "base/threading/thread.h"
|
| +#include "gpu/command_buffer/client/share_group.h"
|
| +#include "gpu/command_buffer/common/id_allocator.h"
|
| +#include "gpu/command_buffer/service/command_buffer_service.h"
|
| +#include "gpu/command_buffer/service/context_group.h"
|
| +#include "gpu/command_buffer/service/gl_context_virtual.h"
|
| +#include "gpu/command_buffer/service/gpu_scheduler.h"
|
| +#include "gpu/command_buffer/service/image_manager.h"
|
| +#include "gpu/command_buffer/service/transfer_buffer_manager.h"
|
| +#include "ui/gfx/size.h"
|
| +#include "ui/gl/gl_context.h"
|
| +#include "ui/gl/gl_image.h"
|
| +#include "ui/gl/gl_share_group.h"
|
| +#include "ui/gl/gl_surface.h"
|
| +
|
| +namespace gpu {
|
| +
|
| +namespace {
|
| +
|
| +static base::LazyInstance<std::set<InProcessCommandBuffer*> >
|
| + g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER;
|
| +
|
| +static bool g_use_virtualized_gl_context = false;
|
| +
|
| +template <typename T>
|
| +static void RunTaskWithResult(base::Callback<T(void)> task,
|
| + T* result,
|
| + base::WaitableEvent* completion) {
|
| + *result = task.Run();
|
| + completion->Signal();
|
| +}
|
| +
|
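| +// A thread that starts itself on construction and stops on destruction. GPU
| +// work for in-process command buffers is posted to it unless a schedule
| +// callback has been installed on the GpuCommandQueue below.
|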
| +class GpuInProcessThread : public base::Thread {
|
| + public:
|
| + GpuInProcessThread();
|
| + virtual ~GpuInProcessThread();
|
| +
|
| + private:
|
| + DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
|
| +};
|
| +
|
| +GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
|
| + Start();
|
| +}
|
| +
|
| +GpuInProcessThread::~GpuInProcessThread() {
|
| + Stop();
|
| +}
|
| +
|
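| +// FIFO of tasks bound for the GPU thread. QueueTask() either notifies the
| +// client through the schedule callback (which is expected to call RunTasks()
| +// later on its own thread) or posts RunTasks() to the GpuInProcessThread.
|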
| +class GpuCommandQueue {
|
| + public:
|
| + GpuCommandQueue();
|
| + virtual ~GpuCommandQueue();
|
| +
|
| + void QueueTask(const base::Closure& task);
|
| + void RunTasks();
|
| + void SetScheduleCallback(const base::Closure& callback);
|
| +
|
| + private:
|
| + base::Lock tasks_lock_;
|
| + std::queue<base::Closure> tasks_;
|
| +
|
| + base::Closure schedule_callback_;
|
| + base::LazyInstance<GpuInProcessThread>::Leaky thread_;
|
| +
|
| + DISALLOW_COPY_AND_ASSIGN(GpuCommandQueue);
|
| +};
|
| +
|
| +GpuCommandQueue::GpuCommandQueue() {}
|
| +
|
| +GpuCommandQueue::~GpuCommandQueue() {
|
| + base::AutoLock lock(tasks_lock_);
|
| + DCHECK(tasks_.empty());
|
| +}
|
| +
|
| +void GpuCommandQueue::QueueTask(const base::Closure& task) {
|
| + {
|
| + base::AutoLock lock(tasks_lock_);
|
| + tasks_.push(task);
|
| + }
|
| +
|
| + if (!schedule_callback_.is_null()) {
|
| + schedule_callback_.Run();
|
| + return;
|
| + }
|
| + thread_.Get().message_loop()
|
| + ->PostTask(FROM_HERE,
|
| + base::Bind(&GpuCommandQueue::RunTasks,
|
| + base::Unretained(this)));
|
| +}
|
| +
|
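| +// Drains the queue without holding the lock while a task runs; the size is
| +// re-read after each task so work queued while draining also gets executed.
|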
| +void GpuCommandQueue::RunTasks() {
|
| + size_t num_tasks;
|
| + {
|
| + base::AutoLock lock(tasks_lock_);
|
| + num_tasks = tasks_.size();
|
| + }
|
| +
|
| + while (num_tasks) {
|
| + base::Closure task;
|
| + {
|
| + base::AutoLock lock(tasks_lock_);
|
| + task = tasks_.front();
|
| + tasks_.pop();
|
| + num_tasks = tasks_.size();
|
| + }
|
| +
|
| + task.Run();
|
| + }
|
| +}
|
| +
|
| +void GpuCommandQueue::SetScheduleCallback(const base::Closure& callback) {
|
| + DCHECK(schedule_callback_.is_null());
|
| + schedule_callback_ = callback;
|
| +}
|
| +
|
| +static base::LazyInstance<GpuCommandQueue>::Leaky g_gpu_queue =
|
| + LAZY_INSTANCE_INITIALIZER;
|
| +
|
| +static void QueueTask(const base::Closure& task) {
|
| + g_gpu_queue.Get().QueueTask(task);
|
| +}
|
| +
|
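| +// Signals the wrapped WaitableEvent when it goes out of scope.
|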
| +class ScopedEvent {
|
| + public:
|
| + explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {}
|
| + ~ScopedEvent() { event_->Signal(); }
|
| +
|
| + private:
|
| + base::WaitableEvent* event_;
|
| +};
|
| +
|
| +} // anonymous namespace
|
| +
|
| +InProcessCommandBuffer::InProcessCommandBuffer()
|
| + : context_lost_(false),
|
| + last_put_offset_(-1),
|
| + flush_event_(false, false) {}
|
| +
|
| +InProcessCommandBuffer::~InProcessCommandBuffer() {
|
| + Destroy();
|
| +}
|
| +
|
| +bool InProcessCommandBuffer::IsContextLost() {
|
| + if (context_lost_ || !command_buffer_) {
|
| + return true;
|
| + }
|
| + CommandBuffer::State state = GetState();
|
| + return error::IsError(state.error);
|
| +}
|
| +
|
| +void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
|
| + DCHECK(!surface_->IsOffscreen());
|
| + surface_->Resize(size);
|
| +}
|
| +
|
| +bool InProcessCommandBuffer::MakeCurrent() {
|
| + command_buffer_lock_.AssertAcquired();
|
| +
|
| + if (decoder_->MakeCurrent())
|
| + return true;
|
| + DLOG(ERROR) << "Context lost because MakeCurrent failed.";
|
| + command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
|
| + command_buffer_->SetParseError(gpu::error::kLostContext);
|
| + return false;
|
| +}
|
| +
|
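| +// Runs on the GPU thread with command_buffer_lock_ held: makes the decoder's
| +// context current, processes commands up to the current put offset, and
| +// signals flush_event_ on exit so FlushSync() can wake up.
|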
| +void InProcessCommandBuffer::PumpCommands() {
|
| + ScopedEvent handle_flush(&flush_event_);
|
| + command_buffer_lock_.AssertAcquired();
|
| +
|
| + if (!MakeCurrent())
|
| + return;
|
| +
|
| + gpu_scheduler_->PutChanged();
|
| + CommandBuffer::State state = command_buffer_->GetState();
|
| + DCHECK((!error::IsError(state.error) && !context_lost_) ||
|
| + (error::IsError(state.error) && context_lost_));
|
| +}
|
| +
|
| +bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
|
| + command_buffer_lock_.AssertAcquired();
|
| + command_buffer_->SetGetBuffer(transfer_buffer_id);
|
| + return true;
|
| +}
|
| +
|
| +bool InProcessCommandBuffer::Initialize(
|
| + bool is_offscreen,
|
| + bool share_resources,
|
| + gfx::AcceleratedWidget window,
|
| + const gfx::Size& size,
|
| + const char* allowed_extensions,
|
| + const std::vector<int32>& attribs,
|
| + gfx::GpuPreference gpu_preference,
|
| + const base::Closure& context_lost_callback,
|
| + scoped_refptr<gles2::ShareGroup>* client_share_group) {
|
| +
|
| + share_resources_ = share_resources;
|
| + context_lost_callback_ = WrapCallback(context_lost_callback);
|
| +
|
| + base::WaitableEvent completion(true, false);
|
| + bool result;
|
| + base::Callback<bool(void)> init_task =
|
| + base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
|
| + base::Unretained(this),
|
| + is_offscreen,
|
| + window,
|
| + size,
|
| + allowed_extensions,
|
| + attribs,
|
| + gpu_preference);
|
| + QueueTask(
|
| + base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
|
| + completion.Wait();
|
| + *client_share_group = client_share_group_;
|
| + return result;
|
| +}
|
| +
|
| +bool InProcessCommandBuffer::InitializeOnGpuThread(
|
| + bool is_offscreen,
|
| + gfx::AcceleratedWidget window,
|
| + const gfx::Size& size,
|
| + const char* allowed_extensions,
|
| + const std::vector<int32>& attribs,
|
| + gfx::GpuPreference gpu_preference) {
|
| + // Use one share group for all contexts.
|
| + CR_DEFINE_STATIC_LOCAL(scoped_refptr<gfx::GLShareGroup>, share_group,
|
| + (new gfx::GLShareGroup));
|
| + CR_DEFINE_STATIC_LOCAL(scoped_refptr<gles2::ShareGroup>, client_share_group,
|
| + (new gles2::ShareGroup(true, false)));
|
| +
|
| + DCHECK(size.width() >= 0 && size.height() >= 0);
|
| +
|
| + TransferBufferManager* manager = new TransferBufferManager();
|
| + transfer_buffer_manager_.reset(manager);
|
| + manager->Initialize();
|
| +
|
| + scoped_ptr<CommandBufferService> command_buffer(
|
| + new CommandBufferService(transfer_buffer_manager_.get()));
|
| + command_buffer->SetPutOffsetChangeCallback(base::Bind(
|
| + &InProcessCommandBuffer::PumpCommands, base::Unretained(this)));
|
| + command_buffer->SetParseErrorCallback(base::Bind(
|
| + &InProcessCommandBuffer::OnContextLost, base::Unretained(this)));
|
| +
|
| + if (!command_buffer->Initialize()) {
|
| + LOG(ERROR) << "Could not initialize command buffer.";
|
| + Destroy();
|
| + return false;
|
| + }
|
| +
|
| + InProcessCommandBuffer* context_group = NULL;
|
| +
|
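| + // If resource sharing was requested, reuse the ContextGroup of a live
| + // shared context; if all existing shared contexts are lost, fall back to
| + // fresh GL and client share groups.
|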
| + if (share_resources_ && !g_all_shared_contexts.Get().empty()) {
|
| + for (std::set<InProcessCommandBuffer*>::iterator it =
|
| + g_all_shared_contexts.Get().begin();
|
| + it != g_all_shared_contexts.Get().end();
|
| + ++it) {
|
| + if (!(*it)->IsContextLost()) {
|
| + context_group = *it;
|
| + break;
|
| + }
|
| + }
|
| + if (!context_group) {
|
| + share_group = new gfx::GLShareGroup;
|
| + client_share_group = new gles2::ShareGroup(true, false);
|
| + }
|
| + }
|
| +
|
| + if (share_resources_)
|
| + client_share_group_ = client_share_group;
|
| +
|
| + // TODO(gman): This needs to be true if this is Pepper.
|
| + bool bind_generates_resource = false;
|
| + decoder_.reset(gles2::GLES2Decoder::Create(
|
| + context_group ? context_group->decoder_->GetContextGroup()
|
| + : new gles2::ContextGroup(
|
| + NULL, NULL, NULL, NULL, bind_generates_resource)));
|
| +
|
| + gpu_scheduler_.reset(
|
| + new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
|
| + command_buffer->SetGetBufferChangeCallback(base::Bind(
|
| + &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
|
| + command_buffer_ = command_buffer.Pass();
|
| +
|
| + decoder_->set_engine(gpu_scheduler_.get());
|
| +
|
| + if (is_offscreen)
|
| + surface_ = gfx::GLSurface::CreateOffscreenGLSurface(size);
|
| + else
|
| + surface_ = gfx::GLSurface::CreateViewGLSurface(window);
|
| +
|
| + if (!surface_.get()) {
|
| + LOG(ERROR) << "Could not create GLSurface.";
|
| + Destroy();
|
| + return false;
|
| + }
|
| +
|
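| + // With virtualized GL contexts, one real context is shared through the
| + // share group and this decoder gets a GLContextVirtual layered on top.
|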
| + if (g_use_virtualized_gl_context) {
|
| + context_ = share_group->GetSharedContext();
|
| + if (!context_.get()) {
|
| + context_ = gfx::GLContext::CreateGLContext(
|
| + share_group.get(), surface_.get(), gpu_preference);
|
| + share_group->SetSharedContext(context_.get());
|
| + }
|
| +
|
| + context_ = new GLContextVirtual(
|
| + share_group.get(), context_.get(), decoder_->AsWeakPtr());
|
| + if (context_->Initialize(surface_.get(), gpu_preference)) {
|
| + VLOG(1) << "Created virtual GL context.";
|
| + } else {
|
| + context_ = NULL;
|
| + }
|
| + } else {
|
| + context_ = gfx::GLContext::CreateGLContext(
|
| + share_group.get(), surface_.get(), gpu_preference);
|
| + }
|
| +
|
| + if (!context_.get()) {
|
| + LOG(ERROR) << "Could not create GLContext.";
|
| + Destroy();
|
| + return false;
|
| + }
|
| +
|
| + if (!context_->MakeCurrent(surface_.get())) {
|
| + LOG(ERROR) << "Could not make context current.";
|
| + Destroy();
|
| + return false;
|
| + }
|
| +
|
| + gles2::DisallowedFeatures disallowed_features;
|
| + disallowed_features.swap_buffer_complete_callback = true;
|
| + disallowed_features.gpu_memory_manager = true;
|
| + if (!decoder_->Initialize(surface_,
|
| + context_,
|
| + is_offscreen,
|
| + size,
|
| + disallowed_features,
|
| + allowed_extensions,
|
| + attribs)) {
|
| + LOG(ERROR) << "Could not initialize decoder.";
|
| + Destroy();
|
| + return false;
|
| + }
|
| +
|
| + if (!is_offscreen) {
|
| + decoder_->SetResizeCallback(base::Bind(
|
| + &InProcessCommandBuffer::OnResizeView, base::Unretained(this)));
|
| + }
|
| +
|
| + if (share_resources_) {
|
| + g_all_shared_contexts.Pointer()->insert(this);
|
| + }
|
| +
|
| + return true;
|
| +}
|
| +
|
| +void InProcessCommandBuffer::Destroy() {
|
| + base::WaitableEvent completion(true, false);
|
| + bool result;
|
| + base::Callback<bool(void)> destroy_task = base::Bind(
|
| + &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
|
| + QueueTask(
|
| + base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
|
| + completion.Wait();
|
| +}
|
| +
|
| +bool InProcessCommandBuffer::DestroyOnGpuThread() {
|
| + command_buffer_.reset();
|
| + context_ = NULL;
|
| + surface_ = NULL;
|
| + if (decoder_) {
|
| + decoder_->Destroy(!IsContextLost());
|
| + decoder_.reset();
|
| + }
|
| +
|
| + g_all_shared_contexts.Pointer()->erase(this);
|
| + return true;
|
| +}
|
| +
|
| +unsigned int InProcessCommandBuffer::CreateImageForGpuMemoryBuffer(
|
| + gfx::GpuMemoryBufferHandle buffer,
|
| + gfx::Size size) {
|
| + unsigned int image_id;
|
| + {
|
| + // TODO: ID allocation should go through CommandBuffer
|
| + base::AutoLock lock(command_buffer_lock_);
|
| + gles2::ContextGroup* group = decoder_->GetContextGroup();
|
| + image_id =
|
| + group->GetIdAllocator(gles2::id_namespaces::kImages)->AllocateID();
|
| + }
|
| + base::Closure image_task =
|
| + base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
|
| + base::Unretained(this), buffer, size, image_id);
|
| + QueueTask(image_task);
|
| + return image_id;
|
| +}
|
| +
|
| +void InProcessCommandBuffer::CreateImageOnGpuThread(
|
| + gfx::GpuMemoryBufferHandle buffer,
|
| + gfx::Size size,
|
| + unsigned int image_id) {
|
| + scoped_refptr<gfx::GLImage> gl_image =
|
| + gfx::GLImage::CreateGLImageForGpuMemoryBuffer(buffer, size);
|
| + decoder_->GetContextGroup()->image_manager()->AddImage(gl_image, image_id);
|
| +}
|
| +
|
| +void InProcessCommandBuffer::RemoveImage(unsigned int image_id) {
|
| + {
|
| + // TODO: ID allocation should go through CommandBuffer
|
| + base::AutoLock lock(command_buffer_lock_);
|
| + gles2::ContextGroup* group = decoder_->GetContextGroup();
|
| + group->GetIdAllocator(gles2::id_namespaces::kImages)->FreeID(image_id);
|
| + }
|
| + base::Closure image_manager_task =
|
| + base::Bind(&InProcessCommandBuffer::RemoveImageOnGpuThread,
|
| + base::Unretained(this),
|
| + image_id);
|
| + QueueTask(image_manager_task);
|
| +}
|
| +
|
| +void InProcessCommandBuffer::RemoveImageOnGpuThread(unsigned int image_id) {
|
| + decoder_->GetContextGroup()->image_manager()->RemoveImage(image_id);
|
| +}
|
| +
|
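| +// Marks this context as lost and runs the client's context-lost callback.
| +// When resources are shared, all other shared contexts are marked lost and
| +// notified as well.
|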
| +void InProcessCommandBuffer::OnContextLost() {
|
| + if (!context_lost_callback_.is_null())
|
| + context_lost_callback_.Run();
|
| +
|
| + context_lost_ = true;
|
| + if (share_resources_) {
|
| + for (std::set<InProcessCommandBuffer*>::iterator it =
|
| + g_all_shared_contexts.Get().begin();
|
| + it != g_all_shared_contexts.Get().end();
|
| + ++it) {
|
| + (*it)->context_lost_ = true;
|
| + if (!(*it)->context_lost_callback_.is_null() && (*it) != this)
|
| + (*it)->context_lost_callback_.Run();
|
| + }
|
| + }
|
| +}
|
| +
|
| +CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
|
| + base::AutoLock lock(command_buffer_lock_);
|
| + return last_state_ = command_buffer_->GetState();
|
| +}
|
| +
|
| +CommandBuffer::State InProcessCommandBuffer::GetState() {
|
| + return GetStateFast();
|
| +}
|
| +
|
| +CommandBuffer::State InProcessCommandBuffer::GetLastState() {
|
| + return last_state_;
|
| +}
|
| +
|
| +int32 InProcessCommandBuffer::GetLastToken() { return last_state_.token; }
|
| +
|
| +void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
|
| + base::AutoLock lock(command_buffer_lock_);
|
| + command_buffer_->Flush(put_offset);
|
| +}
|
| +
|
| +void InProcessCommandBuffer::Flush(int32 put_offset) {
|
| + if (last_state_.error != gpu::error::kNoError)
|
| + return;
|
| +
|
| + if (last_put_offset_ == put_offset)
|
| + return;
|
| +
|
| + last_put_offset_ = put_offset;
|
| + base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
|
| + base::Unretained(this),
|
| + put_offset);
|
| + QueueTask(task);
|
| +}
|
| +
|
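| +// Flushes, then blocks on flush_event_ until the GPU thread advances the get
| +// offset beyond |last_known_get| or an error is set.
|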
| +CommandBuffer::State InProcessCommandBuffer::FlushSync(int32 put_offset,
|
| + int32 last_known_get) {
|
| + if (put_offset == last_known_get || last_state_.error != gpu::error::kNoError)
|
| + return last_state_;
|
| +
|
| + Flush(put_offset);
|
| + GetStateFast();
|
| + while (last_known_get == last_state_.get_offset &&
|
| + last_state_.error == gpu::error::kNoError) {
|
| + flush_event_.Wait();
|
| + GetStateFast();
|
| + }
|
| +
|
| + return last_state_;
|
| +}
|
| +
|
| +void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
|
| + if (last_state_.error != gpu::error::kNoError)
|
| + return;
|
| +
|
| + {
|
| + base::AutoLock lock(command_buffer_lock_);
|
| + command_buffer_->SetGetBuffer(shm_id);
|
| + last_put_offset_ = 0;
|
| + }
|
| + GetStateFast();
|
| +}
|
| +
|
| +gpu::Buffer InProcessCommandBuffer::CreateTransferBuffer(size_t size,
|
| + int32* id) {
|
| + base::AutoLock lock(command_buffer_lock_);
|
| + return command_buffer_->CreateTransferBuffer(size, id);
|
| +}
|
| +
|
| +void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
|
| + base::AutoLock lock(command_buffer_lock_);
|
| + base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer,
|
| + base::Unretained(command_buffer_.get()),
|
| + id);
|
| +
|
| + QueueTask(task);
|
| +}
|
| +
|
| +gpu::Buffer InProcessCommandBuffer::GetTransferBuffer(int32 id) {
|
| + NOTREACHED();
|
| + return gpu::Buffer();
|
| +}
|
| +
|
| +uint32 InProcessCommandBuffer::InsertSyncPoint() {
|
| + NOTREACHED();
|
| + return 0;
|
| +}
|
| +
|
| +void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
|
| + const base::Closure& callback) {
|
| + QueueTask(WrapCallback(callback));
|
| +}
|
| +
|
| +gpu::error::Error InProcessCommandBuffer::GetLastError() {
|
| + return last_state_.error;
|
| +}
|
| +
|
| +bool InProcessCommandBuffer::Initialize() {
|
| + NOTREACHED();
|
| + return false;
|
| +}
|
| +
|
| +void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); }
|
| +
|
| +void InProcessCommandBuffer::SetToken(int32 token) { NOTREACHED(); }
|
| +
|
| +void InProcessCommandBuffer::SetParseError(gpu::error::Error error) {
|
| + NOTREACHED();
|
| +}
|
| +
|
| +void InProcessCommandBuffer::SetContextLostReason(
|
| + gpu::error::ContextLostReason reason) {
|
| + NOTREACHED();
|
| +}
|
| +
|
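| +// Client callbacks are wrapped so that they run on the message loop that was
| +// current when they were registered, posting back if invoked from another
| +// thread (e.g. the GPU thread).
|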
| +static void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
|
| + const base::Closure& callback) {
|
| + if (loop != base::MessageLoopProxy::current())
|
| + loop->PostTask(FROM_HERE, callback);
|
| + else
|
| + callback.Run();
|
| +}
|
| +
|
| +base::Closure InProcessCommandBuffer::WrapCallback(
|
| + const base::Closure& callback) {
|
| + base::Closure wrapped_callback =
|
| + base::Bind(&PostCallback, base::MessageLoopProxy::current(), callback);
|
| + return wrapped_callback;
|
| +}
|
| +
|
| +// static
|
| +void InProcessCommandBuffer::EnableVirtualizedContext() {
|
| + g_use_virtualized_gl_context = true;
|
| +}
|
| +
|
| +// static
|
| +void InProcessCommandBuffer::SetScheduleCallback(
|
| + const base::Closure& callback) {
|
| + g_gpu_queue.Get().SetScheduleCallback(callback);
|
| +}
|
| +
|
| +// static
|
| +void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() {
|
| + g_gpu_queue.Get().RunTasks();
|
| +}
|
| +
|
| +} // namespace gpu
|