Chromium Code Reviews| Index: gpu/command_buffer/service/in_process_command_buffer.cc |
| diff --git a/gpu/command_buffer/service/in_process_command_buffer.cc b/gpu/command_buffer/service/in_process_command_buffer.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..e78de08794110ec8a221ebcb025f39a7e085a532 |
| --- /dev/null |
| +++ b/gpu/command_buffer/service/in_process_command_buffer.cc |
| @@ -0,0 +1,546 @@ |
| +// Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "gpu/command_buffer/service/in_process_command_buffer.h" |
| + |
| +#include <queue> |
| +#include <set> |
| +#include <utility> |
| +#include <vector> |
| + |
| +#include <GLES2/gl2.h> |
| +#ifndef GL_GLEXT_PROTOTYPES |
| +#define GL_GLEXT_PROTOTYPES 1 |
| +#endif |
| +#include <GLES2/gl2ext.h> |
| +#include <GLES2/gl2extchromium.h> |
| + |
| +#include "base/bind.h" |
| +#include "base/bind_helpers.h" |
| +#include "base/lazy_instance.h" |
| +#include "base/logging.h" |
| +#include "base/memory/scoped_ptr.h" |
| +#include "base/memory/weak_ptr.h" |
| +#include "base/message_loop/message_loop.h" |
| +#include "base/message_loop/message_loop_proxy.h" |
| +#include "base/synchronization/waitable_event.h" |
| +#include "base/threading/non_thread_safe.h" |
| +#include "base/threading/thread.h" |
| +#include "gpu/command_buffer/common/id_allocator.h" |
| +#include "gpu/command_buffer/service/command_buffer_service.h" |
| +#include "gpu/command_buffer/service/context_group.h" |
| +#include "gpu/command_buffer/service/gl_context_virtual.h" |
| +#include "gpu/command_buffer/service/gpu_scheduler.h" |
| +#include "gpu/command_buffer/service/image_manager.h" |
| +#include "gpu/command_buffer/service/transfer_buffer_manager.h" |
| +#include "ui/gfx/size.h" |
| +#include "ui/gl/gl_context.h" |
| +#include "ui/gl/gl_image.h" |
| +#include "ui/gl/gl_share_group.h" |
| +#include "ui/gl/gl_surface.h" |
| + |
| +namespace gpu { |
| + |
| +namespace { |
| + |
| +// Set of all command buffers created with |share_resources| = true; used to |
| +// find an existing, non-lost context group to share with and to propagate |
| +// context loss to every sharing context (see OnContextLost). |
| +static base::LazyInstance<std::set<InProcessCommandBuffer*> > |
| +    g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER; |
| + |
| +// Toggled by InProcessCommandBuffer::EnableVirtualizedContext(). |
| +static bool g_use_virtualized_gl_context = false; |
| + |
| +// Runs |task|, stores its return value into |*result|, then signals |
| +// |completion| so a thread blocked in WaitableEvent::Wait() can wake up and |
| +// read the result. |result| and |completion| must outlive the task. |
| +template <typename T> |
| +static void RunTaskWithResult(base::Callback<T(void)> task, |
| +                              T* result, |
| +                              base::WaitableEvent* completion) { |
| +  *result = task.Run(); |
| +  completion->Signal(); |
| +} |
| + |
| +// Thread-safe FIFO of closures representing pending service-side GPU work. |
| +// Tasks are queued from any thread and drained by RunTasks(). |
| +class GpuCommandQueue { |
| + public: |
| +  GpuCommandQueue(); |
| +  virtual ~GpuCommandQueue(); |
| + |
| +  // Appends |task|; safe to call from any thread. |
| +  void QueueTask(const base::Closure& task); |
| +  // Drains the queue, running each task outside the lock; tasks queued |
| +  // while draining are also run. |
| +  void RunTasks(); |
| + |
| + private: |
| +  base::Lock tasks_lock_; |
| +  std::queue<base::Closure> tasks_; |
| + |
| +  DISALLOW_COPY_AND_ASSIGN(GpuCommandQueue); |
| +}; |
| + |
| +GpuCommandQueue::GpuCommandQueue() { |
| +} |
| + |
| +GpuCommandQueue::~GpuCommandQueue() { |
| +  // All queued work must have been consumed before destruction. |
| +  base::AutoLock lock(tasks_lock_); |
| +  DCHECK(tasks_.empty()); |
| +} |
| + |
| +void GpuCommandQueue::QueueTask(const base::Closure& task) { |
| +  base::AutoLock lock(tasks_lock_); |
| +  tasks_.push(task); |
| +} |
| + |
| +void GpuCommandQueue::RunTasks() { |
| +  // Drain the queue. |tasks_lock_| is never held while a task runs, so a |
| +  // task may queue further work without deadlocking; any such work is |
| +  // picked up by subsequent iterations until the queue is observed empty. |
| +  for (;;) { |
| +    base::Closure next_task; |
| +    { |
| +      base::AutoLock lock(tasks_lock_); |
| +      if (tasks_.empty()) |
| +        break; |
| +      next_task = tasks_.front(); |
| +      tasks_.pop(); |
| +    } |
| +    next_task.Run(); |
| +  } |
| +} |
| + |
| +// Optional hook: when set (via SetScheduleCallback), QueueTask notifies the |
| +// embedder instead of posting to the internal GPU thread; the embedder is |
| +// then expected to call ProcessGpuWorkOnCurrentThread(). |
| +static base::LazyInstance<base::Closure> g_schedule_work_callback = |
| +    LAZY_INSTANCE_INITIALIZER; |
| + |
| +// Dedicated thread on which queued GPU work runs when no schedule callback |
| +// has been installed. Starts itself on construction. |
| +class GpuInProcessThread : public base::Thread { |
| + public: |
| +  GpuInProcessThread(); |
| +  virtual ~GpuInProcessThread(); |
| + |
| + private: |
| +  DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread); |
| +}; |
| + |
| +GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") { |
| +  Start(); |
| +} |
| + |
| +GpuInProcessThread::~GpuInProcessThread() {} |
| + |
| +static base::LazyInstance<GpuInProcessThread> g_gpu_thread = |
| +    LAZY_INSTANCE_INITIALIZER; |
| + |
| +static base::LazyInstance<GpuCommandQueue> g_gpu_queue = |
| +    LAZY_INSTANCE_INITIALIZER; |
|
piman
2013/07/23 01:57:14
I'm fairly uncomfortable with all these globals be
no sievers
2013/07/25 00:41:23
Done.
|
| + |
| +// Enqueues |task| and arranges for the queue to be drained: either by |
| +// running the embedder's schedule callback (which should eventually call |
| +// ProcessGpuWorkOnCurrentThread) or by posting RunTasks to the internal |
| +// GPU thread. |
| +static void QueueTask(const base::Closure& task) { |
| +  g_gpu_queue.Get().QueueTask(task); |
| + |
| +  if (!g_schedule_work_callback.Get().is_null()) { |
| +    g_schedule_work_callback.Get().Run(); |
| +    return; |
| +  } |
| +  g_gpu_thread.Get().message_loop() |
| +      ->PostTask(FROM_HERE, |
| +                 base::Bind(&GpuCommandQueue::RunTasks, |
| +                            base::Unretained(g_gpu_queue.Pointer()))); |
| +} |
| + |
| +} // anonymous namespace |
| + |
| +InProcessCommandBuffer::InProcessCommandBuffer() |
| +    : context_lost_(false), last_put_offset_(-1) {} |
| + |
| +InProcessCommandBuffer::~InProcessCommandBuffer() { |
| +  Destroy(); |
| +} |
| + |
| +bool InProcessCommandBuffer::IsContextLost() { |
| +  // Treat an uninitialized (or already torn down) command buffer as lost. |
| +  if (context_lost_ || !command_buffer_) { |
| +    return true; |
| +  } |
| +  CommandBuffer::State state = GetState(); |
| +  return error::IsError(state.error); |
| +} |
| + |
| +// Service-side resize callback for onscreen surfaces. |scale_factor| is |
| +// currently ignored. |
| +void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) { |
| +  DCHECK(!surface_->IsOffscreen()); |
| +  surface_->Resize(size); |
| +} |
| + |
| +bool InProcessCommandBuffer::MakeCurrent() { |
| +  if (decoder_->MakeCurrent()) |
| +    return true; |
| +  // A failed MakeCurrent is treated as context loss: record the reason and |
| +  // raise a parse error so clients observe kLostContext. |
| +  DLOG(ERROR) << "Context lost because MakeCurrent failed."; |
| +  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); |
| +  command_buffer_->SetParseError(gpu::error::kLostContext); |
| +  return false; |
| +} |
| + |
| +// Put-offset-change callback from CommandBufferService; executes pending |
| +// commands under |service_lock_|. |
| +void InProcessCommandBuffer::PumpCommands() { |
| +  base::AutoLock lock(service_lock_); |
|
piman
2013/07/23 01:57:14
I think it would be safer if the lock was taken be
no sievers
2013/07/25 00:41:23
Oops, my bad. Done.
|
| + |
| +  if (!MakeCurrent()) |
| +    return; |
| + |
| +  gpu_scheduler_->PutChanged(); |
| +  CommandBuffer::State state = command_buffer_->GetState(); |
| +  // After processing, the error state and |context_lost_| must agree. |
| +  DCHECK((!error::IsError(state.error) && !context_lost_) || |
| +         (error::IsError(state.error) && context_lost_)); |
| +} |
| + |
| +bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) { |
| +  command_buffer_->SetGetBuffer(transfer_buffer_id); |
| +  return true; |
| +} |
| + |
| +// Client-side entry point: synchronously initializes the command buffer by |
| +// queueing InitializeOnGpuThread and blocking until it completes; returns |
| +// its result. |
| +bool InProcessCommandBuffer::Initialize( |
| +    bool is_offscreen, |
| +    bool share_resources, |
| +    gfx::AcceleratedWidget window, |
| +    const gfx::Size& size, |
| +    const char* allowed_extensions, |
| +    const std::vector<int32>& attribs, |
| +    gfx::GpuPreference gpu_preference, |
| +    const base::Closure& context_lost_callback) { |
| + |
| +  share_resources_ = share_resources; |
| +  // Wrap so the callback later runs on (or is posted back to) this thread's |
| +  // message loop. |
| +  context_lost_callback_ = WrapCallback(context_lost_callback); |
| + |
| +  base::WaitableEvent completion(true, false); |
| +  bool result; |
| +  // base::Unretained is safe here: we block on |completion| below, so |this| |
| +  // outlives the queued task. |
| +  base::Callback<bool(void)> init_task = |
| +      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread, |
| +                 base::Unretained(this), |
|
piman
2013/07/23 01:57:14
What ensures |this| outlives the thread? If the de
no sievers
2013/07/25 00:41:23
Was this comment intended for the Destroy() furthe
|
| +                 is_offscreen, |
| +                 window, |
| +                 size, |
| +                 allowed_extensions, |
| +                 attribs, |
| +                 gpu_preference); |
| +  QueueTask( |
| +      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion)); |
| +  completion.Wait(); |
| +  return result; |
| +} |
| + |
| +// Service-side initialization; runs on the GPU thread. Creates the command |
| +// buffer service, decoder, scheduler, GL surface and GL context. On any |
| +// failure it tears down what was created and returns false. |
| +bool InProcessCommandBuffer::InitializeOnGpuThread( |
| +    bool is_offscreen, |
| +    gfx::AcceleratedWidget window, |
| +    const gfx::Size& size, |
| +    const char* allowed_extensions, |
| +    const std::vector<int32>& attribs, |
| +    gfx::GpuPreference gpu_preference) { |
| +  // Use one share group for all contexts. |
| +  CR_DEFINE_STATIC_LOCAL(scoped_refptr<gfx::GLShareGroup>, share_group, |
| +                         (new gfx::GLShareGroup)); |
| + |
| +  DCHECK(size.width() >= 0 && size.height() >= 0); |
| + |
| +  TransferBufferManager* manager = new TransferBufferManager(); |
| +  transfer_buffer_manager_.reset(manager); |
| +  manager->Initialize(); |
| + |
| +  scoped_ptr<CommandBufferService> command_buffer( |
| +      new CommandBufferService(transfer_buffer_manager_.get())); |
| +  command_buffer->SetPutOffsetChangeCallback(base::Bind( |
| +      &InProcessCommandBuffer::PumpCommands, base::Unretained(this))); |
| +  command_buffer->SetParseErrorCallback(base::Bind( |
| +      &InProcessCommandBuffer::OnContextLost, base::Unretained(this))); |
| + |
| +  if (!command_buffer->Initialize()) { |
| +    LOG(ERROR) << "Could not initialize command buffer."; |
| +    // NOTE(review): Destroy() queues a task and blocks waiting for it, but |
| +    // this code already runs on the thread that drains the queue -- confirm |
| +    // this cannot deadlock (same applies to the failure paths below). |
| +    Destroy(); |
| +    return false; |
| +  } |
| + |
| +  InProcessCommandBuffer* context_group = NULL; |
| + |
| +  // When sharing resources, reuse the context group of any live (non-lost) |
| +  // shared context; if none is usable, start a fresh share group. |
| +  if (share_resources_ && !g_all_shared_contexts.Get().empty()) { |
| +    for (std::set<InProcessCommandBuffer*>::iterator it = |
| +             g_all_shared_contexts.Get().begin(); |
| +         it != g_all_shared_contexts.Get().end(); |
| +         ++it) { |
| +      if (!(*it)->IsContextLost()) { |
| +        context_group = *it; |
| +        break; |
| +      } |
| +    } |
| +    if (!context_group) |
| +      share_group = new gfx::GLShareGroup; |
| +  } |
| + |
| +  // TODO(gman): This needs to be true if this is Pepper. |
| +  bool bind_generates_resource = false; |
| +  decoder_.reset(gles2::GLES2Decoder::Create( |
| +      context_group ? context_group->decoder_->GetContextGroup() |
| +                    : new gles2::ContextGroup( |
| +                          NULL, NULL, NULL, NULL, bind_generates_resource))); |
| + |
| +  gpu_scheduler_.reset( |
| +      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get())); |
| +  command_buffer->SetGetBufferChangeCallback(base::Bind( |
| +      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get()))); |
| +  command_buffer_ = command_buffer.Pass(); |
| + |
| +  decoder_->set_engine(gpu_scheduler_.get()); |
| + |
| +  if (is_offscreen) |
| +    surface_ = gfx::GLSurface::CreateOffscreenGLSurface(size); |
| +  else |
| +    surface_ = gfx::GLSurface::CreateViewGLSurface(window); |
| + |
| +  if (!surface_.get()) { |
| +    LOG(ERROR) << "Could not create GLSurface."; |
| +    Destroy(); |
| +    return false; |
| +  } |
| + |
| +  if (g_use_virtualized_gl_context) { |
| +    // Lazily create the single real context backing all virtual contexts in |
| +    // this share group. |
| +    context_ = share_group->GetSharedContext(); |
| +    if (!context_.get()) { |
| +      context_ = gfx::GLContext::CreateGLContext( |
| +          share_group.get(), surface_.get(), gpu_preference); |
| +      share_group->SetSharedContext(context_.get()); |
| +    } |
| + |
| +    context_ = new GLContextVirtual( |
| +        share_group.get(), context_.get(), decoder_->AsWeakPtr()); |
| +    if (context_->Initialize(surface_.get(), gpu_preference)) { |
| +      VLOG(1) << "Created virtual GL context."; |
| +    } else { |
| +      context_ = NULL; |
| +    } |
| +  } else { |
| +    context_ = gfx::GLContext::CreateGLContext( |
| +        share_group.get(), surface_.get(), gpu_preference); |
| +  } |
| + |
| +  if (!context_.get()) { |
| +    LOG(ERROR) << "Could not create GLContext."; |
| +    Destroy(); |
| +    return false; |
| +  } |
| + |
| +  if (!context_->MakeCurrent(surface_.get())) { |
| +    LOG(ERROR) << "Could not make context current."; |
| +    Destroy(); |
| +    return false; |
| +  } |
| + |
| +  gles2::DisallowedFeatures disallowed_features; |
| +  disallowed_features.swap_buffer_complete_callback = true; |
| +  disallowed_features.gpu_memory_manager = true; |
| +  if (!decoder_->Initialize(surface_, |
| +                            context_, |
| +                            is_offscreen, |
| +                            size, |
| +                            disallowed_features, |
| +                            allowed_extensions, |
| +                            attribs)) { |
| +    LOG(ERROR) << "Could not initialize decoder."; |
| +    Destroy(); |
| +    return false; |
| +  } |
| + |
| +  if (!is_offscreen) { |
| +    decoder_->SetResizeCallback(base::Bind( |
| +        &InProcessCommandBuffer::OnResizeView, base::Unretained(this))); |
| +  } |
| + |
| +  if (share_resources_) { |
| +    g_all_shared_contexts.Pointer()->insert(this); |
| +  } |
| + |
| +  return true; |
| +} |
| + |
| +// Synchronously tears down the service-side objects by queueing |
| +// DestroyOnGpuThread and blocking until it has run. Called from the |
| +// destructor, so it must tolerate a partially initialized object. |
| +void InProcessCommandBuffer::Destroy() { |
| +  base::WaitableEvent completion(true, false); |
| +  // Initialize defensively; the queued task writes the real value before |
| +  // |completion| is signaled. |
| +  bool result = false; |
| +  // base::Unretained is safe: we block on |completion| below, so |this| |
| +  // outlives the queued task. |
| +  base::Callback<bool(void)> destroy_task = base::Bind( |
| +      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this)); |
| +  QueueTask( |
| +      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion)); |
| +  completion.Wait(); |
| +} |
| + |
| +// Runs on the GPU thread: destroys the decoder (asking it to keep GL state |
| +// only if the context is not lost) and drops this buffer from the |
| +// shared-context set. |
| +bool InProcessCommandBuffer::DestroyOnGpuThread() { |
| +  if (decoder_) { |
| +    decoder_->Destroy(!IsContextLost()); |
| +    decoder_.reset(); |
| +  } |
| + |
| +  g_all_shared_contexts.Pointer()->erase(this); |
| +  return true; |
| +} |
| + |
| +// Allocates an image id and queues registration of |image| with the |
| +// decoder's ImageManager on the GPU thread; returns the allocated id. |
| +unsigned int InProcessCommandBuffer::AddImage( |
| +    scoped_refptr<gfx::GLImage> image) { |
| +  base::AutoLock lock(service_lock_); |
| +  gles2::ContextGroup* group = decoder_->GetContextGroup(); |
|
no sievers
2013/07/25 00:41:23
Oops, neither GLImage nor ImageManager are RefCoun
|
| +  unsigned int image_id = |
| +      group->GetIdAllocator(gles2::id_namespaces::kImages)->AllocateID(); |
| +  base::Closure image_manager_task = base::Bind( |
| +      &gles2::ImageManager::AddImage, group->image_manager(), image, image_id); |
| +  QueueTask(image_manager_task); |
| +  return image_id; |
| +} |
| + |
| +// Frees |image_id| and queues removal of the image from the ImageManager on |
| +// the GPU thread. |
| +void InProcessCommandBuffer::RemoveImage(unsigned int image_id) { |
| +  base::AutoLock lock(service_lock_); |
| +  gles2::ContextGroup* group = decoder_->GetContextGroup(); |
| +  group->GetIdAllocator(gles2::id_namespaces::kImages)->FreeID(image_id); |
| +  base::Closure image_manager_task = base::Bind( |
| +      &gles2::ImageManager::RemoveImage, group->image_manager(), image_id); |
| +  QueueTask(image_manager_task); |
| +} |
| + |
| +// Parse-error callback from the service: notifies the client and flags this |
| +// context (and, when resource sharing is on, every shared context) as lost. |
| +void InProcessCommandBuffer::OnContextLost() { |
| +  // NOTE(review): the callback runs before |context_lost_| is set, so a |
| +  // callback that queries IsContextLost() may still observe 'false' -- |
| +  // confirm this ordering is intended. |
| +  if (!context_lost_callback_.is_null()) |
| +    context_lost_callback_.Run(); |
| + |
| +  context_lost_ = true; |
| +  if (share_resources_) { |
| +    for (std::set<InProcessCommandBuffer*>::iterator it = |
| +             g_all_shared_contexts.Get().begin(); |
| +         it != g_all_shared_contexts.Get().end(); |
| +         ++it) |
| +      (*it)->context_lost_ = true; |
| +  } |
| +} |
| + |
| +// Re-reads the service-side state under |service_lock_| and caches it in |
| +// |last_state_|. |
| +CommandBuffer::State InProcessCommandBuffer::GetStateFast() { |
| +  base::AutoLock lock(service_lock_); |
| +  return last_state_ = command_buffer_->GetState(); |
| +} |
| + |
| +// Busy-waits (with yields) until the service has consumed all flushed |
| +// commands or an error is set, then returns the cached state. |
| +// NOTE(review): |last_put_offset_| starts at -1, so before the first Flush() |
| +// this loop's condition may never be satisfied on an idle buffer -- confirm |
| +// the intended wait condition. |
| +CommandBuffer::State InProcessCommandBuffer::GetState() { |
| +  while (last_put_offset_ != last_state_.get_offset && |
| +         last_state_.error == gpu::error::kNoError) { |
| +    GetStateFast(); |
| +    base::PlatformThread::YieldCurrentThread(); |
| +  } |
|
piman
2013/07/23 01:57:14
I'm not sure what you mean to do here. It looks li
no sievers
2013/07/25 00:41:23
Done.
|
| + |
| +  return last_state_; |
| +} |
| + |
| +// Returns the last cached state without contacting the service. |
| +CommandBuffer::State InProcessCommandBuffer::GetLastState() { |
| +  return last_state_; |
| +} |
| + |
| +int32 InProcessCommandBuffer::GetLastToken() { return last_state_.token; } |
| + |
| +void InProcessCommandBuffer::Flush(int32 put_offset) { |
| +  // Nothing to do on error or when the put offset has not advanced. |
| +  if (last_state_.error != gpu::error::kNoError || |
| +      last_put_offset_ == put_offset) |
| +    return; |
| + |
| +  last_put_offset_ = put_offset; |
| +  QueueTask(base::Bind(&CommandBuffer::Flush, |
| +                       base::Unretained(command_buffer_.get()), |
| +                       put_offset)); |
| +} |
| + |
| +// Flushes and refreshes the cached state. |
| +// NOTE(review): the state is refreshed at most once; this does not loop |
| +// until the get offset advances past |last_known_get| -- confirm callers |
| +// can cope with an unchanged get offset. |
| +CommandBuffer::State InProcessCommandBuffer::FlushSync(int32 put_offset, |
| +                                                       int32 last_known_get) { |
| +  Flush(put_offset); |
| +  if (last_known_get == last_state_.get_offset) { |
| +    GetStateFast(); |
| +    base::PlatformThread::YieldCurrentThread(); |
| +  } |
|
piman
2013/07/23 01:57:14
The semantics of FlushSync is that it only returns
no sievers
2013/07/25 00:41:23
Hmm, I don't see CommandBufferProxyImpl guarantee
|
| + |
| +  return last_state_; |
| +} |
| + |
| +void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) { |
| +  if (last_state_.error != gpu::error::kNoError) |
| +    return; |
| + |
| +  { |
| +    base::AutoLock lock(service_lock_); |
| +    command_buffer_->SetGetBuffer(shm_id); |
| +    last_put_offset_ = 0; |
| +  } |
| +  // Refresh |last_state_| so it reflects the reset get offset. |
| +  GetStateFast(); |
| +} |
| + |
| +// Forwards directly to the service under |service_lock_|; the buffer is |
| +// shared in-process, so no copying or IPC is involved. |
| +gpu::Buffer InProcessCommandBuffer::CreateTransferBuffer(size_t size, |
| +                                                         int32* id) { |
| +  base::AutoLock lock(service_lock_); |
| +  return command_buffer_->CreateTransferBuffer(size, id); |
| +} |
| + |
| +// Destroys the buffer immediately on the calling thread rather than queueing |
| +// it behind pending GPU work (see review discussion below). |
| +void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) { |
| +  base::AutoLock lock(service_lock_); |
| +  command_buffer_->DestroyTransferBuffer(id); |
|
piman
2013/07/23 01:57:14
mmh, this would execute out-of-order wrt the comma
no sievers
2013/07/25 00:41:23
It looked like the client only frees pending an ex
|
| +} |
| + |
| +// Not used in-process; transfer buffers are handed out directly by |
| +// CreateTransferBuffer. |
| +gpu::Buffer InProcessCommandBuffer::GetTransferBuffer(int32 id) { |
| +  NOTREACHED(); |
| +  return gpu::Buffer(); |
| +} |
| + |
| +// Sync points are not implemented for the in-process path. |
| +uint32 InProcessCommandBuffer::InsertSyncPoint() { |
| +  NOTREACHED(); |
| +  return 0; |
| +} |
| +// Since sync points are unsupported, |sync_point| is ignored and the |
| +// callback is simply queued behind all previously queued GPU work. |
| +void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point, |
| +                                             const base::Closure& callback) { |
| +  QueueTask(WrapCallback(callback)); |
| +} |
| + |
| +gpu::error::Error InProcessCommandBuffer::GetLastError() { |
| +  return last_state_.error; |
| +} |
| + |
| +// The no-argument CommandBuffer::Initialize() is not used; see the |
| +// full-argument Initialize() above. |
| +bool InProcessCommandBuffer::Initialize() { |
| +  NOTREACHED(); |
| +  return false; |
| +} |
| + |
| +// The following service-side setters are never called on this object in the |
| +// in-process configuration. |
| +void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); } |
| + |
| +void InProcessCommandBuffer::SetToken(int32 token) { NOTREACHED(); } |
| + |
| +void InProcessCommandBuffer::SetParseError(gpu::error::Error error) { |
| +  NOTREACHED(); |
| +} |
| + |
| +void InProcessCommandBuffer::SetContextLostReason( |
| +    gpu::error::ContextLostReason reason) { |
| +  NOTREACHED(); |
| +} |
| + |
| +// Runs |callback| immediately when already on |loop|'s thread, otherwise |
| +// posts it there. |
| +static void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop, |
| +                         const base::Closure& callback) { |
| +  if (loop != base::MessageLoopProxy::current()) |
| +    loop->PostTask(FROM_HERE, callback); |
| +  else |
| +    callback.Run(); |
| +} |
| + |
| +// Binds |callback| to the calling thread's message loop so that, when run |
| +// later (possibly from the GPU thread), it executes on this thread. |
| +base::Closure InProcessCommandBuffer::WrapCallback( |
| +    const base::Closure& callback) { |
| +  base::Closure wrapped_callback = |
| +      base::Bind(&PostCallback, base::MessageLoopProxy::current(), callback); |
| +  return wrapped_callback; |
| +} |
| + |
| +// static |
| +// Makes subsequently created contexts use a virtualized GL context on top |
| +// of a single shared real context (see InitializeOnGpuThread). |
| +void InProcessCommandBuffer::EnableVirtualizedContext() { |
| +  g_use_virtualized_gl_context = true; |
| +} |
| + |
| +// static |
| +// Installs an embedder-provided callback that QueueTask invokes instead of |
| +// posting to the internal GPU thread; may only be set once. |
| +void InProcessCommandBuffer::SetScheduleCallback( |
| +    const base::Closure& callback) { |
| +  DCHECK(g_schedule_work_callback.Get().is_null()); |
| +  g_schedule_work_callback.Get() = callback; |
| +} |
| + |
| +// static |
| +// Drains all queued GPU work on the calling thread; pairs with |
| +// SetScheduleCallback. |
| +void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() { |
| +  g_gpu_queue.Get().RunTasks(); |
| +} |
| + |
| +} // namespace gpu |