Index: gpu/command_buffer/service/in_process_command_buffer.cc
diff --git a/gpu/command_buffer/service/in_process_command_buffer.cc b/gpu/command_buffer/service/in_process_command_buffer.cc
index 95e172ba5089e9a56051145ecbb756a6dc8332ff..9e8c2daff50c615f3ec9baddb9a0e06a314f5586 100644
--- a/gpu/command_buffer/service/in_process_command_buffer.cc
+++ b/gpu/command_buffer/service/in_process_command_buffer.cc
@@ -43,11 +43,7 @@ namespace gpu {
 namespace {
-static base::LazyInstance<std::set<InProcessCommandBuffer*> >
-    g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER;
-
 static bool g_use_virtualized_gl_context = false;
boliu 2014/02/08 18:55:54
Can/should this be per-service too?

no sievers 2014/02/12 03:09:15
Good idea. Done.

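As a rough illustration of the change agreed on above (the follow-up patch set is not
part of this hunk, and the accessor names are assumptions rather than the actual CL),
a per-service version of the flag could look like this:

    // Hypothetical sketch: the virtualized-GL-context flag held per Service
    // instead of as a file-level global.
    class Service {
     public:
      Service() : use_virtualized_gl_contexts_(false) {}

      void set_use_virtualized_gl_contexts(bool value) {
        use_virtualized_gl_contexts_ = value;
      }
      bool use_virtualized_gl_contexts() const {
        return use_virtualized_gl_contexts_;
      }

     private:
      bool use_virtualized_gl_contexts_;
    };
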
-static bool g_uses_explicit_scheduling = false;
 static GpuMemoryBufferFactory* g_gpu_memory_buffer_factory = NULL;
 template <typename T>
@@ -60,12 +56,14 @@ static void RunTaskWithResult(base::Callback<T(void)> task,
 class GpuInProcessThread
     : public base::Thread,
-      public base::RefCountedThreadSafe<GpuInProcessThread> {
+      public InProcessCommandBuffer::Service {
  public:
   GpuInProcessThread();
+  virtual void ScheduleTask(const base::Closure& task) OVERRIDE;
+  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
+
  private:
-  friend class base::RefCountedThreadSafe<GpuInProcessThread>;
   virtual ~GpuInProcessThread();
   DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
@@ -79,186 +77,102 @@ GpuInProcessThread::~GpuInProcessThread() {
   Stop();
 }
-// Used with explicit scheduling when there is no dedicated GPU thread.
-class GpuCommandQueue {
- public:
-  GpuCommandQueue();
-  ~GpuCommandQueue();
-
-  void QueueTask(const base::Closure& task);
-  void RunTasks();
-  void SetScheduleCallback(const base::Closure& callback);
-
- private:
-  base::Lock tasks_lock_;
-  std::queue<base::Closure> tasks_;
-  base::Closure schedule_callback_;
-
-  DISALLOW_COPY_AND_ASSIGN(GpuCommandQueue);
-};
-
-GpuCommandQueue::GpuCommandQueue() {}
-
-GpuCommandQueue::~GpuCommandQueue() {
-  base::AutoLock lock(tasks_lock_);
-  DCHECK(tasks_.empty());
+void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
+  message_loop()->PostTask(FROM_HERE, task);
 }
-void GpuCommandQueue::QueueTask(const base::Closure& task) {
-  {
-    base::AutoLock lock(tasks_lock_);
-    tasks_.push(task);
-  }
-
-  DCHECK(!schedule_callback_.is_null());
-  schedule_callback_.Run();
-}
-
-void GpuCommandQueue::RunTasks() {
-  size_t num_tasks;
-  {
-    base::AutoLock lock(tasks_lock_);
-    num_tasks = tasks_.size();
-  }
-
-  while (num_tasks) {
-    base::Closure task;
-    {
-      base::AutoLock lock(tasks_lock_);
-      task = tasks_.front();
-      tasks_.pop();
-      num_tasks = tasks_.size();
-    }
-
-    task.Run();
-  }
-}
-
-void GpuCommandQueue::SetScheduleCallback(const base::Closure& callback) {
-  DCHECK(schedule_callback_.is_null());
-  schedule_callback_ = callback;
+void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) {
+  message_loop()->PostDelayedTask(
+      FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
 }
-static base::LazyInstance<GpuCommandQueue> g_gpu_queue =
+base::LazyInstance<std::set<InProcessCommandBuffer*> > all_clients_ =
     LAZY_INSTANCE_INITIALIZER;
+base::LazyInstance<base::Lock> all_clients_lock_ = LAZY_INSTANCE_INITIALIZER;
-class SchedulerClientBase : public InProcessCommandBuffer::SchedulerClient {
+class ScopedEvent {
  public:
-  explicit SchedulerClientBase(bool need_thread);
-  virtual ~SchedulerClientBase();
-
-  static bool HasClients();
-
- protected:
-  scoped_refptr<GpuInProcessThread> thread_;
+  ScopedEvent(base::WaitableEvent* event) : event_(event) {}
+  ~ScopedEvent() { event_->Signal(); }
  private:
-  static base::LazyInstance<std::set<SchedulerClientBase*> > all_clients_;
-  static base::LazyInstance<base::Lock> all_clients_lock_;
+  base::WaitableEvent* event_;
 };
-base::LazyInstance<std::set<SchedulerClientBase*> >
-    SchedulerClientBase::all_clients_ = LAZY_INSTANCE_INITIALIZER;
-base::LazyInstance<base::Lock> SchedulerClientBase::all_clients_lock_ =
-    LAZY_INSTANCE_INITIALIZER;
-
-SchedulerClientBase::SchedulerClientBase(bool need_thread) {
-  base::AutoLock lock(all_clients_lock_.Get());
-  if (need_thread) {
-    if (!all_clients_.Get().empty()) {
-      SchedulerClientBase* other = *all_clients_.Get().begin();
-      thread_ = other->thread_;
-      DCHECK(thread_.get());
-    } else {
-      thread_ = new GpuInProcessThread;
-    }
-  }
-  all_clients_.Get().insert(this);
-}
-
-SchedulerClientBase::~SchedulerClientBase() {
-  base::AutoLock lock(all_clients_lock_.Get());
-  all_clients_.Get().erase(this);
-}
-
-bool SchedulerClientBase::HasClients() {
-  base::AutoLock lock(all_clients_lock_.Get());
-  return !all_clients_.Get().empty();
-}
-
-// A client that talks to the GPU thread
-class ThreadClient : public SchedulerClientBase {
- public:
-  ThreadClient();
-  virtual void QueueTask(const base::Closure& task) OVERRIDE;
-  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
-};
+} // anonymous namespace
-ThreadClient::ThreadClient() : SchedulerClientBase(true) {
-  DCHECK(thread_.get());
+InProcessCommandBuffer::Service::Service() {
+  share_group_sequence_checker_.DetachFromSequence();
 }
-void ThreadClient::QueueTask(const base::Closure& task) {
-  thread_->message_loop()->PostTask(FROM_HERE, task);
+InProcessCommandBuffer::Service::~Service() {
+  DCHECK(all_shared_contexts_.empty());
 }
-void ThreadClient::ScheduleIdleWork(const base::Closure& callback) {
-  thread_->message_loop()->PostDelayedTask(
-      FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
+InProcessCommandBuffer* InProcessCommandBuffer::Service::GetShareGroup(
+    unsigned int group_id) {
+  DCHECK(share_group_sequence_checker_.CalledOnValidSequencedThread());
+  for (std::set<InProcessCommandBuffer*>::const_iterator it =
+           all_shared_contexts_.begin();
+       it != all_shared_contexts_.end();
+       ++it) {
+    if ((*it)->share_group_id_ == group_id)
+      return *it;
+  }
+  return NULL;
 }
-// A client that talks to the GpuCommandQueue
-class QueueClient : public SchedulerClientBase {
- public:
-  QueueClient();
-  virtual void QueueTask(const base::Closure& task) OVERRIDE;
-  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
-};
-
-QueueClient::QueueClient() : SchedulerClientBase(false) {
-  DCHECK(!thread_.get());
+void InProcessCommandBuffer::Service::AddToShareGroup(
+    InProcessCommandBuffer* context) {
+  DCHECK(share_group_sequence_checker_.CalledOnValidSequencedThread());
+  all_shared_contexts_.insert(context);
 }
-void QueueClient::QueueTask(const base::Closure& task) {
-  g_gpu_queue.Get().QueueTask(task);
+void InProcessCommandBuffer::Service::RemoveFromShareGroup(
+    InProcessCommandBuffer* context) {
+  DCHECK(share_group_sequence_checker_.CalledOnValidSequencedThread());
+  all_shared_contexts_.erase(context);
 }
-void QueueClient::ScheduleIdleWork(const base::Closure& callback) {
-  // TODO(sievers): Should this do anything?
+void InProcessCommandBuffer::Service::MarkShareGroupAsLost() {
+  DCHECK(share_group_sequence_checker_.CalledOnValidSequencedThread());
+  for (std::set<InProcessCommandBuffer*>::iterator it =
+           all_shared_contexts_.begin();
+       it != all_shared_contexts_.end();
+       ++it) {
+    (*it)->context_lost_ = true;
+  }
 }
-static scoped_ptr<InProcessCommandBuffer::SchedulerClient>
-CreateSchedulerClient() {
-  scoped_ptr<InProcessCommandBuffer::SchedulerClient> client;
-  if (g_uses_explicit_scheduling)
-    client.reset(new QueueClient);
-  else
-    client.reset(new ThreadClient);
-
-  return client.Pass();
+scoped_refptr<InProcessCommandBuffer::Service>
+InProcessCommandBuffer::GetDefaultService() {
boliu 2014/02/08 18:55:54
Can this just be in an anonymous namespace?

no sievers 2014/02/12 03:09:15
It's a member function just so that it can access

+  base::AutoLock lock(all_clients_lock_.Get());
+  scoped_refptr<Service> service;
+  if (!all_clients_.Get().empty()) {
+    InProcessCommandBuffer* other = *all_clients_.Get().begin();
+    service = other->queue_;
+    DCHECK(service.get());
+  } else {
+    service = new GpuInProcessThread;
+  }
+  return service;
 }
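On the anonymous-namespace question above: the factory reads the private |queue_| of an
existing InProcessCommandBuffer, which a free function in the anonymous namespace could
not do without a friend declaration in the header. A stripped-down toy of the same
pattern (all names hypothetical, compilable on its own):

    #include <set>

    class Client {
     public:
      explicit Client(int service_id) : service_id_(service_id) {
        clients().insert(this);
      }
      ~Client() { clients().erase(this); }

      // A static member function may read the private |service_id_| of a peer
      // instance; an anonymous-namespace helper would need to be a friend.
      static int DefaultServiceId() {
        return clients().empty() ? 0 : (*clients().begin())->service_id_;
      }

     private:
      static std::set<Client*>& clients() {
        static std::set<Client*> instances;
        return instances;
      }

      int service_id_;
    };
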
-class ScopedEvent {
- public:
-  ScopedEvent(base::WaitableEvent* event) : event_(event) {}
-  ~ScopedEvent() { event_->Signal(); }
-
- private:
-  base::WaitableEvent* event_;
-};
-
-} // anonyous namespace
-
-InProcessCommandBuffer::InProcessCommandBuffer()
+InProcessCommandBuffer::InProcessCommandBuffer(
+    const scoped_refptr<Service>& service)
     : context_lost_(false),
       share_group_id_(0),
       last_put_offset_(-1),
       flush_event_(false, false),
-      queue_(CreateSchedulerClient()),
-      gpu_thread_weak_ptr_factory_(this) {}
+      queue_(service.get() ? service : GetDefaultService()),
+      gpu_thread_weak_ptr_factory_(this) {
+  base::AutoLock lock(all_clients_lock_.Get());
+  all_clients_.Get().insert(this);
+}
 InProcessCommandBuffer::~InProcessCommandBuffer() {
   Destroy();
+  base::AutoLock lock(all_clients_lock_.Get());
+  all_clients_.Get().erase(this);
 }
 bool InProcessCommandBuffer::IsContextLost() {
@@ -375,23 +289,14 @@ bool InProcessCommandBuffer::InitializeOnGpuThread(
 }
   InProcessCommandBuffer* context_group = NULL;
-
-  if (share_resources_ && !g_all_shared_contexts.Get().empty()) {
-    DCHECK(share_group_id_);
-    for (std::set<InProcessCommandBuffer*>::iterator it =
-             g_all_shared_contexts.Get().begin();
-         it != g_all_shared_contexts.Get().end();
-         ++it) {
-      if ((*it)->share_group_id_ == share_group_id_) {
-        context_group = *it;
-        DCHECK(context_group->share_resources_);
-        context_lost_ = context_group->IsContextLost();
-        break;
-      }
-    }
-    if (!context_group)
-      share_group = new gfx::GLShareGroup;
+  DCHECK(share_group_id_);
+  if (share_resources_ &&
+      (context_group = queue_->GetShareGroup(share_group_id_))) {
+    DCHECK(context_group->share_resources_);
+    context_lost_ = context_group->IsContextLost();
   }
+  if (!context_group)
+    share_group = new gfx::GLShareGroup;
 #if defined(OS_ANDROID)
   stream_texture_manager_.reset(new StreamTextureManagerInProcess);
@@ -486,9 +391,8 @@ bool InProcessCommandBuffer::InitializeOnGpuThread(
         &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
   }
-  if (share_resources_) {
-    g_all_shared_contexts.Pointer()->insert(this);
-  }
+  if (share_resources_)
+    queue_->AddToShareGroup(this);
   return true;
 }
@@ -521,7 +425,7 @@ bool InProcessCommandBuffer::DestroyOnGpuThread() {
   stream_texture_manager_.reset();
 #endif
-  g_all_shared_contexts.Pointer()->erase(this);
+  queue_->RemoveFromShareGroup(this);
   return true;
 }
@@ -538,14 +442,8 @@ void InProcessCommandBuffer::OnContextLost() {
   }
   context_lost_ = true;
-  if (share_resources_) {
-    for (std::set<InProcessCommandBuffer*>::iterator it =
-             g_all_shared_contexts.Get().begin();
-         it != g_all_shared_contexts.Get().end();
-         ++it) {
-      (*it)->context_lost_ = true;
-    }
-  }
+  if (share_resources_)
+    queue_->MarkShareGroupAsLost();
 }
 CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
@@ -828,20 +726,6 @@ void InProcessCommandBuffer::EnableVirtualizedContext() {
 }
 // static
-void InProcessCommandBuffer::SetScheduleCallback(
-    const base::Closure& callback) {
-  DCHECK(!g_uses_explicit_scheduling);
-  DCHECK(!SchedulerClientBase::HasClients());
-  g_uses_explicit_scheduling = true;
-  g_gpu_queue.Get().SetScheduleCallback(callback);
-}
-
-// static
-void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() {
-  g_gpu_queue.Get().RunTasks();
-}
-
-// static
 void InProcessCommandBuffer::SetGpuMemoryBufferFactory(
     GpuMemoryBufferFactory* factory) {
   g_gpu_memory_buffer_factory = factory;
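Taken together, the new constructor plus GetDefaultService() mean an embedder either
passes an empty scoped_refptr and shares the lazily created GpuInProcessThread, or
supplies its own Service that posts work to a loop it already owns. A hedged usage
sketch follows; CustomService, CreateOnExistingLoop and gpu_loop are made-up names, and
it assumes ScheduleTask()/ScheduleIdleWork() are the only pure-virtual members of
Service and that Service is ref-counted (as the scoped_refptr<Service> queue_ member
suggests):

    #include "base/memory/ref_counted.h"
    #include "base/memory/scoped_ptr.h"
    #include "base/message_loop/message_loop.h"
    #include "gpu/command_buffer/service/in_process_command_buffer.h"

    // Hypothetical Service that forwards GPU work to an embedder-owned loop,
    // mirroring what GpuInProcessThread does for its dedicated thread.
    class CustomService : public gpu::InProcessCommandBuffer::Service {
     public:
      explicit CustomService(base::MessageLoop* gpu_loop) : gpu_loop_(gpu_loop) {}

      virtual void ScheduleTask(const base::Closure& task) OVERRIDE {
        gpu_loop_->PostTask(FROM_HERE, task);
      }
      virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE {
        gpu_loop_->PostDelayedTask(
            FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
      }

     private:
      base::MessageLoop* gpu_loop_;
    };

    // Passing an empty scoped_refptr instead would pick GetDefaultService(),
    // i.e. the shared GpuInProcessThread. The usual Initialize() call on the
    // returned buffer is omitted here.
    scoped_ptr<gpu::InProcessCommandBuffer> CreateOnExistingLoop(
        base::MessageLoop* gpu_loop) {
      scoped_refptr<gpu::InProcessCommandBuffer::Service> service(
          new CustomService(gpu_loop));
      scoped_ptr<gpu::InProcessCommandBuffer> buffer(
          new gpu::InProcessCommandBuffer(service));
      return buffer.Pass();
    }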