Chromium Code Reviews

Side by Side Diff: gpu/command_buffer/service/in_process_command_buffer.cc

Issue 143023005: Support multiple service instances with GLInProcessContext (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 6 years, 10 months ago
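This change replaces the process-global scheduling and share-group state (g_all_shared_contexts, GpuCommandQueue, the SchedulerClient hierarchy) with a per-instance InProcessCommandBuffer::Service, so several independent service instances can coexist in one process. A minimal caller-side sketch of the intent, based only on the constructor and GetDefaultService() introduced in this patch; MakeCustomService() is a hypothetical stand-in for any concrete Service implementation:

#include "base/memory/scoped_ptr.h"
#include "gpu/command_buffer/service/in_process_command_buffer.h"

// Hypothetical helper: returns some concrete InProcessCommandBuffer::Service.
scoped_refptr<gpu::InProcessCommandBuffer::Service> MakeCustomService();

void CreateContextsOnSeparateServices() {
  // Each command buffer schedules GPU work on its own service; share groups
  // are now looked up per service rather than through a process-wide set.
  scoped_ptr<gpu::InProcessCommandBuffer> a(
      new gpu::InProcessCommandBuffer(MakeCustomService()));
  scoped_ptr<gpu::InProcessCommandBuffer> b(
      new gpu::InProcessCommandBuffer(MakeCustomService()));

  // Passing a NULL service falls back to GetDefaultService(), which shares a
  // single lazily created GpuInProcessThread among all such clients.
  scoped_ptr<gpu::InProcessCommandBuffer> c(new gpu::InProcessCommandBuffer(
      scoped_refptr<gpu::InProcessCommandBuffer::Service>()));
}

Presumably GLInProcessContext (per the issue title) gains a matching parameter that it forwards down to this constructor, but that part of the change lives in files not shown in this diff.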
1 // Copyright 2013 The Chromium Authors. All rights reserved. 1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "gpu/command_buffer/service/in_process_command_buffer.h" 5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
6 6
7 #include <queue> 7 #include <queue>
8 #include <utility> 8 #include <utility>
9 9
10 #include <GLES2/gl2.h> 10 #include <GLES2/gl2.h>
(...skipping 25 matching lines...)
36 36
37 #if defined(OS_ANDROID) 37 #if defined(OS_ANDROID)
38 #include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h " 38 #include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h "
39 #include "ui/gl/android/surface_texture.h" 39 #include "ui/gl/android/surface_texture.h"
40 #endif 40 #endif
41 41
42 namespace gpu { 42 namespace gpu {
43 43
44 namespace { 44 namespace {
45 45
46 static base::LazyInstance<std::set<InProcessCommandBuffer*> >
47 g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER;
48
49 static bool g_use_virtualized_gl_context = false; 46 static bool g_use_virtualized_gl_context = false;
boliu 2014/02/08 18:55:54 Can/should this be per-service too?
no sievers 2014/02/12 03:09:15 Good idea. Done.
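A rough fragment of what the per-service flag suggested here could look like; names and placement are illustrative only, not the code that landed in a later patch set:

// Illustrative sketch: the process-global g_use_virtualized_gl_context moves
// onto the Service, so each service instance independently decides whether
// the contexts it hosts share one virtualized GL context.
class Service {
 public:
  Service() : use_virtualized_gl_context_(false) {}
  virtual ~Service() {}

  void EnableVirtualizedContext() { use_virtualized_gl_context_ = true; }
  bool UseVirtualizedGLContext() const { return use_virtualized_gl_context_; }

 private:
  bool use_virtualized_gl_context_;  // previously a file-scope static
};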
50 static bool g_uses_explicit_scheduling = false;
51 static GpuMemoryBufferFactory* g_gpu_memory_buffer_factory = NULL; 47 static GpuMemoryBufferFactory* g_gpu_memory_buffer_factory = NULL;
52 48
53 template <typename T> 49 template <typename T>
54 static void RunTaskWithResult(base::Callback<T(void)> task, 50 static void RunTaskWithResult(base::Callback<T(void)> task,
55 T* result, 51 T* result,
56 base::WaitableEvent* completion) { 52 base::WaitableEvent* completion) {
57 *result = task.Run(); 53 *result = task.Run();
58 completion->Signal(); 54 completion->Signal();
59 } 55 }
60 56
61 class GpuInProcessThread 57 class GpuInProcessThread
62 : public base::Thread, 58 : public base::Thread,
63 public base::RefCountedThreadSafe<GpuInProcessThread> { 59 public InProcessCommandBuffer::Service {
64 public: 60 public:
65 GpuInProcessThread(); 61 GpuInProcessThread();
66 62
63 virtual void ScheduleTask(const base::Closure& task) OVERRIDE;
64 virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
65
67 private: 66 private:
68 friend class base::RefCountedThreadSafe<GpuInProcessThread>;
69 virtual ~GpuInProcessThread(); 67 virtual ~GpuInProcessThread();
70 68
71 DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread); 69 DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
72 }; 70 };
73 71
74 GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") { 72 GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
75 Start(); 73 Start();
76 } 74 }
77 75
78 GpuInProcessThread::~GpuInProcessThread() { 76 GpuInProcessThread::~GpuInProcessThread() {
79 Stop(); 77 Stop();
80 } 78 }
81 79
82 // Used with explicit scheduling when there is no dedicated GPU thread. 80 void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
83 class GpuCommandQueue { 81 message_loop()->PostTask(FROM_HERE, task);
84 public:
85 GpuCommandQueue();
86 ~GpuCommandQueue();
87
88 void QueueTask(const base::Closure& task);
89 void RunTasks();
90 void SetScheduleCallback(const base::Closure& callback);
91
92 private:
93 base::Lock tasks_lock_;
94 std::queue<base::Closure> tasks_;
95 base::Closure schedule_callback_;
96
97 DISALLOW_COPY_AND_ASSIGN(GpuCommandQueue);
98 };
99
100 GpuCommandQueue::GpuCommandQueue() {}
101
102 GpuCommandQueue::~GpuCommandQueue() {
103 base::AutoLock lock(tasks_lock_);
104 DCHECK(tasks_.empty());
105 } 82 }
106 83
107 void GpuCommandQueue::QueueTask(const base::Closure& task) { 84 void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) {
108 { 85 message_loop()->PostDelayedTask(
109 base::AutoLock lock(tasks_lock_);
110 tasks_.push(task);
111 }
112
113 DCHECK(!schedule_callback_.is_null());
114 schedule_callback_.Run();
115 }
116
117 void GpuCommandQueue::RunTasks() {
118 size_t num_tasks;
119 {
120 base::AutoLock lock(tasks_lock_);
121 num_tasks = tasks_.size();
122 }
123
124 while (num_tasks) {
125 base::Closure task;
126 {
127 base::AutoLock lock(tasks_lock_);
128 task = tasks_.front();
129 tasks_.pop();
130 num_tasks = tasks_.size();
131 }
132
133 task.Run();
134 }
135 }
136
137 void GpuCommandQueue::SetScheduleCallback(const base::Closure& callback) {
138 DCHECK(schedule_callback_.is_null());
139 schedule_callback_ = callback;
140 }
141
142 static base::LazyInstance<GpuCommandQueue> g_gpu_queue =
143 LAZY_INSTANCE_INITIALIZER;
144
145 class SchedulerClientBase : public InProcessCommandBuffer::SchedulerClient {
146 public:
147 explicit SchedulerClientBase(bool need_thread);
148 virtual ~SchedulerClientBase();
149
150 static bool HasClients();
151
152 protected:
153 scoped_refptr<GpuInProcessThread> thread_;
154
155 private:
156 static base::LazyInstance<std::set<SchedulerClientBase*> > all_clients_;
157 static base::LazyInstance<base::Lock> all_clients_lock_;
158 };
159
160 base::LazyInstance<std::set<SchedulerClientBase*> >
161 SchedulerClientBase::all_clients_ = LAZY_INSTANCE_INITIALIZER;
162 base::LazyInstance<base::Lock> SchedulerClientBase::all_clients_lock_ =
163 LAZY_INSTANCE_INITIALIZER;
164
165 SchedulerClientBase::SchedulerClientBase(bool need_thread) {
166 base::AutoLock lock(all_clients_lock_.Get());
167 if (need_thread) {
168 if (!all_clients_.Get().empty()) {
169 SchedulerClientBase* other = *all_clients_.Get().begin();
170 thread_ = other->thread_;
171 DCHECK(thread_.get());
172 } else {
173 thread_ = new GpuInProcessThread;
174 }
175 }
176 all_clients_.Get().insert(this);
177 }
178
179 SchedulerClientBase::~SchedulerClientBase() {
180 base::AutoLock lock(all_clients_lock_.Get());
181 all_clients_.Get().erase(this);
182 }
183
184 bool SchedulerClientBase::HasClients() {
185 base::AutoLock lock(all_clients_lock_.Get());
186 return !all_clients_.Get().empty();
187 }
188
189 // A client that talks to the GPU thread
190 class ThreadClient : public SchedulerClientBase {
191 public:
192 ThreadClient();
193 virtual void QueueTask(const base::Closure& task) OVERRIDE;
194 virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
195 };
196
197 ThreadClient::ThreadClient() : SchedulerClientBase(true) {
198 DCHECK(thread_.get());
199 }
200
201 void ThreadClient::QueueTask(const base::Closure& task) {
202 thread_->message_loop()->PostTask(FROM_HERE, task);
203 }
204
205 void ThreadClient::ScheduleIdleWork(const base::Closure& callback) {
206 thread_->message_loop()->PostDelayedTask(
207 FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5)); 86 FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
208 } 87 }
209 88
210 // A client that talks to the GpuCommandQueue 89 base::LazyInstance<std::set<InProcessCommandBuffer*> > all_clients_ =
211 class QueueClient : public SchedulerClientBase { 90 LAZY_INSTANCE_INITIALIZER;
212 public: 91 base::LazyInstance<base::Lock> all_clients_lock_ = LAZY_INSTANCE_INITIALIZER;
213 QueueClient();
214 virtual void QueueTask(const base::Closure& task) OVERRIDE;
215 virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
216 };
217
218 QueueClient::QueueClient() : SchedulerClientBase(false) {
219 DCHECK(!thread_.get());
220 }
221
222 void QueueClient::QueueTask(const base::Closure& task) {
223 g_gpu_queue.Get().QueueTask(task);
224 }
225
226 void QueueClient::ScheduleIdleWork(const base::Closure& callback) {
227 // TODO(sievers): Should this do anything?
228 }
229
230 static scoped_ptr<InProcessCommandBuffer::SchedulerClient>
231 CreateSchedulerClient() {
232 scoped_ptr<InProcessCommandBuffer::SchedulerClient> client;
233 if (g_uses_explicit_scheduling)
234 client.reset(new QueueClient);
235 else
236 client.reset(new ThreadClient);
237
238 return client.Pass();
239 }
240 92
241 class ScopedEvent { 93 class ScopedEvent {
242 public: 94 public:
243 ScopedEvent(base::WaitableEvent* event) : event_(event) {} 95 ScopedEvent(base::WaitableEvent* event) : event_(event) {}
244 ~ScopedEvent() { event_->Signal(); } 96 ~ScopedEvent() { event_->Signal(); }
245 97
246 private: 98 private:
247 base::WaitableEvent* event_; 99 base::WaitableEvent* event_;
248 }; 100 };
249 101
250 } // anonymous namespace 102 } // anonymous namespace
251 103
252 InProcessCommandBuffer::InProcessCommandBuffer() 104 InProcessCommandBuffer::Service::Service() {
105 share_group_sequence_checker_.DetachFromSequence();
106 }
107
108 InProcessCommandBuffer::Service::~Service() {
109 DCHECK(all_shared_contexts_.empty());
110 }
111
112 InProcessCommandBuffer* InProcessCommandBuffer::Service::GetShareGroup(
113 unsigned int group_id) {
114 DCHECK(share_group_sequence_checker_.CalledOnValidSequencedThread());
115 for (std::set<InProcessCommandBuffer*>::const_iterator it =
116 all_shared_contexts_.begin();
117 it != all_shared_contexts_.end();
118 ++it) {
119 if ((*it)->share_group_id_ == group_id)
120 return *it;
121 }
122 return NULL;
123 }
124
125 void InProcessCommandBuffer::Service::AddToShareGroup(InProcessCommandBuffer* context) {
126 DCHECK(share_group_sequence_checker_.CalledOnValidSequencedThread());
127 all_shared_contexts_.insert(context);
128 }
129
130 void InProcessCommandBuffer::Service::RemoveFromShareGroup(
131 InProcessCommandBuffer* context) {
132 DCHECK(share_group_sequence_checker_.CalledOnValidSequencedThread());
133 all_shared_contexts_.erase(context);
134 }
135
136 void InProcessCommandBuffer::Service::MarkShareGroupAsLost() {
137 DCHECK(share_group_sequence_checker_.CalledOnValidSequencedThread());
138 for (std::set<InProcessCommandBuffer*>::iterator it =
139 all_shared_contexts_.begin();
140 it != all_shared_contexts_.end();
141 ++it) {
142 (*it)->context_lost_ = true;
143 }
144 }
145
146 scoped_refptr<InProcessCommandBuffer::Service>
147 InProcessCommandBuffer::GetDefaultService() {
boliu 2014/02/08 18:55:54 Can this just be in an anonymous namespace?
no sievers 2014/02/12 03:09:15 It's a member function just so that it can access
148 base::AutoLock lock(all_clients_lock_.Get());
149 scoped_refptr<Service> service;
150 if (!all_clients_.Get().empty()) {
151 InProcessCommandBuffer* other = *all_clients_.Get().begin();
152 service = other->queue_;
153 DCHECK(service.get());
154 } else {
155 service = new GpuInProcessThread;
156 }
157 return service;
158 }
159
160 InProcessCommandBuffer::InProcessCommandBuffer(
161 const scoped_refptr<Service>& service)
253 : context_lost_(false), 162 : context_lost_(false),
254 share_group_id_(0), 163 share_group_id_(0),
255 last_put_offset_(-1), 164 last_put_offset_(-1),
256 flush_event_(false, false), 165 flush_event_(false, false),
257 queue_(CreateSchedulerClient()), 166 queue_(service.get() ? service : GetDefaultService()),
258 gpu_thread_weak_ptr_factory_(this) {} 167 gpu_thread_weak_ptr_factory_(this) {
168 base::AutoLock lock(all_clients_lock_.Get());
169 all_clients_.Get().insert(this);
170 }
259 171
260 InProcessCommandBuffer::~InProcessCommandBuffer() { 172 InProcessCommandBuffer::~InProcessCommandBuffer() {
261 Destroy(); 173 Destroy();
174 base::AutoLock lock(all_clients_lock_.Get());
175 all_clients_.Get().erase(this);
262 } 176 }
263 177
264 bool InProcessCommandBuffer::IsContextLost() { 178 bool InProcessCommandBuffer::IsContextLost() {
265 CheckSequencedThread(); 179 CheckSequencedThread();
266 if (context_lost_ || !command_buffer_) { 180 if (context_lost_ || !command_buffer_) {
267 return true; 181 return true;
268 } 182 }
269 CommandBuffer::State state = GetState(); 183 CommandBuffer::State state = GetState();
270 return error::IsError(state.error); 184 return error::IsError(state.error);
271 } 185 }
(...skipping 96 matching lines...)
368 command_buffer->SetParseErrorCallback(base::Bind( 282 command_buffer->SetParseErrorCallback(base::Bind(
369 &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_)); 283 &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));
370 284
371 if (!command_buffer->Initialize()) { 285 if (!command_buffer->Initialize()) {
372 LOG(ERROR) << "Could not initialize command buffer."; 286 LOG(ERROR) << "Could not initialize command buffer.";
373 DestroyOnGpuThread(); 287 DestroyOnGpuThread();
374 return false; 288 return false;
375 } 289 }
376 290
377 InProcessCommandBuffer* context_group = NULL; 291 InProcessCommandBuffer* context_group = NULL;
378 292 DCHECK(share_group_id_);
379 if (share_resources_ && !g_all_shared_contexts.Get().empty()) { 293 if (share_resources_ &&
380 DCHECK(share_group_id_); 294 (context_group = queue_->GetShareGroup(share_group_id_))) {
381 for (std::set<InProcessCommandBuffer*>::iterator it = 295 DCHECK(context_group->share_resources_);
382 g_all_shared_contexts.Get().begin(); 296 context_lost_ = context_group->IsContextLost();
383 it != g_all_shared_contexts.Get().end();
384 ++it) {
385 if ((*it)->share_group_id_ == share_group_id_) {
386 context_group = *it;
387 DCHECK(context_group->share_resources_);
388 context_lost_ = context_group->IsContextLost();
389 break;
390 }
391 }
392 if (!context_group)
393 share_group = new gfx::GLShareGroup;
394 } 297 }
298 if (!context_group)
299 share_group = new gfx::GLShareGroup;
395 300
396 #if defined(OS_ANDROID) 301 #if defined(OS_ANDROID)
397 stream_texture_manager_.reset(new StreamTextureManagerInProcess); 302 stream_texture_manager_.reset(new StreamTextureManagerInProcess);
398 #endif 303 #endif
399 304
400 bool bind_generates_resource = false; 305 bool bind_generates_resource = false;
401 decoder_.reset(gles2::GLES2Decoder::Create( 306 decoder_.reset(gles2::GLES2Decoder::Create(
402 context_group ? context_group->decoder_->GetContextGroup() 307 context_group ? context_group->decoder_->GetContextGroup()
403 : new gles2::ContextGroup(NULL, 308 : new gles2::ContextGroup(NULL,
404 NULL, 309 NULL,
(...skipping 74 matching lines...)
479 decoder_->GetQueryManager(), 384 decoder_->GetQueryManager(),
480 decoder_->GetCapabilities())); 385 decoder_->GetCapabilities()));
481 386
482 *params.capabilities = gpu_control_->GetCapabilities(); 387 *params.capabilities = gpu_control_->GetCapabilities();
483 388
484 if (!params.is_offscreen) { 389 if (!params.is_offscreen) {
485 decoder_->SetResizeCallback(base::Bind( 390 decoder_->SetResizeCallback(base::Bind(
486 &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_)); 391 &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
487 } 392 }
488 393
489 if (share_resources_) { 394 if (share_resources_)
490 g_all_shared_contexts.Pointer()->insert(this); 395 queue_->AddToShareGroup(this);
491 }
492 396
493 return true; 397 return true;
494 } 398 }
495 399
496 void InProcessCommandBuffer::Destroy() { 400 void InProcessCommandBuffer::Destroy() {
497 CheckSequencedThread(); 401 CheckSequencedThread();
498 402
499 base::WaitableEvent completion(true, false); 403 base::WaitableEvent completion(true, false);
500 bool result = false; 404 bool result = false;
501 base::Callback<bool(void)> destroy_task = base::Bind( 405 base::Callback<bool(void)> destroy_task = base::Bind(
(...skipping 12 matching lines...)
514 if (decoder_) { 418 if (decoder_) {
515 decoder_->Destroy(have_context); 419 decoder_->Destroy(have_context);
516 decoder_.reset(); 420 decoder_.reset();
517 } 421 }
518 context_ = NULL; 422 context_ = NULL;
519 surface_ = NULL; 423 surface_ = NULL;
520 #if defined(OS_ANDROID) 424 #if defined(OS_ANDROID)
521 stream_texture_manager_.reset(); 425 stream_texture_manager_.reset();
522 #endif 426 #endif
523 427
524 g_all_shared_contexts.Pointer()->erase(this); 428 queue_->RemoveFromShareGroup(this);
525 return true; 429 return true;
526 } 430 }
527 431
528 void InProcessCommandBuffer::CheckSequencedThread() { 432 void InProcessCommandBuffer::CheckSequencedThread() {
529 DCHECK(!sequence_checker_ || 433 DCHECK(!sequence_checker_ ||
530 sequence_checker_->CalledOnValidSequencedThread()); 434 sequence_checker_->CalledOnValidSequencedThread());
531 } 435 }
532 436
533 void InProcessCommandBuffer::OnContextLost() { 437 void InProcessCommandBuffer::OnContextLost() {
534 CheckSequencedThread(); 438 CheckSequencedThread();
535 if (!context_lost_callback_.is_null()) { 439 if (!context_lost_callback_.is_null()) {
536 context_lost_callback_.Run(); 440 context_lost_callback_.Run();
537 context_lost_callback_.Reset(); 441 context_lost_callback_.Reset();
538 } 442 }
539 443
540 context_lost_ = true; 444 context_lost_ = true;
541 if (share_resources_) { 445 if (share_resources_)
542 for (std::set<InProcessCommandBuffer*>::iterator it = 446 queue_->MarkShareGroupAsLost();
543 g_all_shared_contexts.Get().begin();
544 it != g_all_shared_contexts.Get().end();
545 ++it) {
546 (*it)->context_lost_ = true;
547 }
548 }
549 } 447 }
550 448
551 CommandBuffer::State InProcessCommandBuffer::GetStateFast() { 449 CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
552 CheckSequencedThread(); 450 CheckSequencedThread();
553 base::AutoLock lock(state_after_last_flush_lock_); 451 base::AutoLock lock(state_after_last_flush_lock_);
554 if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U) 452 if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
555 last_state_ = state_after_last_flush_; 453 last_state_ = state_after_last_flush_;
556 return last_state_; 454 return last_state_;
557 } 455 }
558 456
(...skipping 262 matching lines...)
821 return stream_texture_manager_->GetSurfaceTexture(stream_id); 719 return stream_texture_manager_->GetSurfaceTexture(stream_id);
822 } 720 }
823 #endif 721 #endif
824 722
825 // static 723 // static
826 void InProcessCommandBuffer::EnableVirtualizedContext() { 724 void InProcessCommandBuffer::EnableVirtualizedContext() {
827 g_use_virtualized_gl_context = true; 725 g_use_virtualized_gl_context = true;
828 } 726 }
829 727
830 // static 728 // static
831 void InProcessCommandBuffer::SetScheduleCallback(
832 const base::Closure& callback) {
833 DCHECK(!g_uses_explicit_scheduling);
834 DCHECK(!SchedulerClientBase::HasClients());
835 g_uses_explicit_scheduling = true;
836 g_gpu_queue.Get().SetScheduleCallback(callback);
837 }
838
839 // static
840 void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() {
841 g_gpu_queue.Get().RunTasks();
842 }
843
844 // static
845 void InProcessCommandBuffer::SetGpuMemoryBufferFactory( 729 void InProcessCommandBuffer::SetGpuMemoryBufferFactory(
846 GpuMemoryBufferFactory* factory) { 730 GpuMemoryBufferFactory* factory) {
847 g_gpu_memory_buffer_factory = factory; 731 g_gpu_memory_buffer_factory = factory;
848 } 732 }
849 733
850 } // namespace gpu 734 } // namespace gpu