Chromium Code Reviews

Diff: gpu/command_buffer/service/in_process_command_buffer.cc

Issue 143023005: Support multiple service instances with GLInProcessContext (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 6 years, 10 months ago
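
In short, this patch replaces the file's process-wide scheduling globals (g_use_virtualized_gl_context, g_uses_explicit_scheduling and the GpuCommandQueue/SchedulerClient machinery) with a per-instance InProcessCommandBuffer::Service object, so several independent service instances can coexist. Paraphrasing the interface change visible in the diff below (these signatures are a sketch, not copied from the header):

    // Before: one implicit, global scheduling mode shared by every context.
    InProcessCommandBuffer();
    static void EnableVirtualizedContext();
    static void SetScheduleCallback(const base::Closure& callback);
    static void ProcessGpuWorkOnCurrentThread();

    // After: each command buffer is bound to a Service that does its scheduling;
    // passing a null service falls back to a shared GpuInProcessThread.
    InProcessCommandBuffer(const scoped_refptr<Service>& service);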
 // Copyright 2013 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "gpu/command_buffer/service/in_process_command_buffer.h"

 #include <queue>
 #include <utility>

 #include <GLES2/gl2.h>
 (...skipping 25 matching lines...)

 #if defined(OS_ANDROID)
 #include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
 #include "ui/gl/android/surface_texture.h"
 #endif

 namespace gpu {

 namespace {

-static bool g_use_virtualized_gl_context = false;
-static bool g_uses_explicit_scheduling = false;
 static GpuMemoryBufferFactory* g_gpu_memory_buffer_factory = NULL;

 template <typename T>
 static void RunTaskWithResult(base::Callback<T(void)> task,
                               T* result,
                               base::WaitableEvent* completion) {
   *result = task.Run();
   completion->Signal();
 }

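RunTaskWithResult is the helper this file uses to run a callback on the service thread and hand a result back to the blocking caller. A minimal sketch of that calling pattern, assuming a bool-returning task; DoInitializeOnGpuThread and QueueTask below stand in for the real task and posting mechanism used elsewhere in this file:

    // Wrap a task that returns a value, post it to the service thread, and
    // block on a WaitableEvent until the result has been written.
    bool result = false;
    base::WaitableEvent completion(true, false);
    base::Callback<bool(void)> task = base::Bind(&DoInitializeOnGpuThread);
    QueueTask(base::Bind(&RunTaskWithResult<bool>, task, &result, &completion));
    completion.Wait();
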
 class GpuInProcessThread
     : public base::Thread,
-      public base::RefCountedThreadSafe<GpuInProcessThread> {
+      public InProcessCommandBuffer::Service {
  public:
   GpuInProcessThread();

+  virtual void ScheduleTask(const base::Closure& task) OVERRIDE;
+  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
+  virtual bool UseVirtualizedGLContexts() { return false; }
+
  private:
-  friend class base::RefCountedThreadSafe<GpuInProcessThread>;
   virtual ~GpuInProcessThread();

   DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
 };

 GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
   Start();
 }

 GpuInProcessThread::~GpuInProcessThread() {
   Stop();
 }

-// Used with explicit scheduling when there is no dedicated GPU thread.
-class GpuCommandQueue {
- public:
-  GpuCommandQueue();
-  ~GpuCommandQueue();
-
-  void QueueTask(const base::Closure& task);
-  void RunTasks();
-  void SetScheduleCallback(const base::Closure& callback);
-
- private:
-  base::Lock tasks_lock_;
-  std::queue<base::Closure> tasks_;
-  base::Closure schedule_callback_;
-
-  DISALLOW_COPY_AND_ASSIGN(GpuCommandQueue);
-};
-
-GpuCommandQueue::GpuCommandQueue() {}
-
-GpuCommandQueue::~GpuCommandQueue() {
-  base::AutoLock lock(tasks_lock_);
-  DCHECK(tasks_.empty());
+void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
+  message_loop()->PostTask(FROM_HERE, task);
 }

-void GpuCommandQueue::QueueTask(const base::Closure& task) {
-  {
-    base::AutoLock lock(tasks_lock_);
-    tasks_.push(task);
-  }
-
-  DCHECK(!schedule_callback_.is_null());
-  schedule_callback_.Run();
-}
-
-void GpuCommandQueue::RunTasks() {
-  size_t num_tasks;
-  {
-    base::AutoLock lock(tasks_lock_);
-    num_tasks = tasks_.size();
-  }
-
-  while (num_tasks) {
-    base::Closure task;
-    {
-      base::AutoLock lock(tasks_lock_);
-      task = tasks_.front();
-      tasks_.pop();
-      num_tasks = tasks_.size();
-    }
-
-    task.Run();
-  }
-}
-
-void GpuCommandQueue::SetScheduleCallback(const base::Closure& callback) {
-  DCHECK(schedule_callback_.is_null());
-  schedule_callback_ = callback;
-}
-
-static base::LazyInstance<GpuCommandQueue> g_gpu_queue =
-    LAZY_INSTANCE_INITIALIZER;
-
-class SchedulerClientBase : public InProcessCommandBuffer::SchedulerClient {
- public:
-  explicit SchedulerClientBase(bool need_thread);
-  virtual ~SchedulerClientBase();
-
-  static bool HasClients();
-
- protected:
-  scoped_refptr<GpuInProcessThread> thread_;
-
- private:
-  static base::LazyInstance<std::set<SchedulerClientBase*> > all_clients_;
-  static base::LazyInstance<base::Lock> all_clients_lock_;
-};
-
-base::LazyInstance<std::set<SchedulerClientBase*> >
-    SchedulerClientBase::all_clients_ = LAZY_INSTANCE_INITIALIZER;
-base::LazyInstance<base::Lock> SchedulerClientBase::all_clients_lock_ =
-    LAZY_INSTANCE_INITIALIZER;
-
-SchedulerClientBase::SchedulerClientBase(bool need_thread) {
-  base::AutoLock lock(all_clients_lock_.Get());
-  if (need_thread) {
-    if (!all_clients_.Get().empty()) {
-      SchedulerClientBase* other = *all_clients_.Get().begin();
-      thread_ = other->thread_;
-      DCHECK(thread_.get());
-    } else {
-      thread_ = new GpuInProcessThread;
-    }
-  }
-  all_clients_.Get().insert(this);
-}
-
-SchedulerClientBase::~SchedulerClientBase() {
-  base::AutoLock lock(all_clients_lock_.Get());
-  all_clients_.Get().erase(this);
-}
-
-bool SchedulerClientBase::HasClients() {
-  base::AutoLock lock(all_clients_lock_.Get());
-  return !all_clients_.Get().empty();
-}
-
-// A client that talks to the GPU thread
-class ThreadClient : public SchedulerClientBase {
- public:
-  ThreadClient();
-  virtual void QueueTask(const base::Closure& task) OVERRIDE;
-  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
-};
-
-ThreadClient::ThreadClient() : SchedulerClientBase(true) {
-  DCHECK(thread_.get());
-}
-
-void ThreadClient::QueueTask(const base::Closure& task) {
-  thread_->message_loop()->PostTask(FROM_HERE, task);
-}
-
-void ThreadClient::ScheduleIdleWork(const base::Closure& callback) {
-  thread_->message_loop()->PostDelayedTask(
+void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) {
+  message_loop()->PostDelayedTask(
       FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
 }

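With this change GpuInProcessThread doubles as the default InProcessCommandBuffer::Service. The Service interface itself is declared in in_process_command_buffer.h, which is not part of this diff; judging only from the overrides above, its shape is roughly the following sketch (an inference, not a copy of the header; in particular, how reference counting is wired up so the object can be held in a scoped_refptr is not visible here):

    class Service {
     public:
      virtual ~Service();
      virtual void ScheduleTask(const base::Closure& task) = 0;
      virtual void ScheduleIdleWork(const base::Closure& callback) = 0;
      virtual bool UseVirtualizedGLContexts() = 0;
    };
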
-// A client that talks to the GpuCommandQueue
-class QueueClient : public SchedulerClientBase {
- public:
-  QueueClient();
-  virtual void QueueTask(const base::Closure& task) OVERRIDE;
-  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
-};
-
-QueueClient::QueueClient() : SchedulerClientBase(false) {
-  DCHECK(!thread_.get());
-}
-
-void QueueClient::QueueTask(const base::Closure& task) {
-  g_gpu_queue.Get().QueueTask(task);
-}
-
-void QueueClient::ScheduleIdleWork(const base::Closure& callback) {
-  // TODO(sievers): Should this do anything?
-}
-
-static scoped_ptr<InProcessCommandBuffer::SchedulerClient>
-CreateSchedulerClient() {
-  scoped_ptr<InProcessCommandBuffer::SchedulerClient> client;
-  if (g_uses_explicit_scheduling)
-    client.reset(new QueueClient);
-  else
-    client.reset(new ThreadClient);
-
-  return client.Pass();
-}
+base::LazyInstance<std::set<InProcessCommandBuffer*> > all_clients_ =
+    LAZY_INSTANCE_INITIALIZER;
+base::LazyInstance<base::Lock> all_clients_lock_ = LAZY_INSTANCE_INITIALIZER;

 class ScopedEvent {
  public:
   ScopedEvent(base::WaitableEvent* event) : event_(event) {}
   ~ScopedEvent() { event_->Signal(); }

  private:
   base::WaitableEvent* event_;
 };

 }  // anonymous namespace

+InProcessCommandBuffer::Service::Service() {}
+
+InProcessCommandBuffer::Service::~Service() {}
+
+scoped_refptr<InProcessCommandBuffer::Service>
+InProcessCommandBuffer::GetDefaultService() {
+  base::AutoLock lock(all_clients_lock_.Get());
+  scoped_refptr<Service> service;

    [Inline comments on "scoped_refptr<Service> service;"]
    no sievers 2014/02/12 03:19:36  Errr, I guess this could just be a lazy instance n
    piman 2014/02/12 04:10:42  Yes, that would be a lot simpler.
    no sievers 2014/02/12 22:31:29  On second thought: Then it would leak though. So m

+  if (!all_clients_.Get().empty()) {
+    InProcessCommandBuffer* other = *all_clients_.Get().begin();
+    service = other->queue_;
+    DCHECK(service.get());
+  } else {
+    service = new GpuInProcessThread;
+  }
+  return service;
+}
+
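The inline comments above weigh replacing this client bookkeeping with a lazy instance, and sievers' follow-up points out the catch: a leaky lazy instance would keep the default thread alive for the rest of the process. For illustration only, a sketch of that discussed alternative (not part of this patch; a real version would also need to consider locking around first use):

    // Hypothetical alternative from the review discussion: keep the default
    // service in a leaky LazyInstance. Simpler than scanning all_clients_,
    // but the GpuInProcessThread is then never stopped or destroyed.
    base::LazyInstance<scoped_refptr<GpuInProcessThread> >::Leaky
        g_default_service = LAZY_INSTANCE_INITIALIZER;

    scoped_refptr<InProcessCommandBuffer::Service>
    InProcessCommandBuffer::GetDefaultService() {
      if (!g_default_service.Get().get())
        g_default_service.Get() = new GpuInProcessThread;
      return g_default_service.Get();
    }
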
-InProcessCommandBuffer::InProcessCommandBuffer()
+InProcessCommandBuffer::InProcessCommandBuffer(
+    const scoped_refptr<Service>& service)
     : context_lost_(false),
       last_put_offset_(-1),
       flush_event_(false, false),
-      queue_(CreateSchedulerClient()),
-      gpu_thread_weak_ptr_factory_(this) {}
+      queue_(service.get() ? service : GetDefaultService()),
+      gpu_thread_weak_ptr_factory_(this) {
+  base::AutoLock lock(all_clients_lock_.Get());
+  all_clients_.Get().insert(this);
+}

 InProcessCommandBuffer::~InProcessCommandBuffer() {
   Destroy();
+  base::AutoLock lock(all_clients_lock_.Get());
+  all_clients_.Get().erase(this);
 }

 void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
   CheckSequencedThread();
   DCHECK(!surface_->IsOffscreen());
   surface_->Resize(size);
 }

 bool InProcessCommandBuffer::MakeCurrent() {
   CheckSequencedThread();
 (...skipping 123 matching lines...)
     else
       surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
   }

   if (!surface_.get()) {
     LOG(ERROR) << "Could not create GLSurface.";
     DestroyOnGpuThread();
     return false;
   }

-  if (g_use_virtualized_gl_context) {
+  if (queue_->UseVirtualizedGLContexts()) {
     context_ = gl_share_group_->GetSharedContext();
     if (!context_.get()) {
       context_ = gfx::GLContext::CreateGLContext(
           gl_share_group_.get(), surface_.get(), params.gpu_preference);
       gl_share_group_->SetSharedContext(context_.get());
     }

     context_ = new GLContextVirtual(
         gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr());
     if (context_->Initialize(surface_.get(), params.gpu_preference)) {
 (...skipping 363 matching lines...)

 #if defined(OS_ANDROID)
 scoped_refptr<gfx::SurfaceTexture>
 InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
   DCHECK(stream_texture_manager_);
   return stream_texture_manager_->GetSurfaceTexture(stream_id);
 }
 #endif

 // static
-void InProcessCommandBuffer::EnableVirtualizedContext() {
-  g_use_virtualized_gl_context = true;
-}
-
-// static
-void InProcessCommandBuffer::SetScheduleCallback(
-    const base::Closure& callback) {
-  DCHECK(!g_uses_explicit_scheduling);
-  DCHECK(!SchedulerClientBase::HasClients());
-  g_uses_explicit_scheduling = true;
-  g_gpu_queue.Get().SetScheduleCallback(callback);
-}
-
-// static
-void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() {
-  g_gpu_queue.Get().RunTasks();
-}
-
-// static
 void InProcessCommandBuffer::SetGpuMemoryBufferFactory(
     GpuMemoryBufferFactory* factory) {
   g_gpu_memory_buffer_factory = factory;
 }

 }  // namespace gpu
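
The net effect of the patch: instead of global flags, each InProcessCommandBuffer is bound to a Service, and an embedder can supply its own instance shared by several command buffers. A sketch of such a client-provided service follows; the class name, thread name, and the direct construction of InProcessCommandBuffer are illustrative, and reference-counting details depend on the Service declaration in the header:

    // Illustrative embedder-side service: runs GPU work on its own thread and
    // opts in to virtualized GL contexts for the contexts it serves.
    class MyGpuService : public gpu::InProcessCommandBuffer::Service {
     public:
      MyGpuService() : thread_("MyGpuThread") { thread_.Start(); }

      virtual void ScheduleTask(const base::Closure& task) OVERRIDE {
        thread_.message_loop()->PostTask(FROM_HERE, task);
      }
      virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE {
        thread_.message_loop()->PostDelayedTask(
            FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
      }
      virtual bool UseVirtualizedGLContexts() OVERRIDE { return true; }

     private:
      base::Thread thread_;
    };

    // Two command buffers sharing one service instance:
    scoped_refptr<gpu::InProcessCommandBuffer::Service> service(new MyGpuService);
    scoped_ptr<gpu::InProcessCommandBuffer> a(new gpu::InProcessCommandBuffer(service));
    scoped_ptr<gpu::InProcessCommandBuffer> b(new gpu::InProcessCommandBuffer(service));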
