// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/in_process_command_buffer.h"

#include <queue>
#include <utility>
#include <vector>

#include <GLES2/gl2.h>
#ifndef GL_GLEXT_PROTOTYPES
#define GL_GLEXT_PROTOTYPES 1
#endif
#include <GLES2/gl2ext.h>
#include <GLES2/gl2extchromium.h>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/threading/thread.h"
#include "gpu/command_buffer/client/share_group.h"
#include "gpu/command_buffer/common/id_allocator.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "ui/gfx/size.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_share_group.h"
#include "ui/gl/gl_surface.h"

namespace gpu {

namespace {

static base::LazyInstance<std::set<InProcessCommandBuffer*> >
    g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER;

static bool g_use_virtualized_gl_context = false;

template <typename T>
static void RunTaskWithResult(base::Callback<T(void)> task,
                              T* result,
                              base::WaitableEvent* completion) {
  *result = task.Run();
  completion->Signal();
}

class GpuInProcessThread : public base::Thread {
 public:
  GpuInProcessThread();
  virtual ~GpuInProcessThread();

 private:
  DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
};

GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
  Start();
}

GpuInProcessThread::~GpuInProcessThread() {
  Stop();
}

class GpuCommandQueue {
 public:
  GpuCommandQueue();
  virtual ~GpuCommandQueue();

  void QueueTask(const base::Closure& task);
  void RunTasks();
  void SetScheduleCallback(const base::Closure& callback);

 private:
  base::Lock tasks_lock_;
  std::queue<base::Closure> tasks_;

  base::Closure schedule_callback_;
  base::LazyInstance<GpuInProcessThread> thread_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandQueue);
};

GpuCommandQueue::GpuCommandQueue() {}

GpuCommandQueue::~GpuCommandQueue() {
  base::AutoLock lock(tasks_lock_);
  DCHECK(tasks_.empty());
}

void GpuCommandQueue::QueueTask(const base::Closure& task) {
  base::AutoLock lock(tasks_lock_);
  tasks_.push(task);

  if (!schedule_callback_.is_null()) {
    schedule_callback_.Run();
    return;
  }
  thread_.Get().message_loop()
      ->PostTask(FROM_HERE,
                 base::Bind(&GpuCommandQueue::RunTasks,
                            base::Unretained(this)));
}

void GpuCommandQueue::RunTasks() {
  size_t num_tasks;
  {
    base::AutoLock lock(tasks_lock_);
    num_tasks = tasks_.size();
  }

  while (num_tasks) {
    base::Closure task;
    {
      base::AutoLock lock(tasks_lock_);
      task = tasks_.front();
      tasks_.pop();
      num_tasks = tasks_.size();
    }

    task.Run();
  }
}

void GpuCommandQueue::SetScheduleCallback(const base::Closure& callback) {
  DCHECK(schedule_callback_.is_null());
  schedule_callback_ = callback;
}

static base::LazyInstance<GpuCommandQueue>::Leaky g_gpu_queue =
    LAZY_INSTANCE_INITIALIZER;

static void QueueTask(const base::Closure& task) {
  g_gpu_queue.Get().QueueTask(task);
}

class ScopedEvent {
 public:
  ScopedEvent(base::WaitableEvent* event) : event_(event) {}
  ~ScopedEvent() { event_->Signal(); }

 private:
  base::WaitableEvent* event_;
};

}  // anonymous namespace

InProcessCommandBuffer::InProcessCommandBuffer()
    : context_lost_(false),
      last_put_offset_(-1),
      flush_event_(false, false) {}

InProcessCommandBuffer::~InProcessCommandBuffer() {
  Destroy();
}

bool InProcessCommandBuffer::IsContextLost() {
  if (context_lost_ || !command_buffer_) {
    return true;
  }
  CommandBuffer::State state = GetState();
  return error::IsError(state.error);
}

void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);
}

bool InProcessCommandBuffer::MakeCurrent() {
  command_buffer_lock_.AssertAcquired();

  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  return false;
}

void InProcessCommandBuffer::PumpCommands() {
  ScopedEvent handle_flush(&flush_event_);
  command_buffer_lock_.AssertAcquired();

  if (!MakeCurrent())
    return;

  gpu_scheduler_->PutChanged();
  CommandBuffer::State state = command_buffer_->GetState();
  DCHECK((!error::IsError(state.error) && !context_lost_) ||
         (error::IsError(state.error) && context_lost_));
}

bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}

bool InProcessCommandBuffer::Initialize(
    bool is_offscreen,
    bool share_resources,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const char* allowed_extensions,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    scoped_refptr<gles2::ShareGroup>* client_share_group) {

  share_resources_ = share_resources;
  context_lost_callback_ = WrapCallback(context_lost_callback);

  base::WaitableEvent completion(true, false);
  bool result;
  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),
                 is_offscreen,
                 window,
                 size,
                 allowed_extensions,
                 attribs,
                 gpu_preference);
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
  completion.Wait();
  *client_share_group = client_share_group_;
  return result;
}

bool InProcessCommandBuffer::InitializeOnGpuThread(
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const char* allowed_extensions,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference) {
  // Use one share group for all contexts.
  CR_DEFINE_STATIC_LOCAL(scoped_refptr<gfx::GLShareGroup>, share_group,
                         (new gfx::GLShareGroup));
  CR_DEFINE_STATIC_LOCAL(scoped_refptr<gles2::ShareGroup>, client_share_group,
                         (new gles2::ShareGroup(true, false)));

  DCHECK(size.width() >= 0 && size.height() >= 0);

  TransferBufferManager* manager = new TransferBufferManager();
  transfer_buffer_manager_.reset(manager);
  manager->Initialize();

  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, base::Unretained(this)));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, base::Unretained(this)));

  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    Destroy();
    return false;
  }

  InProcessCommandBuffer* context_group = NULL;

  if (share_resources_ && !g_all_shared_contexts.Get().empty()) {
    for (std::set<InProcessCommandBuffer*>::iterator it =
             g_all_shared_contexts.Get().begin();
         it != g_all_shared_contexts.Get().end();
         ++it) {
      if (!(*it)->IsContextLost()) {
        context_group = *it;
        break;
      }
    }
    if (!context_group) {
      share_group = new gfx::GLShareGroup;
      client_share_group = new gles2::ShareGroup(true, false);
    }
  }

  if (share_resources_)
    client_share_group_ = client_share_group;

  // TODO(gman): This needs to be true if this is Pepper.
  bool bind_generates_resource = false;
  decoder_.reset(gles2::GLES2Decoder::Create(
      context_group ? context_group->decoder_->GetContextGroup()
                    : new gles2::ContextGroup(
                          NULL, NULL, NULL, NULL, bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();

  decoder_->set_engine(gpu_scheduler_.get());

  if (is_offscreen)
    surface_ = gfx::GLSurface::CreateOffscreenGLSurface(size);
  else
    surface_ = gfx::GLSurface::CreateViewGLSurface(window);

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    Destroy();
    return false;
  }

  if (g_use_virtualized_gl_context) {
    context_ = share_group->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          share_group.get(), surface_.get(), gpu_preference);
      share_group->SetSharedContext(context_.get());
    }

    context_ = new GLContextVirtual(
        share_group.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      context_ = NULL;
    }
  } else {
    context_ = gfx::GLContext::CreateGLContext(
        share_group.get(), surface_.get(), gpu_preference);
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    Destroy();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    Destroy();
    return false;
  }

  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.swap_buffer_complete_callback = true;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
                            context_,
                            is_offscreen,
                            size,
                            disallowed_features,
                            allowed_extensions,
                            attribs)) {
    LOG(ERROR) << "Could not initialize decoder.";
    Destroy();
    return false;
  }

  if (!is_offscreen) {
    decoder_->SetResizeCallback(base::Bind(
        &InProcessCommandBuffer::OnResizeView, base::Unretained(this)));
  }

  if (share_resources_) {
    g_all_shared_contexts.Pointer()->insert(this);
  }

  return true;
}

void InProcessCommandBuffer::Destroy() {
  base::WaitableEvent completion(true, false);
  bool result;
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
  completion.Wait();
}

bool InProcessCommandBuffer::DestroyOnGpuThread() {
  if (decoder_) {
    decoder_->Destroy(!IsContextLost());
    decoder_.reset();
  }

  g_all_shared_contexts.Pointer()->erase(this);
  return true;
}

unsigned int InProcessCommandBuffer::CreateImageForGpuMemoryBuffer(
    gfx::GpuMemoryBufferHandle buffer,
    gfx::Size size) {
  unsigned int image_id;
  {
    // TODO: ID allocation should go through CommandBuffer
    base::AutoLock lock(command_buffer_lock_);
    gles2::ContextGroup* group = decoder_->GetContextGroup();
    image_id =
        group->GetIdAllocator(gles2::id_namespaces::kImages)->AllocateID();
  }
  base::Closure image_task =
      base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
                 base::Unretained(this), buffer, size, image_id);
  QueueTask(image_task);
  return image_id;
}

void InProcessCommandBuffer::CreateImageOnGpuThread(
    gfx::GpuMemoryBufferHandle buffer,
    gfx::Size size,
    unsigned int image_id) {
  scoped_refptr<gfx::GLImage> gl_image =
      gfx::GLImage::CreateGLImageForGpuMemoryBuffer(buffer, size);
  decoder_->GetContextGroup()->image_manager()->AddImage(gl_image, image_id);
}

void InProcessCommandBuffer::RemoveImage(unsigned int image_id) {
  {
    // TODO: ID allocation should go through CommandBuffer
    base::AutoLock lock(command_buffer_lock_);
    gles2::ContextGroup* group = decoder_->GetContextGroup();
    group->GetIdAllocator(gles2::id_namespaces::kImages)->FreeID(image_id);
  }
  base::Closure image_manager_task =
      base::Bind(&InProcessCommandBuffer::RemoveImageOnGpuThread,
                 base::Unretained(this),
                 image_id);
  QueueTask(image_manager_task);
}

void InProcessCommandBuffer::RemoveImageOnGpuThread(unsigned int image_id) {
  decoder_->GetContextGroup()->image_manager()->RemoveImage(image_id);
}

void InProcessCommandBuffer::OnContextLost() {
  if (!context_lost_callback_.is_null())
    context_lost_callback_.Run();

  context_lost_ = true;
  if (share_resources_) {
    for (std::set<InProcessCommandBuffer*>::iterator it =
             g_all_shared_contexts.Get().begin();
         it != g_all_shared_contexts.Get().end();
         ++it) {
      (*it)->context_lost_ = true;
      if (!(*it)->context_lost_callback_.is_null())
        (*it)->context_lost_callback_.Run();
[Inline review comment, sievers, 2013/07/25 00:41:23: I noticed the callback wasn't run before for the o]
    }
  }
}

CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  base::AutoLock lock(command_buffer_lock_);
  return last_state_ = command_buffer_->GetState();
}

CommandBuffer::State InProcessCommandBuffer::GetState() {
  return GetStateFast();
}

CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  return last_state_;
}

int32 InProcessCommandBuffer::GetLastToken() { return last_state_.token; }

void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
}

void InProcessCommandBuffer::Flush(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  base::Unretained(this),
                                  put_offset);
  QueueTask(task);
}

CommandBuffer::State InProcessCommandBuffer::FlushSync(int32 put_offset,
                                                       int32 last_known_get) {
  if (put_offset == last_known_get || last_state_.error != gpu::error::kNoError)
    return last_state_;

  Flush(put_offset);
  GetStateFast();
  while (last_known_get == last_state_.get_offset &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
    GetStateFast();
  }

  return last_state_;
}

void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  {
    base::AutoLock lock(command_buffer_lock_);
    command_buffer_->SetGetBuffer(shm_id);
    last_put_offset_ = 0;
  }
  GetStateFast();
}

gpu::Buffer InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                         int32* id) {
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}

void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  base::AutoLock lock(command_buffer_lock_);
  base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer,
                                  base::Unretained(command_buffer_.get()),
                                  id);

  QueueTask(task);
}

gpu::Buffer InProcessCommandBuffer::GetTransferBuffer(int32 id) {
  NOTREACHED();
  return gpu::Buffer();
}

uint32 InProcessCommandBuffer::InsertSyncPoint() {
  NOTREACHED();
  return 0;
}

void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  QueueTask(WrapCallback(callback));
}

gpu::error::Error InProcessCommandBuffer::GetLastError() {
  return last_state_.error;
}

bool InProcessCommandBuffer::Initialize() {
  NOTREACHED();
  return false;
}

void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); }

void InProcessCommandBuffer::SetToken(int32 token) { NOTREACHED(); }

void InProcessCommandBuffer::SetParseError(gpu::error::Error error) {
  NOTREACHED();
}

void InProcessCommandBuffer::SetContextLostReason(
    gpu::error::ContextLostReason reason) {
  NOTREACHED();
}

static void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
                         const base::Closure& callback) {
  if (loop != base::MessageLoopProxy::current())
    loop->PostTask(FROM_HERE, callback);
  else
    callback.Run();
}

base::Closure InProcessCommandBuffer::WrapCallback(
    const base::Closure& callback) {
  base::Closure wrapped_callback =
      base::Bind(&PostCallback, base::MessageLoopProxy::current(), callback);
  return wrapped_callback;
}

// static
void InProcessCommandBuffer::EnableVirtualizedContext() {
  g_use_virtualized_gl_context = true;
}

// static
void InProcessCommandBuffer::SetScheduleCallback(
    const base::Closure& callback) {
  g_gpu_queue.Get().SetScheduleCallback(callback);
}

// static
void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() {
  g_gpu_queue.Get().RunTasks();
}

}  // namespace gpu