Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(733)

Side by Side Diff: gpu/command_buffer/service/in_process_command_buffer.cc

Issue 19522006: GLInProcessContext: support async flushes and dedicated GPU thread (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: address comments Created 7 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
(Empty)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
6
7 #include <queue>
8 #include <utility>
9
10 #include <GLES2/gl2.h>
11 #ifndef GL_GLEXT_PROTOTYPES
12 #define GL_GLEXT_PROTOTYPES 1
13 #endif
14 #include <GLES2/gl2ext.h>
15 #include <GLES2/gl2extchromium.h>
16
17 #include "base/bind.h"
18 #include "base/bind_helpers.h"
19 #include "base/lazy_instance.h"
20 #include "base/logging.h"
21 #include "base/memory/weak_ptr.h"
22 #include "base/message_loop/message_loop_proxy.h"
23 #include "base/threading/thread.h"
24 #include "gpu/command_buffer/common/id_allocator.h"
25 #include "gpu/command_buffer/service/command_buffer_service.h"
26 #include "gpu/command_buffer/service/context_group.h"
27 #include "gpu/command_buffer/service/gl_context_virtual.h"
28 #include "gpu/command_buffer/service/gpu_scheduler.h"
29 #include "gpu/command_buffer/service/image_manager.h"
30 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
31 #include "ui/gfx/size.h"
32 #include "ui/gl/gl_context.h"
33 #include "ui/gl/gl_image.h"
34 #include "ui/gl/gl_share_group.h"
35 #include "ui/gl/gl_surface.h"
36
37 namespace gpu {
38
39 namespace {
40
// All live InProcessCommandBuffers that opted into resource sharing; used
// to find an existing ContextGroup and to propagate context-lost state.
static base::LazyInstance<std::set<InProcessCommandBuffer*> >
    g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER;

// Process-wide modes, set via the static methods at the bottom of this file
// before any context is created.
static bool g_use_virtualized_gl_context = false;
static bool g_uses_explicit_scheduling = false;
46
// Runs |task| synchronously, stores its return value in |*result| and
// signals |completion| so a blocked caller can wake up.  Used to turn an
// asynchronous QueueTask() into a blocking call with a result.
template <typename T>
static void RunTaskWithResult(base::Callback<T(void)> task,
                              T* result,
                              base::WaitableEvent* completion) {
  *result = task.Run();
  completion->Signal();
}
54
// A self-starting dedicated GPU thread, shared via ref-counting among all
// in-process command buffers that use implicit (thread-based) scheduling.
class GpuInProcessThread
    : public base::Thread,
      public base::RefCountedThreadSafe<GpuInProcessThread> {
 public:
  GpuInProcessThread();

 private:
  friend class base::RefCountedThreadSafe<GpuInProcessThread>;
  virtual ~GpuInProcessThread();

  DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
};
67
GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
  // Start immediately; clients post tasks to message_loop().
  Start();
}

GpuInProcessThread::~GpuInProcessThread() {
  // Blocks until all pending tasks have run.
  Stop();
}
75
// Used with explicit scheduling when there is no dedicated GPU thread.
// Tasks may be queued from any thread; they are drained on whichever thread
// calls RunTasks() (via ProcessGpuWorkOnCurrentThread()).
class GpuCommandQueue {
 public:
  GpuCommandQueue();
  ~GpuCommandQueue();

  // Appends |task| and pings |schedule_callback_| (if set) so the embedder
  // knows there is work to run.
  void QueueTask(const base::Closure& task);
  // Runs queued tasks on the calling thread until the queue is empty.
  void RunTasks();
  // Installs the "work available" callback; may only be set once.
  void SetScheduleCallback(const base::Closure& callback);

 private:
  base::Lock tasks_lock_;  // Guards |tasks_|.
  std::queue<base::Closure> tasks_;
  base::Closure schedule_callback_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandQueue);
};
93
GpuCommandQueue::GpuCommandQueue() {}

GpuCommandQueue::~GpuCommandQueue() {
  // All work must have been drained before destruction.
  base::AutoLock lock(tasks_lock_);
  DCHECK(tasks_.empty());
}
100
101 void GpuCommandQueue::QueueTask(const base::Closure& task) {
102 {
103 base::AutoLock lock(tasks_lock_);
104 tasks_.push(task);
105 }
106
107 if (!schedule_callback_.is_null()) {
108 schedule_callback_.Run();
109 return;
110 }
111 }
112
// Drains the queue on the calling thread.  Each task is run with
// |tasks_lock_| released so tasks may safely queue more work; the size is
// re-read after every pop, so tasks queued while draining are also run.
void GpuCommandQueue::RunTasks() {
  size_t num_tasks;
  {
    base::AutoLock lock(tasks_lock_);
    num_tasks = tasks_.size();
  }

  while (num_tasks) {
    base::Closure task;
    {
      base::AutoLock lock(tasks_lock_);
      task = tasks_.front();
      tasks_.pop();
      num_tasks = tasks_.size();
    }

    // Run outside the lock.
    task.Run();
  }
}
132
void GpuCommandQueue::SetScheduleCallback(const base::Closure& callback) {
  // The callback may only be installed once per process.
  DCHECK(schedule_callback_.is_null());
  schedule_callback_ = callback;
}

// The single process-wide queue used in explicit-scheduling mode.
static base::LazyInstance<GpuCommandQueue> g_gpu_queue =
    LAZY_INSTANCE_INITIALIZER;
140
// Common bookkeeping for scheduler clients: tracks all live clients and,
// for thread-based clients, hands out the shared GpuInProcessThread.
class SchedulerClientBase : public InProcessCommandBuffer::SchedulerClient {
 public:
  // |need_thread| is true for clients posting to the dedicated GPU thread,
  // false for clients using the explicit GpuCommandQueue.
  explicit SchedulerClientBase(bool need_thread);
  virtual ~SchedulerClientBase();

  // True while any client instance is alive.
  static bool HasClients();

 protected:
  scoped_refptr<GpuInProcessThread> thread_;  // NULL in queue mode.

 private:
  static base::LazyInstance<std::set<SchedulerClientBase*> > all_clients_;
  // Guards |all_clients_|.
  static base::LazyInstance<base::Lock> all_clients_lock_;
};

base::LazyInstance<std::set<SchedulerClientBase*> >
    SchedulerClientBase::all_clients_ = LAZY_INSTANCE_INITIALIZER;
base::LazyInstance<base::Lock> SchedulerClientBase::all_clients_lock_ =
    LAZY_INSTANCE_INITIALIZER;
160
161 SchedulerClientBase::SchedulerClientBase(bool need_thread) {
162 base::AutoLock(all_clients_lock_.Get());
163 if (need_thread) {
164 if (!all_clients_.Get().empty()) {
165 SchedulerClientBase* other = *all_clients_.Get().begin();
166 thread_ = other->thread_;
167 DCHECK(thread_.get());
168 } else {
169 thread_ = new GpuInProcessThread;
170 }
171 }
172 all_clients_.Get().insert(this);
173 }
174
175 SchedulerClientBase::~SchedulerClientBase() {
176 base::AutoLock(all_clients_lock_.Get());
177 all_clients_.Get().erase(this);
178 }
179
180 bool SchedulerClientBase::HasClients() {
181 base::AutoLock(all_clients_lock_.Get());
182 return !all_clients_.Get().empty();
183 }
184
// A client that talks to the GPU thread
class ThreadClient : public SchedulerClientBase {
 public:
  ThreadClient();
  virtual void QueueTask(const base::Closure& task) OVERRIDE;
};

ThreadClient::ThreadClient() : SchedulerClientBase(true) {
  DCHECK(thread_.get());
}

// Posts |task| to the shared GPU thread's message loop.
void ThreadClient::QueueTask(const base::Closure& task) {
  thread_->message_loop()->PostTask(FROM_HERE, task);
}
199
// A client that talks to the GpuCommandQueue
class QueueClient : public SchedulerClientBase {
 public:
  QueueClient();
  virtual void QueueTask(const base::Closure& task) OVERRIDE;
};

QueueClient::QueueClient() : SchedulerClientBase(false) {
  DCHECK(!thread_.get());
}

// Queues |task| on the process-wide explicit-scheduling queue.
void QueueClient::QueueTask(const base::Closure& task) {
  g_gpu_queue.Get().QueueTask(task);
}
214
215 static scoped_ptr<InProcessCommandBuffer::SchedulerClient>
216 CreateSchedulerClient() {
217 scoped_ptr<InProcessCommandBuffer::SchedulerClient> client;
218 if (g_uses_explicit_scheduling)
219 client.reset(new QueueClient);
220 else
221 client.reset(new ThreadClient);
222
223 return client.Pass();
224 }
225
226 class ScopedEvent {
227 public:
228 ScopedEvent(base::WaitableEvent* event) : event_(event) {}
229 ~ScopedEvent() { event_->Signal(); }
230
231 private:
232 base::WaitableEvent* event_;
233 };
234
235 }  // anonymous namespace
236
InProcessCommandBuffer::InProcessCommandBuffer()
    : context_lost_(false),
      share_group_id_(0),
      last_put_offset_(-1),
      // Auto-reset event, initially unsignalled; signalled by PumpCommands().
      flush_event_(false, false),
      queue_(CreateSchedulerClient()) {}

InProcessCommandBuffer::~InProcessCommandBuffer() {
  // Blocks until GPU-side teardown has completed.
  Destroy();
}
247
248 bool InProcessCommandBuffer::IsContextLost() {
249 if (context_lost_ || !command_buffer_) {
250 return true;
251 }
252 CommandBuffer::State state = GetState();
253 return error::IsError(state.error);
254 }
255
// Decoder resize callback (GPU thread); only installed for onscreen
// surfaces, so the surface must not be offscreen here.
void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);
}
260
261 bool InProcessCommandBuffer::MakeCurrent() {
262 command_buffer_lock_.AssertAcquired();
263
264 if (!context_lost_ && decoder_->MakeCurrent())
265 return true;
266 DLOG(ERROR) << "Context lost because MakeCurrent failed.";
267 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
268 command_buffer_->SetParseError(gpu::error::kLostContext);
269 return false;
270 }
271
// Put-offset-change callback (GPU thread).  Executes pending commands and
// always signals |flush_event_| on exit (via ScopedEvent), so FlushSync()
// waiters wake up even when MakeCurrent() fails.
void InProcessCommandBuffer::PumpCommands() {
  ScopedEvent handle_flush(&flush_event_);
  command_buffer_lock_.AssertAcquired();

  if (!MakeCurrent())
    return;

  gpu_scheduler_->PutChanged();
  CommandBuffer::State state = command_buffer_->GetState();
  // The service error state and |context_lost_| must agree.
  DCHECK((!error::IsError(state.error) && !context_lost_) ||
         (error::IsError(state.error) && context_lost_));
}
284
// GpuScheduler callback: switches the service-side get buffer.
bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}
290
// Client-thread entry point.  Records the sharing parameters, marshals the
// rest to the GPU thread, blocks until InitializeOnGpuThread() finishes and
// returns its result.
bool InProcessCommandBuffer::Initialize(
    bool is_offscreen,
    bool share_resources,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const char* allowed_extensions,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    unsigned int share_group_id) {

  share_resources_ = share_resources;
  context_lost_callback_ = WrapCallback(context_lost_callback);
  share_group_id_ = share_group_id;

  base::WaitableEvent completion(true, false);
  bool result;
  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),
                 is_offscreen,
                 window,
                 size,
                 allowed_extensions,
                 attribs,
                 gpu_preference);
  // base::Unretained is safe: we block on |completion| below, so |this|
  // outlives the queued task.
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
  completion.Wait();
  return result;
}
322
// GPU-thread side of Initialize().  Builds the service-side stack in order:
// transfer buffer manager, CommandBufferService, ContextGroup + decoder,
// GpuScheduler, surface, context, then initializes the decoder.  Any
// failure unwinds partially-created state via DestroyOnGpuThread().
bool InProcessCommandBuffer::InitializeOnGpuThread(
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const char* allowed_extensions,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference) {
  // Use one share group for all contexts.
  CR_DEFINE_STATIC_LOCAL(scoped_refptr<gfx::GLShareGroup>, share_group,
                         (new gfx::GLShareGroup));

  DCHECK(size.width() >= 0 && size.height() >= 0);

  TransferBufferManager* manager = new TransferBufferManager();
  transfer_buffer_manager_.reset(manager);
  manager->Initialize();

  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, base::Unretained(this)));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, base::Unretained(this)));

  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    DestroyOnGpuThread();
    return false;
  }

  // When sharing resources, find another context with the same share group
  // id so we can reuse its ContextGroup (and inherit its lost state).
  InProcessCommandBuffer* context_group = NULL;

  if (share_resources_ && !g_all_shared_contexts.Get().empty()) {
    DCHECK(share_group_id_);
    for (std::set<InProcessCommandBuffer*>::iterator it =
             g_all_shared_contexts.Get().begin();
         it != g_all_shared_contexts.Get().end();
         ++it) {
      if ((*it)->share_group_id_ == share_group_id_) {
        context_group = *it;
        DCHECK(context_group->share_resources_);
        context_lost_ = context_group->IsContextLost();
        break;
      }
    }
    // NOTE(review): this reassigns the function-static |share_group|, which
    // affects every later context, not just this one -- confirm intended.
    if (!context_group)
      share_group = new gfx::GLShareGroup;
  }

  bool bind_generates_resource = false;
  decoder_.reset(gles2::GLES2Decoder::Create(
      context_group ? context_group->decoder_->GetContextGroup()
                    : new gles2::ContextGroup(
                          NULL, NULL, NULL, NULL, bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();

  decoder_->set_engine(gpu_scheduler_.get());

  if (is_offscreen)
    surface_ = gfx::GLSurface::CreateOffscreenGLSurface(size);
  else
    surface_ = gfx::GLSurface::CreateViewGLSurface(window);

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();
    return false;
  }

  if (g_use_virtualized_gl_context) {
    // Virtual contexts all wrap one real context, created lazily and cached
    // on the share group.
    context_ = share_group->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          share_group.get(), surface_.get(), gpu_preference);
      share_group->SetSharedContext(context_.get());
    }

    context_ = new GLContextVirtual(
        share_group.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      context_ = NULL;
    }
  } else {
    context_ = gfx::GLContext::CreateGLContext(
        share_group.get(), surface_.get(), gpu_preference);
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();
    return false;
  }

  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.swap_buffer_complete_callback = true;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
                            context_,
                            is_offscreen,
                            size,
                            disallowed_features,
                            allowed_extensions,
                            attribs)) {
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();
    return false;
  }

  // Onscreen surfaces need to react to view resizes.
  if (!is_offscreen) {
    decoder_->SetResizeCallback(base::Bind(
        &InProcessCommandBuffer::OnResizeView, base::Unretained(this)));
  }

  if (share_resources_) {
    g_all_shared_contexts.Pointer()->insert(this);
  }

  return true;
}
455
// Client-thread entry point; blocks until GPU-side teardown completes.
void InProcessCommandBuffer::Destroy() {
  base::WaitableEvent completion(true, false);
  bool result;
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
  completion.Wait();
}
465
// GPU-thread side of Destroy(); also used to unwind a failed initialize.
bool InProcessCommandBuffer::DestroyOnGpuThread() {
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_ && context_->MakeCurrent(surface_);
  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = NULL;
  surface_ = NULL;

  g_all_shared_contexts.Pointer()->erase(this);
  return true;
}
480
// Allocates an image id synchronously on the calling thread, then binds the
// GpuMemoryBuffer to it asynchronously on the GPU thread.
unsigned int InProcessCommandBuffer::CreateImageForGpuMemoryBuffer(
    gfx::GpuMemoryBufferHandle buffer,
    gfx::Size size) {
  unsigned int image_id;
  {
    // TODO: ID allocation should go through CommandBuffer
    base::AutoLock lock(command_buffer_lock_);
    gles2::ContextGroup* group = decoder_->GetContextGroup();
    image_id =
        group->GetIdAllocator(gles2::id_namespaces::kImages)->AllocateID();
  }
  base::Closure image_task =
      base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
                 base::Unretained(this), buffer, size, image_id);
  QueueTask(image_task);
  return image_id;
}
498
// GPU-thread side of CreateImageForGpuMemoryBuffer().
void InProcessCommandBuffer::CreateImageOnGpuThread(
    gfx::GpuMemoryBufferHandle buffer,
    gfx::Size size,
    unsigned int image_id) {
  scoped_refptr<gfx::GLImage> gl_image =
      gfx::GLImage::CreateGLImageForGpuMemoryBuffer(buffer, size);
  decoder_->GetContextGroup()->image_manager()->AddImage(gl_image, image_id);
}
507
// Frees |image_id| synchronously on the calling thread, then removes the
// image from the image manager asynchronously on the GPU thread.
void InProcessCommandBuffer::RemoveImage(unsigned int image_id) {
  {
    // TODO: ID allocation should go through CommandBuffer
    base::AutoLock lock(command_buffer_lock_);
    gles2::ContextGroup* group = decoder_->GetContextGroup();
    group->GetIdAllocator(gles2::id_namespaces::kImages)->FreeID(image_id);
  }
  base::Closure image_manager_task =
      base::Bind(&InProcessCommandBuffer::RemoveImageOnGpuThread,
                 base::Unretained(this),
                 image_id);
  QueueTask(image_manager_task);
}
521
// GPU-thread side of RemoveImage().
void InProcessCommandBuffer::RemoveImageOnGpuThread(unsigned int image_id) {
  decoder_->GetContextGroup()->image_manager()->RemoveImage(image_id);
}
525
// Parse-error callback (GPU thread).  Notifies the client and marks every
// context sharing resources with this one as lost.
void InProcessCommandBuffer::OnContextLost() {
  if (!context_lost_callback_.is_null())
    context_lost_callback_.Run();

  context_lost_ = true;
  if (share_resources_) {
    // Losing one shared context loses them all.
    for (std::set<InProcessCommandBuffer*>::iterator it =
             g_all_shared_contexts.Get().begin();
         it != g_all_shared_contexts.Get().end();
         ++it) {
      (*it)->context_lost_ = true;
    }
  }
}
540
// Reads the service-side state under the lock and caches it in
// |last_state_|.
CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  base::AutoLock lock(command_buffer_lock_);
  return last_state_ = command_buffer_->GetState();
}

CommandBuffer::State InProcessCommandBuffer::GetState() {
  return GetStateFast();
}

// Returns the cached state without touching the service side.
CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  return last_state_;
}

int32 InProcessCommandBuffer::GetLastToken() { return last_state_.token; }
555
// GPU-thread side of Flush().
void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
}

// Asynchronous flush up to |put_offset|; a no-op when the offset hasn't
// moved or the command buffer is already in error.
void InProcessCommandBuffer::Flush(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  base::Unretained(this),
                                  put_offset);
  QueueTask(task);
}
574
// Synchronous flush: issues Flush() and waits on |flush_event_| (signalled
// by PumpCommands() after each pump) until the get offset advances past
// |last_known_get| or an error occurs.
CommandBuffer::State InProcessCommandBuffer::FlushSync(int32 put_offset,
                                                       int32 last_known_get) {
  if (put_offset == last_known_get || last_state_.error != gpu::error::kNoError)
    return last_state_;

  Flush(put_offset);
  GetStateFast();
  while (last_known_get == last_state_.get_offset &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
    GetStateFast();
  }

  return last_state_;
}
590
void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  {
    base::AutoLock lock(command_buffer_lock_);
    command_buffer_->SetGetBuffer(shm_id);
    last_put_offset_ = 0;
  }
  // Refresh the cached state to pick up the new get/put offsets.
  GetStateFast();
}
602
gpu::Buffer InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                         int32* id) {
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}

// Destruction is queued so it stays ordered with other GPU-side work.
// NOTE(review): base::Unretained(command_buffer_.get()) assumes the command
// buffer outlives the queued task -- confirm against Destroy() ordering.
void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer,
                                  base::Unretained(command_buffer_.get()),
                                  id);

  QueueTask(task);
}
616
// Not part of the client-facing contract of this implementation.
gpu::Buffer InProcessCommandBuffer::GetTransferBuffer(int32 id) {
  NOTREACHED();
  return gpu::Buffer();
}

// Sync point insertion is not supported in-process.
uint32 InProcessCommandBuffer::InsertSyncPoint() {
  NOTREACHED();
  return 0;
}

// Without real sync points, the callback is simply queued behind any
// previously queued GPU work.
void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  QueueTask(WrapCallback(callback));
}

gpu::error::Error InProcessCommandBuffer::GetLastError() {
  return last_state_.error;
}

// Callers must use the multi-argument Initialize() overload instead.
bool InProcessCommandBuffer::Initialize() {
  NOTREACHED();
  return false;
}
639
// Service-side setters; never valid on this client-facing interface.
void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); }

void InProcessCommandBuffer::SetToken(int32 token) { NOTREACHED(); }

void InProcessCommandBuffer::SetParseError(gpu::error::Error error) {
  NOTREACHED();
}

void InProcessCommandBuffer::SetContextLostReason(
    gpu::error::ContextLostReason reason) {
  NOTREACHED();
}
652
namespace {

// Runs |callback| immediately when already on |loop|'s thread, otherwise
// posts it there.
static void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
                         const base::Closure& callback) {
  if (loop != base::MessageLoopProxy::current())
    loop->PostTask(FROM_HERE, callback);
  else
    callback.Run();
}

// Adapter that runs a heap-allocated closure; taking ownership via
// scoped_ptr guarantees it is destroyed on the thread that ran it.
static void RunCallback(scoped_ptr<base::Closure> callback) {
  DCHECK(callback.get());
  callback->Run();
}

}  // anonymous namespace
669
670 base::Closure InProcessCommandBuffer::WrapCallback(
671 const base::Closure& callback) {
672 // Make sure the callback gets deleted on the target thread by passing
673 // ownership.
674 scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
675 base::Closure callback_on_client_thread =
676 base::Bind(&RunCallback, base::Passed(&scoped_callback));
677 base::Closure wrapped_callback =
678 base::Bind(&PostCallback, base::MessageLoopProxy::current(),
679 callback);
680 return wrapped_callback;
681 }
682
// static
// Must be called before any context is created.
void InProcessCommandBuffer::EnableVirtualizedContext() {
  g_use_virtualized_gl_context = true;
}

// static
// Switches the process to explicit scheduling: GPU work is queued and only
// runs when ProcessGpuWorkOnCurrentThread() is called.  Must be called
// before any scheduler client exists.
void InProcessCommandBuffer::SetScheduleCallback(
    const base::Closure& callback) {
  DCHECK(!g_uses_explicit_scheduling);
  DCHECK(!SchedulerClientBase::HasClients());
  g_uses_explicit_scheduling = true;
  g_gpu_queue.Get().SetScheduleCallback(callback);
}

// static
// Drains the explicit-scheduling queue on the calling thread.
void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() {
  g_gpu_queue.Get().RunTasks();
}
701
702 } // namespace gpu
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698