Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(222)

Side by Side Diff: gpu/command_buffer/service/in_process_command_buffer.cc

Issue 2493913002: Mus: Move InProcessCommandBuffer and GLInProcessContext to gpu/ipc (Closed)
Patch Set: Fixed casts on windows Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « gpu/command_buffer/service/in_process_command_buffer.h ('k') | gpu/ipc/BUILD.gn » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
6
7 #include <stddef.h>
8 #include <stdint.h>
9
10 #include <queue>
11 #include <set>
12 #include <utility>
13
14 #include "base/bind.h"
15 #include "base/bind_helpers.h"
16 #include "base/command_line.h"
17 #include "base/lazy_instance.h"
18 #include "base/location.h"
19 #include "base/logging.h"
20 #include "base/memory/weak_ptr.h"
21 #include "base/numerics/safe_conversions.h"
22 #include "base/sequence_checker.h"
23 #include "base/single_thread_task_runner.h"
24 #include "base/threading/thread_task_runner_handle.h"
25 #include "gpu/command_buffer/client/gpu_control_client.h"
26 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
27 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
28 #include "gpu/command_buffer/common/sync_token.h"
29 #include "gpu/command_buffer/service/command_buffer_service.h"
30 #include "gpu/command_buffer/service/command_executor.h"
31 #include "gpu/command_buffer/service/context_group.h"
32 #include "gpu/command_buffer/service/gl_context_virtual.h"
33 #include "gpu/command_buffer/service/gpu_preferences.h"
34 #include "gpu/command_buffer/service/image_factory.h"
35 #include "gpu/command_buffer/service/image_manager.h"
36 #include "gpu/command_buffer/service/mailbox_manager.h"
37 #include "gpu/command_buffer/service/memory_program_cache.h"
38 #include "gpu/command_buffer/service/memory_tracking.h"
39 #include "gpu/command_buffer/service/query_manager.h"
40 #include "gpu/command_buffer/service/service_utils.h"
41 #include "gpu/command_buffer/service/sync_point_manager.h"
42 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
43 #include "ui/gfx/geometry/size.h"
44 #include "ui/gl/gl_context.h"
45 #include "ui/gl/gl_image.h"
46 #include "ui/gl/gl_image_shared_memory.h"
47 #include "ui/gl/gl_share_group.h"
48 #include "ui/gl/init/gl_factory.h"
49
50 #if defined(OS_WIN)
51 #include <windows.h>
52 #include "base/process/process_handle.h"
53 #endif
54
55 namespace gpu {
56
namespace {

// Monotonic source of unique IDs for InProcessCommandBuffer instances.
base::StaticAtomicSequenceNumber g_next_command_buffer_id;

// Runs |task| synchronously, stores its return value in |*result|, and
// signals |completion| so a thread blocked in Wait() can resume.
template <typename T>
static void RunTaskWithResult(base::Callback<T(void)> task,
                              T* result,
                              base::WaitableEvent* completion) {
  *result = task.Run();
  completion->Signal();
}

// RAII helper: brackets a scope with BeginProcessingOrderNumber /
// FinishProcessingOrderNumber on the given SyncPointOrderData.
struct ScopedOrderNumberProcessor {
  ScopedOrderNumberProcessor(SyncPointOrderData* order_data, uint32_t order_num)
      : order_data_(order_data), order_num_(order_num) {
    order_data_->BeginProcessingOrderNumber(order_num_);
  }

  ~ScopedOrderNumberProcessor() {
    order_data_->FinishProcessingOrderNumber(order_num_);
  }

 private:
  SyncPointOrderData* order_data_;  // Not owned.
  uint32_t order_num_;
};

// Holds the lazily-created default GPU thread and its SyncPointManager.
// Member order matters: the SyncPointManager must be constructed before (and
// destroyed after) the thread that uses it.
struct GpuInProcessThreadHolder {
  GpuInProcessThreadHolder()
      : sync_point_manager(new SyncPointManager(false)),
        gpu_thread(new GpuInProcessThread(sync_point_manager.get())) {}
  std::unique_ptr<SyncPointManager> sync_point_manager;
  scoped_refptr<InProcessCommandBuffer::Service> gpu_thread;
};

base::LazyInstance<GpuInProcessThreadHolder> g_default_service =
    LAZY_INSTANCE_INITIALIZER;

// RAII helper: signals |event| when the scope exits, even on early return.
class ScopedEvent {
 public:
  explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {}
  ~ScopedEvent() { event_->Signal(); }

 private:
  base::WaitableEvent* event_;  // Not owned.
};

// Duplicates a shared-memory handle so the GPU thread gets its own reference.
base::SharedMemoryHandle ShareToGpuThread(
    base::SharedMemoryHandle source_handle) {
  return base::SharedMemory::DuplicateHandle(source_handle);
}

// Produces a GpuMemoryBufferHandle usable on the GPU thread. Shared-memory
// handles are duplicated; native handles (IOSurface, Ozone pixmap) are passed
// through as-is but require a sync point, reported via |*requires_sync_point|.
gfx::GpuMemoryBufferHandle ShareGpuMemoryBufferToGpuThread(
    const gfx::GpuMemoryBufferHandle& source_handle,
    bool* requires_sync_point) {
  switch (source_handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      gfx::GpuMemoryBufferHandle handle;
      handle.type = gfx::SHARED_MEMORY_BUFFER;
      handle.handle = ShareToGpuThread(source_handle.handle);
      handle.offset = source_handle.offset;
      handle.stride = source_handle.stride;
      *requires_sync_point = false;
      return handle;
    }
    case gfx::IO_SURFACE_BUFFER:
    case gfx::OZONE_NATIVE_PIXMAP:
      *requires_sync_point = true;
      return source_handle;
    default:
      NOTREACHED();
      return gfx::GpuMemoryBufferHandle();
  }
}

// Returns |service| if supplied, otherwise the process-wide default GPU
// thread service (created lazily on first use).
scoped_refptr<InProcessCommandBuffer::Service> GetInitialService(
    const scoped_refptr<InProcessCommandBuffer::Service>& service) {
  if (service)
    return service;

  // Call base::ThreadTaskRunnerHandle::IsSet() to ensure that it is
  // instantiated before we create the GPU thread, otherwise shutdown order
  // will delete the ThreadTaskRunnerHandle before the GPU thread's message
  // loop, and when the message loop is shut down, it will recreate
  // ThreadTaskRunnerHandle, which will re-add a new task to the
  // AtExitManager, which causes a deadlock because it's already locked.
  base::ThreadTaskRunnerHandle::IsSet();
  return g_default_service.Get().gpu_thread;
}

}  // anonymous namespace
148
// Default service: GPU preferences are default-constructed; driver bug
// workarounds are derived from the current process's command line.
InProcessCommandBuffer::Service::Service()
    : gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()) {}
151
// Service with explicit GPU preferences; workarounds still come from the
// current process's command line.
InProcessCommandBuffer::Service::Service(const GpuPreferences& gpu_preferences)
    : gpu_preferences_(gpu_preferences),
      gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()) {}
155
156 InProcessCommandBuffer::Service::~Service() {}
157
// Accessor for the preferences this service was constructed with.
const gpu::GpuPreferences&
InProcessCommandBuffer::Service::gpu_preferences() {
  return gpu_preferences_;
}
162
// Accessor for the command-line-derived driver bug workarounds.
const gpu::GpuDriverBugWorkarounds&
InProcessCommandBuffer::Service::gpu_driver_bug_workarounds() {
  return gpu_driver_bug_workarounds_;
}
167
// Lazily creates and returns the GL share group shared by all contexts that
// use this service.
scoped_refptr<gl::GLShareGroup> InProcessCommandBuffer::Service::share_group() {
  if (!share_group_.get())
    share_group_ = new gl::GLShareGroup();
  return share_group_;
}
173
// Lazily creates and returns the mailbox manager; the concrete type is chosen
// by MailboxManager::Create based on the GPU preferences.
scoped_refptr<gles2::MailboxManager>
InProcessCommandBuffer::Service::mailbox_manager() {
  if (!mailbox_manager_.get()) {
    mailbox_manager_ = gles2::MailboxManager::Create(gpu_preferences());
  }
  return mailbox_manager_;
}
181
// Lazily creates the program cache. Returns null if program binaries are not
// supported by the driver (neither ARB nor OES get_program_binary) or the
// cache is disabled via preferences. May return null.
gpu::gles2::ProgramCache* InProcessCommandBuffer::Service::program_cache() {
  if (!program_cache_.get() &&
      (gl::g_driver_gl.ext.b_GL_ARB_get_program_binary ||
       gl::g_driver_gl.ext.b_GL_OES_get_program_binary) &&
      !gpu_preferences().disable_gpu_program_cache) {
    // Disk persistence is off if either the preference or a driver bug
    // workaround says so.
    bool disable_disk_cache =
        gpu_preferences_.disable_gpu_shader_disk_cache ||
        gpu_driver_bug_workarounds_.disable_program_disk_cache;
    program_cache_.reset(new gles2::MemoryProgramCache(
        gpu_preferences_.gpu_program_cache_size, disable_disk_cache));
  }
  return program_cache_.get();
}
195
// Constructs the command buffer bound to |service| (or the process-wide
// default service if |service| is null — see GetInitialService()).
InProcessCommandBuffer::InProcessCommandBuffer(
    const scoped_refptr<Service>& service)
    : command_buffer_id_(
          CommandBufferId::FromUnsafeValue(g_next_command_buffer_id.GetNext())),
      delayed_work_pending_(false),
      image_factory_(nullptr),
      gpu_control_client_(nullptr),
#if DCHECK_IS_ON()
      context_lost_(false),
#endif
      last_put_offset_(-1),
      gpu_memory_buffer_manager_(nullptr),
      next_fence_sync_release_(1),
      flushed_fence_sync_release_(0),
      flush_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                   base::WaitableEvent::InitialState::NOT_SIGNALED),
      service_(GetInitialService(service)),
      fence_sync_wait_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                             base::WaitableEvent::InitialState::NOT_SIGNALED),
      client_thread_weak_ptr_factory_(this),
      gpu_thread_weak_ptr_factory_(this) {
  DCHECK(service_.get());
  // Burn image ID 0 so that valid image IDs start at 1.
  next_image_id_.GetNext();
}
220
// Destruction tears down GPU-side state synchronously via Destroy().
InProcessCommandBuffer::~InProcessCommandBuffer() {
  Destroy();
}
224
// Makes the decoder's GL context current on the GPU thread. Returns false if
// the command buffer is already in an error state or the context is lost (in
// which case the loss is recorded on the command buffer).
// Requires |command_buffer_lock_| to be held.
bool InProcessCommandBuffer::MakeCurrent() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (error::IsError(command_buffer_->GetLastState().error)) {
    DLOG(ERROR) << "MakeCurrent failed because context lost.";
    return false;
  }
  if (!decoder_->MakeCurrent()) {
    DLOG(ERROR) << "Context lost because MakeCurrent failed.";
    command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
    command_buffer_->SetParseError(gpu::error::kLostContext);
    return false;
  }
  return true;
}
241
// Put-offset-changed callback: processes newly flushed commands on the GPU
// thread. Silently no-ops if the context can't be made current (the error is
// already recorded by MakeCurrent). Requires |command_buffer_lock_|.
void InProcessCommandBuffer::PumpCommandsOnGpuThread() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!MakeCurrent())
    return;

  executor_->PutChanged();
}
251
// Client-thread entry point. Queues InitializeOnGpuThread and blocks until it
// completes. Exactly one of |surface| / |task_runner| must be provided:
// a non-null |surface| means single-threaded (webview) mode where the client
// thread IS the GPU thread; otherwise |task_runner| is the client thread's
// runner used to post context-lost notifications back.
// Returns true and fills |capabilities_| on success.
bool InProcessCommandBuffer::Initialize(
    scoped_refptr<gl::GLSurface> surface,
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gles2::ContextCreationAttribHelper& attribs,
    InProcessCommandBuffer* share_group,
    GpuMemoryBufferManager* gpu_memory_buffer_manager,
    ImageFactory* image_factory,
    scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
  DCHECK(!share_group || service_.get() == share_group->service_.get());

  if (surface) {
    // If a surface is provided, we are running in a webview and should not
    // have a task runner.
    DCHECK(!task_runner);

    // GPU thread must be the same as client thread due to GLSurface not being
    // thread safe.
    sequence_checker_.reset(new base::SequenceChecker);
    surface_ = surface;
  } else {
    DCHECK(task_runner);
    origin_task_runner_ = std::move(task_runner);
    client_thread_weak_ptr_ = client_thread_weak_ptr_factory_.GetWeakPtr();
  }

  gpu::Capabilities capabilities;
  InitializeOnGpuThreadParams params(is_offscreen, window, attribs,
                                     &capabilities, share_group, image_factory);

  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this), params);

  // Block until the GPU-thread initialization finishes so |capabilities| and
  // |result| are valid to read.
  base::WaitableEvent completion(
      base::WaitableEvent::ResetPolicy::MANUAL,
      base::WaitableEvent::InitialState::NOT_SIGNALED);
  bool result = false;
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
  completion.Wait();

  gpu_memory_buffer_manager_ = gpu_memory_buffer_manager;

  if (result)
    capabilities_ = capabilities;

  return result;
}
301
// GPU-thread half of initialization: builds the CommandBufferService, the
// GLES2 decoder (sharing the context group with |params.context_group| if
// given), the GL surface/context (virtualized if the service or driver
// workarounds require it), and wires up sync-point and fence-sync callbacks.
// On any failure, cleans up via DestroyOnGpuThread() and returns false.
// On success, writes the decoder capabilities into |*params.capabilities|.
bool InProcessCommandBuffer::InitializeOnGpuThread(
    const InitializeOnGpuThreadParams& params) {
  CheckSequencedThread();
  gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();

  TransferBufferManager* manager = new TransferBufferManager(nullptr);
  transfer_buffer_manager_ = manager;
  manager->Initialize();

  std::unique_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommandsOnGpuThread, gpu_thread_weak_ptr_));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLostOnGpuThread, gpu_thread_weak_ptr_));

  // Share the GL share group with the sharing command buffer, if any;
  // otherwise use the service-wide group.
  gl_share_group_ = params.context_group
                        ? params.context_group->gl_share_group_
                        : service_->share_group();

  bool bind_generates_resource = false;
  scoped_refptr<gles2::FeatureInfo> feature_info =
      new gles2::FeatureInfo(service_->gpu_driver_bug_workarounds());
  decoder_.reset(gles2::GLES2Decoder::Create(
      params.context_group
          ? params.context_group->decoder_->GetContextGroup()
          : new gles2::ContextGroup(
                service_->gpu_preferences(), service_->mailbox_manager(), NULL,
                service_->shader_translator_cache(),
                service_->framebuffer_completeness_cache(), feature_info,
                bind_generates_resource, nullptr, nullptr)));

  executor_.reset(new CommandExecutor(command_buffer.get(), decoder_.get(),
                                      decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &CommandExecutor::SetGetBuffer, base::Unretained(executor_.get())));
  command_buffer_ = std::move(command_buffer);

  decoder_->set_engine(executor_.get());

  // |surface_| is already set in webview mode (passed to Initialize());
  // otherwise create one here on the GPU thread.
  if (!surface_.get()) {
    if (params.is_offscreen)
      surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
    else
      surface_ = gl::init::CreateViewGLSurface(params.window);
  }

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();
    return false;
  }

  sync_point_order_data_ = SyncPointOrderData::Create();
  sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient(
      sync_point_order_data_, GetNamespaceID(), GetCommandBufferID());

  if (service_->UseVirtualizedGLContexts() ||
      decoder_->GetContextGroup()
          ->feature_info()
          ->workarounds()
          .use_virtualized_gl_contexts) {
    // Virtualized contexts share one real context per share group; create it
    // lazily and register it with the share group for reuse.
    context_ = gl_share_group_->GetSharedContext(surface_.get());
    if (!context_.get()) {
      context_ = gl::init::CreateGLContext(
          gl_share_group_.get(), surface_.get(),
          GenerateGLContextAttribs(
              params.attribs, decoder_->GetContextGroup()->gpu_preferences()));
      gl_share_group_->SetSharedContext(surface_.get(), context_.get());
    }

    context_ = new GLContextVirtual(
        gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(
            surface_.get(),
            GenerateGLContextAttribs(
                params.attribs,
                decoder_->GetContextGroup()->gpu_preferences()))) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      context_ = NULL;
    }
  } else {
    context_ = gl::init::CreateGLContext(
        gl_share_group_.get(), surface_.get(),
        GenerateGLContextAttribs(
            params.attribs, decoder_->GetContextGroup()->gpu_preferences()));
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();
    return false;
  }

  // Install the service's program cache unless the group already has one or a
  // driver workaround forbids it.
  if (!decoder_->GetContextGroup()->has_program_cache() &&
      !decoder_->GetContextGroup()
           ->feature_info()
           ->workarounds()
           .disable_program_cache) {
    decoder_->GetContextGroup()->set_program_cache(service_->program_cache());
  }

  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
                            context_,
                            params.is_offscreen,
                            disallowed_features,
                            params.attribs)) {
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();
    return false;
  }
  *params.capabilities = decoder_->GetCapabilities();

  // Fence-sync plumbing: Unretained is safe because the decoder is owned by
  // |this| and destroyed before it.
  decoder_->SetFenceSyncReleaseCallback(
      base::Bind(&InProcessCommandBuffer::FenceSyncReleaseOnGpuThread,
                 base::Unretained(this)));
  decoder_->SetWaitFenceSyncCallback(
      base::Bind(&InProcessCommandBuffer::WaitFenceSyncOnGpuThread,
                 base::Unretained(this)));
  decoder_->SetDescheduleUntilFinishedCallback(
      base::Bind(&InProcessCommandBuffer::DescheduleUntilFinishedOnGpuThread,
                 base::Unretained(this)));
  decoder_->SetRescheduleAfterFinishedCallback(
      base::Bind(&InProcessCommandBuffer::RescheduleAfterFinishedOnGpuThread,
                 base::Unretained(this)));

  image_factory_ = params.image_factory;

  return true;
}
441
// Client-thread teardown: detaches the client (no further context-lost
// notifications) and synchronously runs DestroyOnGpuThread on the GPU thread.
void InProcessCommandBuffer::Destroy() {
  CheckSequencedThread();
  client_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  gpu_control_client_ = nullptr;
  base::WaitableEvent completion(
      base::WaitableEvent::ResetPolicy::MANUAL,
      base::WaitableEvent::InitialState::NOT_SIGNALED);
  // |result| is required by RunTaskWithResult's signature; DestroyOnGpuThread
  // always returns true, so the value is not inspected.
  bool result = false;
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
  completion.Wait();
}
456
// GPU-thread teardown. Safe to call on a partially initialized object (it is
// also the failure path of InitializeOnGpuThread). Always returns true.
bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = nullptr;
  surface_ = nullptr;
  sync_point_client_ = nullptr;
  if (sync_point_order_data_) {
    sync_point_order_data_->Destroy();
    sync_point_order_data_ = nullptr;
  }
  gl_share_group_ = nullptr;

  return true;
}
478
// DCHECKs thread affinity in single-threaded (webview) mode; no-op otherwise
// (|sequence_checker_| is only created when a surface was supplied).
void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ || sequence_checker_->CalledOnValidSequence());
}
482
// Routes a context-lost notification to the client thread. Without a task
// runner (webview mode) the GPU thread IS the client thread, so call direct.
void InProcessCommandBuffer::OnContextLostOnGpuThread() {
  if (!origin_task_runner_)
    return OnContextLost();  // Just kidding, we're on the client thread.
  origin_task_runner_->PostTask(
      FROM_HERE, base::Bind(&InProcessCommandBuffer::OnContextLost,
                            client_thread_weak_ptr_));
}
490
// Client-thread context-lost handler; forwards to the GpuControlClient if one
// is still attached.
void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();

#if DCHECK_IS_ON()
  // This method shouldn't be called more than once.
  DCHECK(!context_lost_);
  context_lost_ = true;
#endif

  if (gpu_control_client_)
    gpu_control_client_->OnGpuControlLostContext();
}
503
// Returns the freshest client-visible state. The generation comparison is
// wraparound-aware (difference < 2^31 means |state_after_last_flush_| is not
// older than |last_state_|), so stale GPU-thread snapshots never roll the
// client state backwards.
CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}
511
// Returns the cached state without synchronizing with the GPU thread.
CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}
516
// Refreshes the cached state, then returns its token.
int32_t InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  GetStateFast();
  return last_state_.token;
}
522
// GPU-thread flush: processes commands up to |put_offset| under the sync
// point order number |order_num|, publishes the resulting state, and signals
// |flush_event_| on scope exit (via ScopedEvent) so waiting clients wake up.
void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset,
                                              uint32_t order_num) {
  CheckSequencedThread();
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);

  {
    ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(),
                                                order_num);
    command_buffer_->Flush(put_offset);
    {
      // Update state before signaling the flush event.
      base::AutoLock lock(state_after_last_flush_lock_);
      state_after_last_flush_ = command_buffer_->GetLastState();
    }

    // Currently the in process command buffer does not support being
    // descheduled, if it does we would need to back off on calling the finish
    // processing number function until the message is rescheduled and finished
    // processing. This DCHECK is to enforce this.
    DCHECK(error::IsError(state_after_last_flush_.error) ||
           put_offset == state_after_last_flush_.get_offset);
  }

  // If we've processed all pending commands but still have pending queries,
  // pump idle work until the query is passed.
  if (put_offset == state_after_last_flush_.get_offset &&
      (executor_->HasMoreIdleWork() || executor_->HasPendingQueries())) {
    ScheduleDelayedWorkOnGpuThread();
  }
}
554
// Runs idle work and pending queries on the GPU thread; reschedules itself
// while either remains outstanding.
void InProcessCommandBuffer::PerformDelayedWorkOnGpuThread() {
  CheckSequencedThread();
  delayed_work_pending_ = false;
  base::AutoLock lock(command_buffer_lock_);
  if (MakeCurrent()) {
    executor_->PerformIdleWork();
    executor_->ProcessPendingQueries();
    if (executor_->HasMoreIdleWork() || executor_->HasPendingQueries()) {
      ScheduleDelayedWorkOnGpuThread();
    }
  }
}
567
// Schedules PerformDelayedWorkOnGpuThread via the service, coalescing so at
// most one delayed-work task is pending at a time.
void InProcessCommandBuffer::ScheduleDelayedWorkOnGpuThread() {
  CheckSequencedThread();
  if (delayed_work_pending_)
    return;
  delayed_work_pending_ = true;
  service_->ScheduleDelayedWork(
      base::Bind(&InProcessCommandBuffer::PerformDelayedWorkOnGpuThread,
                 gpu_thread_weak_ptr_));
}
577
// Client-thread flush: queues asynchronous processing up to |put_offset|.
// No-ops if in an error state or if the put offset hasn't moved. Also marks
// all fence syncs generated so far as flushed.
void InProcessCommandBuffer::Flush(int32_t put_offset) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  // Reserve a global order number so sync point waits across command buffers
  // are correctly ordered relative to this flush.
  SyncPointManager* sync_manager = service_->sync_point_manager();
  const uint32_t order_num =
      sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager);
  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  gpu_thread_weak_ptr_,
                                  put_offset,
                                  order_num);
  QueueTask(task);

  flushed_fence_sync_release_ = next_fence_sync_release_ - 1;
}
598
// In-process command buffers have no deferred flush, so an ordering barrier
// is simply a full flush.
void InProcessCommandBuffer::OrderingBarrier(int32_t put_offset) {
  Flush(put_offset);
}
602
// Blocks the client thread until the last token enters [start, end] or the
// command buffer hits an error; |flush_event_| wakes us after each GPU flush.
void InProcessCommandBuffer::WaitForTokenInRange(int32_t start, int32_t end) {
  CheckSequencedThread();
  while (!InRange(start, end, GetLastToken()) &&
         last_state_.error == gpu::error::kNoError)
    flush_event_.Wait();
}
609
// Blocks the client thread until the get offset enters [start, end] or the
// command buffer hits an error.
void InProcessCommandBuffer::WaitForGetOffsetInRange(int32_t start,
                                                     int32_t end) {
  CheckSequencedThread();

  GetStateFast();
  while (!InRange(start, end, last_state_.get_offset) &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
    GetStateFast();
  }
}
621
// Switches the command ring buffer to shared-memory buffer |shm_id|,
// synchronously (waits for the GPU thread), then resnapshots the state.
void InProcessCommandBuffer::SetGetBuffer(int32_t shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  base::WaitableEvent completion(
      base::WaitableEvent::ResetPolicy::MANUAL,
      base::WaitableEvent::InitialState::NOT_SIGNALED);
  base::Closure task =
      base::Bind(&InProcessCommandBuffer::SetGetBufferOnGpuThread,
                 base::Unretained(this), shm_id, &completion);
  QueueTask(task);
  completion.Wait();

  {
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
}
641
// GPU-thread half of SetGetBuffer: swaps the buffer, resets the client-side
// put offset bookkeeping, and signals the waiting client thread.
void InProcessCommandBuffer::SetGetBufferOnGpuThread(
    int32_t shm_id,
    base::WaitableEvent* completion) {
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->SetGetBuffer(shm_id);
  last_put_offset_ = 0;
  completion->Signal();
}
650
// Allocates a transfer buffer of |size| bytes; the assigned ID is returned
// through |*id|. Called on the client thread; takes the command buffer lock
// since the GPU thread also touches the command buffer.
scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(
    size_t size,
    int32_t* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}
658
// Asynchronously destroys transfer buffer |id| on the GPU thread.
void InProcessCommandBuffer::DestroyTransferBuffer(int32_t id) {
  CheckSequencedThread();
  base::Closure task =
      base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
                 base::Unretained(this),
                 id);

  QueueTask(task);
}
668
// GPU-thread half of DestroyTransferBuffer.
void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32_t id) {
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->DestroyTransferBuffer(id);
}
673
// Registers the client notified of context loss. Not owned; cleared in
// Destroy().
void InProcessCommandBuffer::SetGpuControlClient(GpuControlClient* client) {
  gpu_control_client_ = client;
}
677
// Returns the capabilities captured during Initialize().
gpu::Capabilities InProcessCommandBuffer::GetCapabilities() {
  return capabilities_;
}
681
// Creates a GL image for |buffer| (a gfx::GpuMemoryBuffer supplied by the
// client) and queues its GPU-thread construction. Returns the new image ID.
// Native buffer types additionally generate a fence sync that is attached to
// the buffer as its destruction sync token.
int32_t InProcessCommandBuffer::CreateImage(ClientBuffer buffer,
                                            size_t width,
                                            size_t height,
                                            unsigned internalformat) {
  CheckSequencedThread();

  DCHECK(gpu_memory_buffer_manager_);
  gfx::GpuMemoryBuffer* gpu_memory_buffer =
      reinterpret_cast<gfx::GpuMemoryBuffer*>(buffer);
  DCHECK(gpu_memory_buffer);

  int32_t new_id = next_image_id_.GetNext();

  DCHECK(gpu::IsGpuMemoryBufferFormatSupported(gpu_memory_buffer->GetFormat(),
                                               capabilities_));
  DCHECK(gpu::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
      internalformat, gpu_memory_buffer->GetFormat()));

  // This handle is owned by the GPU thread and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // queuing of the CreateImage task below.
  bool requires_sync_point = false;
  gfx::GpuMemoryBufferHandle handle =
      ShareGpuMemoryBufferToGpuThread(gpu_memory_buffer->GetHandle(),
                                      &requires_sync_point);

  SyncPointManager* sync_manager = service_->sync_point_manager();
  const uint32_t order_num =
      sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager);

  uint64_t fence_sync = 0;
  if (requires_sync_point) {
    fence_sync = GenerateFenceSyncRelease();

    // Previous fence syncs should be flushed already.
    DCHECK_EQ(fence_sync - 1, flushed_fence_sync_release_);
  }

  QueueTask(base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
                       base::Unretained(this), new_id, handle,
                       gfx::Size(width, height), gpu_memory_buffer->GetFormat(),
                       internalformat, order_num, fence_sync));

  if (fence_sync) {
    flushed_fence_sync_release_ = fence_sync;
    SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(),
                         GetCommandBufferID(), fence_sync);
    sync_token.SetVerifyFlush();
    gpu_memory_buffer_manager_->SetDestructionSyncToken(gpu_memory_buffer,
                                                        sync_token);
  }

  return new_id;
}
736
// GPU-thread half of CreateImage: builds the GLImage for |handle| and
// registers it under |id|. Shared-memory handles are wrapped directly; all
// other handle types go through the ImageFactory. Releases |fence_sync| (if
// non-zero) once done — note it is released even on the early-error returns
// below? No: early returns skip the release; callers rely on the handle being
// consumed here regardless. Runs under sync point order number |order_num|.
void InProcessCommandBuffer::CreateImageOnGpuThread(
    int32_t id,
    const gfx::GpuMemoryBufferHandle& handle,
    const gfx::Size& size,
    gfx::BufferFormat format,
    uint32_t internalformat,
    uint32_t order_num,
    uint64_t fence_sync) {
  ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(),
                                              order_num);
  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  switch (handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      if (!base::IsValueInRangeForNumericType<size_t>(handle.stride)) {
        LOG(ERROR) << "Invalid stride for image.";
        return;
      }
      scoped_refptr<gl::GLImageSharedMemory> image(
          new gl::GLImageSharedMemory(size, internalformat));
      if (!image->Initialize(handle.handle, handle.id, format, handle.offset,
                             handle.stride)) {
        LOG(ERROR) << "Failed to initialize image.";
        return;
      }

      image_manager->AddImage(image.get(), id);
      break;
    }
    default: {
      if (!image_factory_) {
        LOG(ERROR) << "Image factory missing but required by buffer type.";
        return;
      }

      // Note: this assumes that client ID is always 0.
      const int kClientId = 0;

      scoped_refptr<gl::GLImage> image =
          image_factory_->CreateImageForGpuMemoryBuffer(
              handle, size, format, internalformat, kClientId,
              kNullSurfaceHandle);
      if (!image.get()) {
        LOG(ERROR) << "Failed to create image for buffer.";
        return;
      }

      image_manager->AddImage(image.get(), id);
      break;
    }
  }

  // NOTE(review): the early-error returns above skip this release; a client
  // waiting on |fence_sync| would then block. Confirm whether that is an
  // accepted failure mode here.
  if (fence_sync) {
    sync_point_client_->ReleaseFenceSync(fence_sync);
  }
}
801
// Asynchronously destroys image |id| on the GPU thread.
void InProcessCommandBuffer::DestroyImage(int32_t id) {
  CheckSequencedThread();

  QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread,
                       base::Unretained(this),
                       id));
}
809
// GPU-thread half of DestroyImage; logs and no-ops if the ID is unknown
// (e.g. its creation failed earlier).
void InProcessCommandBuffer::DestroyImageOnGpuThread(int32_t id) {
  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (!image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image with ID doesn't exist.";
    return;
  }

  image_manager->RemoveImage(id);
}
823
// Allocates a GpuMemoryBuffer of the requested size/format and wraps it in an
// image via CreateImage(). Returns -1 if allocation fails.
// NOTE(review): the |usage| parameter is ignored — gfx::BufferUsage::SCANOUT
// is hard-coded below. Confirm whether callers depend on passing other usages.
int32_t InProcessCommandBuffer::CreateGpuMemoryBufferImage(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage) {
  CheckSequencedThread();

  DCHECK(gpu_memory_buffer_manager_);
  std::unique_ptr<gfx::GpuMemoryBuffer> buffer(
      gpu_memory_buffer_manager_->AllocateGpuMemoryBuffer(
          gfx::Size(width, height),
          gpu::DefaultBufferFormatForImageFormat(internalformat),
          gfx::BufferUsage::SCANOUT, gpu::kNullSurfaceHandle));
  if (!buffer)
    return -1;

  return CreateImage(buffer->AsClientBuffer(), width, height, internalformat);
}
842
// Decoder callback, GPU thread: releases fence sync |release|, first pushing
// texture updates if the mailbox manager synchronizes via sync tokens.
void InProcessCommandBuffer::FenceSyncReleaseOnGpuThread(uint64_t release) {
  DCHECK(!sync_point_client_->client_state()->IsFenceSyncReleased(release));
  gles2::MailboxManager* mailbox_manager =
      decoder_->GetContextGroup()->mailbox_manager();
  if (mailbox_manager->UsesSync()) {
    SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(),
                         GetCommandBufferID(), release);
    mailbox_manager->PushTextureUpdates(sync_token);
  }

  sync_point_client_->ReleaseFenceSync(release);
}
855
// Decoder callback, GPU thread: blocks until fence sync |release| from the
// given command buffer is released, then pulls texture updates. Returns true
// immediately if the releasing command buffer no longer exists. Always
// returns true (the wait completes synchronously on this thread).
bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread(
    gpu::CommandBufferNamespace namespace_id,
    gpu::CommandBufferId command_buffer_id,
    uint64_t release) {
  gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager();
  DCHECK(sync_point_manager);

  scoped_refptr<gpu::SyncPointClientState> release_state =
      sync_point_manager->GetSyncPointClientState(namespace_id,
                                                  command_buffer_id);

  if (!release_state)
    return true;

  if (!release_state->IsFenceSyncReleased(release)) {
    // Use waitable event which is signalled when the release fence is
    // released.
    sync_point_client_->Wait(
        release_state.get(), release,
        base::Bind(&base::WaitableEvent::Signal,
                   base::Unretained(&fence_sync_wait_event_)));
    fence_sync_wait_event_.Wait();
  }

  gles2::MailboxManager* mailbox_manager =
      decoder_->GetContextGroup()->mailbox_manager();
  SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
  mailbox_manager->PullTextureUpdates(sync_token);
  return true;
}
885
// Descheduling is not supported by the in-process command buffer (see the
// DCHECK in FlushOnGpuThread).
void InProcessCommandBuffer::DescheduleUntilFinishedOnGpuThread() {
  NOTIMPLEMENTED();
}
889
// Counterpart of DescheduleUntilFinishedOnGpuThread; equally unsupported.
void InProcessCommandBuffer::RescheduleAfterFinishedOnGpuThread() {
  NOTIMPLEMENTED();
}
893
// GPU-thread half of SignalSyncToken: runs |callback| once |sync_token| is
// released, or immediately if the releasing command buffer is gone.
void InProcessCommandBuffer::SignalSyncTokenOnGpuThread(
    const SyncToken& sync_token, const base::Closure& callback) {
  gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager();
  DCHECK(sync_point_manager);

  scoped_refptr<gpu::SyncPointClientState> release_state =
      sync_point_manager->GetSyncPointClientState(
          sync_token.namespace_id(), sync_token.command_buffer_id());

  if (!release_state) {
    callback.Run();
    return;
  }

  // Out-of-order wait: does not consume one of this client's order numbers.
  sync_point_client_->WaitOutOfOrder(
      release_state.get(), sync_token.release_count(), WrapCallback(callback));
}
911
// Client-thread entry: runs |callback| (on the client thread, via
// WrapCallback) once GL query |query_id| completes.
void InProcessCommandBuffer::SignalQuery(unsigned query_id,
                                         const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
                       base::Unretained(this),
                       query_id,
                       WrapCallback(callback)));
}
920
921 void InProcessCommandBuffer::SignalQueryOnGpuThread(
922 unsigned query_id,
923 const base::Closure& callback) {
924 gles2::QueryManager* query_manager_ = decoder_->GetQueryManager();
925 DCHECK(query_manager_);
926
927 gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id);
928 if (!query)
929 callback.Run();
930 else
931 query->AddCallback(callback);
932 }
933
// Cross-thread locking is unsupported for in-process command buffers.
void InProcessCommandBuffer::SetLock(base::Lock*) {
  // No support for using on multiple threads.
  NOTREACHED();
}
938
// Intentionally a no-op: there is no IPC channel whose traffic must be
// flushed before work becomes visible.
void InProcessCommandBuffer::EnsureWorkVisible() {
  // This is only relevant for out-of-process command buffers.
}
942
// Identifies the sync-token namespace this command buffer releases into.
CommandBufferNamespace InProcessCommandBuffer::GetNamespaceID() const {
  return CommandBufferNamespace::IN_PROCESS;
}
946
// Returns the identifier assigned to this command buffer at construction.
CommandBufferId InProcessCommandBuffer::GetCommandBufferID() const {
  return command_buffer_id_;
}
950
// The in-process implementation carries no extra per-buffer data.
int32_t InProcessCommandBuffer::GetExtraCommandBufferData() const {
  return 0;
}
954
// Hands out monotonically increasing fence-sync release counts, starting
// from the current value of |next_fence_sync_release_|.
uint64_t InProcessCommandBuffer::GenerateFenceSyncRelease() {
  return next_fence_sync_release_++;
}
958
959 bool InProcessCommandBuffer::IsFenceSyncRelease(uint64_t release) {
960 return release != 0 && release < next_fence_sync_release_;
961 }
962
// True once the client has flushed commands up to and including |release|.
bool InProcessCommandBuffer::IsFenceSyncFlushed(uint64_t release) {
  return release <= flushed_fence_sync_release_;
}
966
// In-process, a flush is "received" as soon as it is issued, so this is
// identical to IsFenceSyncFlushed().
bool InProcessCommandBuffer::IsFenceSyncFlushReceived(uint64_t release) {
  return IsFenceSyncFlushed(release);
}
970
971 void InProcessCommandBuffer::SignalSyncToken(const SyncToken& sync_token,
972 const base::Closure& callback) {
973 CheckSequencedThread();
974 QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread,
975 base::Unretained(this),
976 sync_token,
977 WrapCallback(callback)));
978 }
979
// Unverified tokens can only be waited on when they come from our own
// (in-process) namespace.
bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken(
    const SyncToken* sync_token) {
  return sync_token->namespace_id() == GetNamespaceID();
}
984
// Returns the error recorded in the most recently cached state snapshot.
gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
}
989
990 namespace {
991
992 void PostCallback(
993 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
994 const base::Closure& callback) {
995 // The task_runner.get() check is to support using InProcessCommandBuffer on
996 // a thread without a message loop.
997 if (task_runner.get() && !task_runner->BelongsToCurrentThread()) {
998 task_runner->PostTask(FROM_HERE, callback);
999 } else {
1000 callback.Run();
1001 }
1002 }
1003
1004 void RunOnTargetThread(std::unique_ptr<base::Closure> callback) {
1005 DCHECK(callback.get());
1006 callback->Run();
1007 }
1008
1009 } // anonymous namespace
1010
1011 base::Closure InProcessCommandBuffer::WrapCallback(
1012 const base::Closure& callback) {
1013 // Make sure the callback gets deleted on the target thread by passing
1014 // ownership.
1015 std::unique_ptr<base::Closure> scoped_callback(new base::Closure(callback));
1016 base::Closure callback_on_client_thread =
1017 base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
1018 base::Closure wrapped_callback =
1019 base::Bind(&PostCallback, base::ThreadTaskRunnerHandle::IsSet()
1020 ? base::ThreadTaskRunnerHandle::Get()
1021 : nullptr,
1022 callback_on_client_thread);
1023 return wrapped_callback;
1024 }
1025
// Spins up the GPU thread immediately; |sync_point_manager| is borrowed, not
// owned, and must outlive this object.
GpuInProcessThread::GpuInProcessThread(SyncPointManager* sync_point_manager)
    : base::Thread("GpuThread"), sync_point_manager_(sync_point_manager) {
  Start();
}
1030
// Joins the thread before destruction so no task runs on a dead object.
GpuInProcessThread::~GpuInProcessThread() {
  Stop();
}
1034
// Forwards ref-counting to the RefCountedThreadSafe base explicitly.
void GpuInProcessThread::AddRef() const {
  base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
}
// Forwards ref-counting to the RefCountedThreadSafe base explicitly.
void GpuInProcessThread::Release() const {
  base::RefCountedThreadSafe<GpuInProcessThread>::Release();
}
1041
// Posts |task| to this thread's message loop for asynchronous execution.
void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
  task_runner()->PostTask(FROM_HERE, task);
}
1045
// Posts |callback| with a 2 ms delay, matching the idle-work cadence used by
// the out-of-process GpuCommandBufferStub.
void GpuInProcessThread::ScheduleDelayedWork(const base::Closure& callback) {
  // Match delay with GpuCommandBufferStub.
  task_runner()->PostDelayedTask(FROM_HERE, callback,
                                 base::TimeDelta::FromMilliseconds(2));
}
1051
// In-process contexts each get a real GL context; no virtualization.
bool GpuInProcessThread::UseVirtualizedGLContexts() {
  return false;
}
1055
1056 scoped_refptr<gles2::ShaderTranslatorCache>
1057 GpuInProcessThread::shader_translator_cache() {
1058 if (!shader_translator_cache_.get()) {
1059 shader_translator_cache_ =
1060 new gpu::gles2::ShaderTranslatorCache(gpu_preferences());
1061 }
1062 return shader_translator_cache_;
1063 }
1064
1065 scoped_refptr<gles2::FramebufferCompletenessCache>
1066 GpuInProcessThread::framebuffer_completeness_cache() {
1067 if (!framebuffer_completeness_cache_.get())
1068 framebuffer_completeness_cache_ =
1069 new gpu::gles2::FramebufferCompletenessCache;
1070 return framebuffer_completeness_cache_;
1071 }
1072
// Returns the borrowed SyncPointManager supplied at construction.
SyncPointManager* GpuInProcessThread::sync_point_manager() {
  return sync_point_manager_;
}
1076
1077 } // namespace gpu
OLDNEW
« no previous file with comments | « gpu/command_buffer/service/in_process_command_buffer.h ('k') | gpu/ipc/BUILD.gn » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698