Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(43)

Side by Side Diff: gpu/ipc/in_process_command_buffer.cc

Issue 2498053004: Add InProcessContextProvider and update InProcessCommandBuffer (Closed)
Patch Set: Revert experiments and fix android_webview Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « gpu/ipc/in_process_command_buffer.h ('k') | gpu/ipc/service/gpu_channel_manager.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2013 The Chromium Authors. All rights reserved. 1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "gpu/ipc/in_process_command_buffer.h" 5 #include "gpu/ipc/in_process_command_buffer.h"
6 6
7 #include <stddef.h> 7 #include <stddef.h>
8 #include <stdint.h> 8 #include <stdint.h>
9 9
10 #include <queue> 10 #include <queue>
11 #include <set> 11 #include <set>
12 #include <utility> 12 #include <utility>
13 13
14 #include "base/bind.h" 14 #include "base/bind.h"
15 #include "base/bind_helpers.h" 15 #include "base/bind_helpers.h"
16 #include "base/command_line.h" 16 #include "base/command_line.h"
17 #include "base/lazy_instance.h" 17 #include "base/lazy_instance.h"
18 #include "base/location.h" 18 #include "base/location.h"
19 #include "base/logging.h" 19 #include "base/logging.h"
20 #include "base/memory/ptr_util.h"
20 #include "base/memory/weak_ptr.h" 21 #include "base/memory/weak_ptr.h"
21 #include "base/numerics/safe_conversions.h" 22 #include "base/numerics/safe_conversions.h"
22 #include "base/sequence_checker.h" 23 #include "base/sequence_checker.h"
23 #include "base/single_thread_task_runner.h" 24 #include "base/single_thread_task_runner.h"
24 #include "base/threading/thread_task_runner_handle.h" 25 #include "base/threading/thread_task_runner_handle.h"
25 #include "gpu/command_buffer/client/gpu_control_client.h" 26 #include "gpu/command_buffer/client/gpu_control_client.h"
26 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" 27 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
27 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h" 28 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
28 #include "gpu/command_buffer/common/sync_token.h" 29 #include "gpu/command_buffer/common/sync_token.h"
29 #include "gpu/command_buffer/service/command_buffer_service.h" 30 #include "gpu/command_buffer/service/command_buffer_service.h"
30 #include "gpu/command_buffer/service/command_executor.h" 31 #include "gpu/command_buffer/service/command_executor.h"
31 #include "gpu/command_buffer/service/context_group.h" 32 #include "gpu/command_buffer/service/context_group.h"
32 #include "gpu/command_buffer/service/gl_context_virtual.h" 33 #include "gpu/command_buffer/service/gl_context_virtual.h"
33 #include "gpu/command_buffer/service/gpu_preferences.h" 34 #include "gpu/command_buffer/service/gpu_preferences.h"
34 #include "gpu/command_buffer/service/image_factory.h" 35 #include "gpu/command_buffer/service/image_factory.h"
35 #include "gpu/command_buffer/service/image_manager.h" 36 #include "gpu/command_buffer/service/image_manager.h"
36 #include "gpu/command_buffer/service/mailbox_manager.h" 37 #include "gpu/command_buffer/service/mailbox_manager.h"
37 #include "gpu/command_buffer/service/memory_program_cache.h" 38 #include "gpu/command_buffer/service/memory_program_cache.h"
38 #include "gpu/command_buffer/service/memory_tracking.h" 39 #include "gpu/command_buffer/service/memory_tracking.h"
39 #include "gpu/command_buffer/service/query_manager.h" 40 #include "gpu/command_buffer/service/query_manager.h"
40 #include "gpu/command_buffer/service/service_utils.h" 41 #include "gpu/command_buffer/service/service_utils.h"
41 #include "gpu/command_buffer/service/sync_point_manager.h" 42 #include "gpu/command_buffer/service/sync_point_manager.h"
42 #include "gpu/command_buffer/service/transfer_buffer_manager.h" 43 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
44 #include "gpu/ipc/gpu_in_process_thread_service.h"
45 #include "gpu/ipc/service/image_transport_surface.h"
43 #include "ui/gfx/geometry/size.h" 46 #include "ui/gfx/geometry/size.h"
44 #include "ui/gl/gl_context.h" 47 #include "ui/gl/gl_context.h"
45 #include "ui/gl/gl_image.h" 48 #include "ui/gl/gl_image.h"
46 #include "ui/gl/gl_image_shared_memory.h" 49 #include "ui/gl/gl_image_shared_memory.h"
47 #include "ui/gl/gl_share_group.h" 50 #include "ui/gl/gl_share_group.h"
48 #include "ui/gl/init/gl_factory.h" 51 #include "ui/gl/init/gl_factory.h"
49 52
50 #if defined(OS_WIN) 53 #if defined(OS_WIN)
51 #include <windows.h> 54 #include <windows.h>
52 #include "base/process/process_handle.h" 55 #include "base/process/process_handle.h"
53 #endif 56 #endif
54 57
58 #if defined(OS_MACOSX)
59 #include "gpu/ipc/client/gpu_process_hosted_ca_layer_tree_params.h"
60 #endif
61
55 namespace gpu { 62 namespace gpu {
56 63
57 namespace { 64 namespace {
58 65
// Process-wide monotonic counter used to hand out unique command buffer ids.
base::StaticAtomicSequenceNumber g_next_command_buffer_id;
60 67
// Runs |task| (typically posted to the GPU thread), stores its return value
// in |result|, and then signals |completion| so the thread blocked in
// completion->Wait() can safely read |result|.  The Signal() provides the
// cross-thread ordering for the write to |result|.
template <typename T>
static void RunTaskWithResult(base::Callback<T(void)> task,
                              T* result,
                              base::WaitableEvent* completion) {
  *result = task.Run();
  completion->Signal();
}
68 75
// Owns the default in-process GPU thread plus the SyncPointManager and
// Service that live on it.  The thread is started in the constructor and
// stopped in the destructor, so the lifetime of the holder bounds the
// lifetime of the GPU thread.
class GpuInProcessThreadHolder : public base::Thread {
 public:
  GpuInProcessThreadHolder()
      : base::Thread("GpuThread"),
        // |false| selects the non-threadsafe wait mode of SyncPointManager
        // here -- NOTE(review): confirm against the SyncPointManager ctor.
        sync_point_manager_(new SyncPointManager(false)) {
    Start();
  }

  ~GpuInProcessThreadHolder() override { Stop(); }

  // Lazily creates and returns the shared Service bound to this thread's
  // task runner.  NOTE(review): the lazy init is unsynchronized; this
  // assumes all callers race-free access it (e.g. first call happens before
  // concurrent use) -- verify against GetDefaultService()'s callers.
  const scoped_refptr<InProcessCommandBuffer::Service>& GetGpuThreadService() {
    if (!gpu_thread_service_) {
      gpu_thread_service_ = new GpuInProcessThreadService(
          task_runner(), sync_point_manager_.get(), nullptr, nullptr);
    }
    return gpu_thread_service_;
  }

 private:
  std::unique_ptr<SyncPointManager> sync_point_manager_;
  scoped_refptr<InProcessCommandBuffer::Service> gpu_thread_service_;
};
91 98
// Lazily-constructed process-wide default GPU thread holder, shared by all
// InProcessCommandBuffers that are not given an explicit Service.
base::LazyInstance<GpuInProcessThreadHolder> g_default_service =
    LAZY_INSTANCE_INITIALIZER;
94 101
95 class ScopedEvent { 102 class ScopedEvent {
96 public: 103 public:
97 explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {} 104 explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {}
98 ~ScopedEvent() { event_->Signal(); } 105 ~ScopedEvent() { event_->Signal(); }
99 106
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
134 if (service) 141 if (service)
135 return service; 142 return service;
136 143
137 // Call base::ThreadTaskRunnerHandle::IsSet() to ensure that it is 144 // Call base::ThreadTaskRunnerHandle::IsSet() to ensure that it is
138 // instantiated before we create the GPU thread, otherwise shutdown order will 145 // instantiated before we create the GPU thread, otherwise shutdown order will
139 // delete the ThreadTaskRunnerHandle before the GPU thread's message loop, 146 // delete the ThreadTaskRunnerHandle before the GPU thread's message loop,
140 // and when the message loop is shutdown, it will recreate 147 // and when the message loop is shutdown, it will recreate
141 // ThreadTaskRunnerHandle, which will re-add a new task to the, AtExitManager, 148 // ThreadTaskRunnerHandle, which will re-add a new task to the, AtExitManager,
142 // which causes a deadlock because it's already locked. 149 // which causes a deadlock because it's already locked.
143 base::ThreadTaskRunnerHandle::IsSet(); 150 base::ThreadTaskRunnerHandle::IsSet();
144 return g_default_service.Get().gpu_thread; 151 return g_default_service.Get().GetGpuThreadService();
145 } 152 }
146 153
}  // anonymous namespace
148 155
149 InProcessCommandBuffer::Service::Service() 156 InProcessCommandBuffer::Service::Service()
150 : gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()) {} 157 : gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()) {}
151 158
152 InProcessCommandBuffer::Service::Service(const GpuPreferences& gpu_preferences) 159 InProcessCommandBuffer::Service::Service(const GpuPreferences& gpu_preferences)
153 : gpu_preferences_(gpu_preferences), 160 : gpu_preferences_(gpu_preferences),
154 gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()) {} 161 gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()) {}
155 162
163 InProcessCommandBuffer::Service::Service(
164 gpu::gles2::MailboxManager* mailbox_manager,
165 scoped_refptr<gl::GLShareGroup> share_group)
166 : gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()),
167 mailbox_manager_(mailbox_manager),
168 share_group_(share_group) {}
169
156 InProcessCommandBuffer::Service::~Service() {} 170 InProcessCommandBuffer::Service::~Service() {}
157 171
158 const gpu::GpuPreferences& InProcessCommandBuffer::Service::gpu_preferences() { 172 const gpu::GpuPreferences& InProcessCommandBuffer::Service::gpu_preferences() {
159 return gpu_preferences_; 173 return gpu_preferences_;
160 } 174 }
161 175
162 const gpu::GpuDriverBugWorkarounds& 176 const gpu::GpuDriverBugWorkarounds&
163 InProcessCommandBuffer::Service::gpu_driver_bug_workarounds() { 177 InProcessCommandBuffer::Service::gpu_driver_bug_workarounds() {
164 return gpu_driver_bug_workarounds_; 178 return gpu_driver_bug_workarounds_;
165 } 179 }
(...skipping 78 matching lines...) Expand 10 before | Expand all | Expand 10 after
244 258
245 if (!MakeCurrent()) 259 if (!MakeCurrent())
246 return; 260 return;
247 261
248 executor_->PutChanged(); 262 executor_->PutChanged();
249 } 263 }
250 264
251 bool InProcessCommandBuffer::Initialize( 265 bool InProcessCommandBuffer::Initialize(
252 scoped_refptr<gl::GLSurface> surface, 266 scoped_refptr<gl::GLSurface> surface,
253 bool is_offscreen, 267 bool is_offscreen,
254 gfx::AcceleratedWidget window, 268 SurfaceHandle window,
255 const gles2::ContextCreationAttribHelper& attribs, 269 const gles2::ContextCreationAttribHelper& attribs,
256 InProcessCommandBuffer* share_group, 270 InProcessCommandBuffer* share_group,
257 GpuMemoryBufferManager* gpu_memory_buffer_manager, 271 GpuMemoryBufferManager* gpu_memory_buffer_manager,
258 ImageFactory* image_factory, 272 ImageFactory* image_factory,
259 scoped_refptr<base::SingleThreadTaskRunner> task_runner) { 273 scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
260 DCHECK(!share_group || service_.get() == share_group->service_.get()); 274 DCHECK(!share_group || service_.get() == share_group->service_.get());
261 275
262 if (surface) { 276 if (surface) {
263 // If a surface is provided, we are running in a webview and should not have 277 // If a surface is provided, we are running in a webview and should not have
264 // a task runner. 278 // a task runner.
(...skipping 14 matching lines...) Expand all
279 &capabilities, share_group, image_factory); 293 &capabilities, share_group, image_factory);
280 294
281 base::Callback<bool(void)> init_task = 295 base::Callback<bool(void)> init_task =
282 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread, 296 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
283 base::Unretained(this), params); 297 base::Unretained(this), params);
284 298
285 base::WaitableEvent completion( 299 base::WaitableEvent completion(
286 base::WaitableEvent::ResetPolicy::MANUAL, 300 base::WaitableEvent::ResetPolicy::MANUAL,
287 base::WaitableEvent::InitialState::NOT_SIGNALED); 301 base::WaitableEvent::InitialState::NOT_SIGNALED);
288 bool result = false; 302 bool result = false;
289 QueueTask( 303 QueueTask(true, base::Bind(&RunTaskWithResult<bool>, init_task, &result,
290 base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion)); 304 &completion));
291 completion.Wait(); 305 completion.Wait();
292 306
293 gpu_memory_buffer_manager_ = gpu_memory_buffer_manager; 307 gpu_memory_buffer_manager_ = gpu_memory_buffer_manager;
294 308
295 if (result) 309 if (result)
296 capabilities_ = capabilities; 310 capabilities_ = capabilities;
297 311
298 return result; 312 return result;
299 } 313 }
300 314
(...skipping 12 matching lines...) Expand all
313 &InProcessCommandBuffer::PumpCommandsOnGpuThread, gpu_thread_weak_ptr_)); 327 &InProcessCommandBuffer::PumpCommandsOnGpuThread, gpu_thread_weak_ptr_));
314 command_buffer->SetParseErrorCallback(base::Bind( 328 command_buffer->SetParseErrorCallback(base::Bind(
315 &InProcessCommandBuffer::OnContextLostOnGpuThread, gpu_thread_weak_ptr_)); 329 &InProcessCommandBuffer::OnContextLostOnGpuThread, gpu_thread_weak_ptr_));
316 330
317 gl_share_group_ = params.context_group ? params.context_group->gl_share_group_ 331 gl_share_group_ = params.context_group ? params.context_group->gl_share_group_
318 : service_->share_group(); 332 : service_->share_group();
319 333
320 bool bind_generates_resource = false; 334 bool bind_generates_resource = false;
321 scoped_refptr<gles2::FeatureInfo> feature_info = 335 scoped_refptr<gles2::FeatureInfo> feature_info =
322 new gles2::FeatureInfo(service_->gpu_driver_bug_workarounds()); 336 new gles2::FeatureInfo(service_->gpu_driver_bug_workarounds());
323 decoder_.reset(gles2::GLES2Decoder::Create( 337
338 context_group_ =
324 params.context_group 339 params.context_group
325 ? params.context_group->decoder_->GetContextGroup() 340 ? params.context_group->decoder_->GetContextGroup()
326 : new gles2::ContextGroup( 341 : new gles2::ContextGroup(
327 service_->gpu_preferences(), service_->mailbox_manager(), NULL, 342 service_->gpu_preferences(), service_->mailbox_manager(), NULL,
328 service_->shader_translator_cache(), 343 service_->shader_translator_cache(),
329 service_->framebuffer_completeness_cache(), feature_info, 344 service_->framebuffer_completeness_cache(), feature_info,
330 bind_generates_resource, nullptr, nullptr))); 345 bind_generates_resource, nullptr, nullptr);
346
347 decoder_.reset(gles2::GLES2Decoder::Create(context_group_.get()));
331 348
332 executor_.reset(new CommandExecutor(command_buffer.get(), decoder_.get(), 349 executor_.reset(new CommandExecutor(command_buffer.get(), decoder_.get(),
333 decoder_.get())); 350 decoder_.get()));
334 command_buffer->SetGetBufferChangeCallback(base::Bind( 351 command_buffer->SetGetBufferChangeCallback(base::Bind(
335 &CommandExecutor::SetGetBuffer, base::Unretained(executor_.get()))); 352 &CommandExecutor::SetGetBuffer, base::Unretained(executor_.get())));
336 command_buffer_ = std::move(command_buffer); 353 command_buffer_ = std::move(command_buffer);
337 354
338 decoder_->set_engine(executor_.get()); 355 decoder_->set_engine(executor_.get());
339 356
340 if (!surface_.get()) { 357 if (!surface_.get()) {
341 if (params.is_offscreen) 358 if (params.is_offscreen) {
342 surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size()); 359 surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
343 else 360 } else {
344 surface_ = gl::init::CreateViewGLSurface(params.window); 361 surface_ = ImageTransportSurface::CreateNativeSurface(
362 gpu_thread_weak_ptr_factory_.GetWeakPtr(), params.window,
363 gl::GLSurface::SURFACE_DEFAULT);
364 if (!surface_ || !surface_->Initialize(gl::GLSurface::SURFACE_DEFAULT)) {
365 surface_ = nullptr;
366 DLOG(ERROR) << "Failed to create surface.";
367 return false;
368 }
369 }
345 } 370 }
346 371
347 if (!surface_.get()) { 372 if (!surface_.get()) {
348 LOG(ERROR) << "Could not create GLSurface."; 373 LOG(ERROR) << "Could not create GLSurface.";
349 DestroyOnGpuThread(); 374 DestroyOnGpuThread();
350 return false; 375 return false;
351 } 376 }
352 377
353 sync_point_order_data_ = SyncPointOrderData::Create(); 378 sync_point_order_data_ = SyncPointOrderData::Create();
354 sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient( 379 sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient(
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after
// Tears down the command buffer from the client thread.  Blocks until the
// GPU-thread side (DestroyOnGpuThread) has finished.
void InProcessCommandBuffer::Destroy() {
  CheckSequencedThread();
  // Stop any in-flight client-thread callbacks and detach the client first so
  // nothing observes the buffer mid-destruction.
  client_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  gpu_control_client_ = nullptr;
  base::WaitableEvent completion(
      base::WaitableEvent::ResetPolicy::MANUAL,
      base::WaitableEvent::InitialState::NOT_SIGNALED);
  bool result = false;
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  // Queued out-of-order (first arg true): destruction must not wait behind
  // the normal in-order task queue.
  QueueTask(true, base::Bind(&RunTaskWithResult<bool>, destroy_task, &result,
                             &completion));
  completion.Wait();
}
451 476
// GPU-thread half of Destroy().  Releases GL/decoder/sync-point state and
// drains the pending task queue.  Always returns true (the bool is only for
// the RunTaskWithResult plumbing).
bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
  if (decoder_) {
    // |have_context| tells the decoder whether GL calls are safe during
    // teardown; without a current context it only frees CPU-side state.
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = nullptr;
  surface_ = nullptr;
  sync_point_client_ = nullptr;
  if (sync_point_order_data_) {
    sync_point_order_data_->Destroy();
    sync_point_order_data_ = nullptr;
  }
  gl_share_group_ = nullptr;

  // Drop any tasks still queued for this buffer; swapping with an empty
  // queue releases them under the lock.
  base::AutoLock lock(task_queue_lock_);
  std::queue<std::unique_ptr<GpuTask>> empty;
  task_queue_.swap(empty);

  return true;
}
473 502
// Debug-only assertion that the caller is on the expected sequence.  A null
// |sequence_checker_| (see Initialize) disables the check entirely.
void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ || sequence_checker_->CalledOnValidSequence());
}
477 506
478 void InProcessCommandBuffer::OnContextLostOnGpuThread() { 507 void InProcessCommandBuffer::OnContextLostOnGpuThread() {
479 if (!origin_task_runner_) 508 if (!origin_task_runner_)
480 return OnContextLost(); // Just kidding, we're on the client thread. 509 return OnContextLost(); // Just kidding, we're on the client thread.
(...skipping 16 matching lines...) Expand all
497 } 526 }
498 527
// Returns the freshest known state without a GPU-thread round trip.
// |last_state_| is only advanced when the flushed state's generation is not
// behind it; the unsigned subtraction against 0x80000000U makes the
// comparison robust to generation-counter wraparound.
CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}
506 535
// Schedules |task| on the GPU thread.  When |out_of_order| is true the task
// is handed straight to the service, bypassing the in-order queue (used for
// init/destroy).  Otherwise the task gets a sync-point order number and is
// drained FIFO by ProcessTasksOnGpuThread.
void InProcessCommandBuffer::QueueTask(bool out_of_order,
                                       const base::Closure& task) {
  if (out_of_order) {
    service_->ScheduleTask(task);
    return;
  }
  // Release the |task_queue_lock_| before calling ScheduleTask because
  // the callback may get called immediately and attempt to acquire the lock.
  SyncPointManager* sync_manager = service_->sync_point_manager();
  uint32_t order_num =
      sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager);
  {
    base::AutoLock lock(task_queue_lock_);
    task_queue_.push(base::MakeUnique<GpuTask>(task, order_num));
  }
  service_->ScheduleTask(base::Bind(
      &InProcessCommandBuffer::ProcessTasksOnGpuThread, gpu_thread_weak_ptr_));
}
554
// Drains the in-order task queue on the GPU thread.  If the executor becomes
// descheduled mid-task (e.g. blocked on a sync token) the current order
// number is paused, the task is left at the head of the queue, and we bail
// out; a later reschedule re-enters this function and re-runs it.
// NOTE(review): |task_queue_lock_| is held while task->callback.Run()
// executes, so a callback must never call back into QueueTask on this
// thread or it would self-deadlock -- confirm no GPU-thread callback does.
void InProcessCommandBuffer::ProcessTasksOnGpuThread() {
  while (executor_->scheduled()) {
    base::AutoLock lock(task_queue_lock_);
    if (task_queue_.empty())
      break;
    GpuTask* task = task_queue_.front().get();
    sync_point_order_data_->BeginProcessingOrderNumber(task->order_number);
    task->callback.Run();
    if (!executor_->scheduled() && !service_->BlockThreadOnWaitSyncToken()) {
      sync_point_order_data_->PauseProcessingOrderNumber(task->order_number);
      return;
    }
    sync_point_order_data_->FinishProcessingOrderNumber(task->order_number);
    task_queue_.pop();
  }
}
571
// Returns the cached state without refreshing it (contrast GetStateFast).
CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}
511 576
// Refreshes |last_state_| from the most recent flush, then reports its token.
int32_t InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  GetStateFast();
  return last_state_.token;
}
517 582
518 void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset, 583 void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset) {
519 uint32_t order_num) {
520 CheckSequencedThread(); 584 CheckSequencedThread();
521 ScopedEvent handle_flush(&flush_event_); 585 ScopedEvent handle_flush(&flush_event_);
522 base::AutoLock lock(command_buffer_lock_); 586 base::AutoLock lock(command_buffer_lock_);
523 587
524 { 588 {
525 ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(),
526 order_num);
527 command_buffer_->Flush(put_offset); 589 command_buffer_->Flush(put_offset);
528 { 590 {
529 // Update state before signaling the flush event. 591 // Update state before signaling the flush event.
530 base::AutoLock lock(state_after_last_flush_lock_); 592 base::AutoLock lock(state_after_last_flush_lock_);
531 state_after_last_flush_ = command_buffer_->GetLastState(); 593 state_after_last_flush_ = command_buffer_->GetLastState();
532 } 594 }
533
534 // Currently the in process command buffer does not support being
535 // descheduled, if it does we would need to back off on calling the finish
536 // processing number function until the message is rescheduled and finished
537 // processing. This DCHECK is to enforce this.
538 DCHECK(error::IsError(state_after_last_flush_.error) ||
539 put_offset == state_after_last_flush_.get_offset);
540 } 595 }
541 596
542 // If we've processed all pending commands but still have pending queries, 597 // If we've processed all pending commands but still have pending queries,
543 // pump idle work until the query is passed. 598 // pump idle work until the query is passed.
544 if (put_offset == state_after_last_flush_.get_offset && 599 if (put_offset == state_after_last_flush_.get_offset &&
545 (executor_->HasMoreIdleWork() || executor_->HasPendingQueries())) { 600 (executor_->HasMoreIdleWork() || executor_->HasPendingQueries())) {
546 ScheduleDelayedWorkOnGpuThread(); 601 ScheduleDelayedWorkOnGpuThread();
547 } 602 }
548 } 603 }
549 604
(...skipping 21 matching lines...) Expand all
571 } 626 }
572 627
// Client-thread flush: queues FlushOnGpuThread for commands up to
// |put_offset|.  No-ops when the context is already lost or when nothing new
// has been put since the last flush.
void InProcessCommandBuffer::Flush(int32_t put_offset) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  gpu_thread_weak_ptr_, put_offset);
  QueueTask(false, task);

  // Every fence sync generated so far rides along with this flush.
  flushed_fence_sync_release_ = next_fence_sync_release_ - 1;
}
591 643
// In-process there is no cross-channel reordering to defend against, so an
// ordering barrier is simply a flush.
void InProcessCommandBuffer::OrderingBarrier(int32_t put_offset) {
  Flush(put_offset);
}
595 647
// Blocks the client thread until the last token enters [start, end] or the
// context is lost.  Each GPU-thread flush signals |flush_event_| to wake us.
void InProcessCommandBuffer::WaitForTokenInRange(int32_t start, int32_t end) {
  CheckSequencedThread();
  while (!InRange(start, end, GetLastToken()) &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
  }
}
602 655
// Blocks the client thread until the get offset enters [start, end] or the
// context is lost, re-reading state after every flush-event wakeup.
void InProcessCommandBuffer::WaitForGetOffsetInRange(int32_t start,
                                                     int32_t end) {
  CheckSequencedThread();

  GetStateFast();
  while (!InRange(start, end, last_state_.get_offset) &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
    GetStateFast();
  }
}
614 667
// Switches the command buffer to the shared-memory buffer |shm_id|.
// Synchronous: waits for the GPU thread to apply the change, then re-reads
// the post-change state so client-side caching stays coherent.
void InProcessCommandBuffer::SetGetBuffer(int32_t shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  base::WaitableEvent completion(
      base::WaitableEvent::ResetPolicy::MANUAL,
      base::WaitableEvent::InitialState::NOT_SIGNALED);
  base::Closure task =
      base::Bind(&InProcessCommandBuffer::SetGetBufferOnGpuThread,
                 base::Unretained(this), shm_id, &completion);
  QueueTask(false, task);
  completion.Wait();

  {
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
}
634 687
635 void InProcessCommandBuffer::SetGetBufferOnGpuThread( 688 void InProcessCommandBuffer::SetGetBufferOnGpuThread(
636 int32_t shm_id, 689 int32_t shm_id,
(...skipping 11 matching lines...) Expand all
648 base::AutoLock lock(command_buffer_lock_); 701 base::AutoLock lock(command_buffer_lock_);
649 return command_buffer_->CreateTransferBuffer(size, id); 702 return command_buffer_->CreateTransferBuffer(size, id);
650 } 703 }
651 704
652 void InProcessCommandBuffer::DestroyTransferBuffer(int32_t id) { 705 void InProcessCommandBuffer::DestroyTransferBuffer(int32_t id) {
653 CheckSequencedThread(); 706 CheckSequencedThread();
654 base::Closure task = 707 base::Closure task =
655 base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread, 708 base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
656 base::Unretained(this), id); 709 base::Unretained(this), id);
657 710
658 QueueTask(task); 711 QueueTask(false, task);
659 } 712 }
660 713
// GPU-thread half of DestroyTransferBuffer(); serialized via the buffer lock.
void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32_t id) {
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->DestroyTransferBuffer(id);
}
665 718
// Registers the client that receives GpuControl callbacks (e.g. context
// lost).  Cleared again in Destroy().
void InProcessCommandBuffer::SetGpuControlClient(GpuControlClient* client) {
  gpu_control_client_ = client;
}
(...skipping 20 matching lines...) Expand all
689 DCHECK(gpu::IsImageFormatCompatibleWithGpuMemoryBufferFormat( 742 DCHECK(gpu::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
690 internalformat, gpu_memory_buffer->GetFormat())); 743 internalformat, gpu_memory_buffer->GetFormat()));
691 744
692 // This handle is owned by the GPU thread and must be passed to it or it 745 // This handle is owned by the GPU thread and must be passed to it or it
693 // will leak. In otherwords, do not early out on error between here and the 746 // will leak. In otherwords, do not early out on error between here and the
694 // queuing of the CreateImage task below. 747 // queuing of the CreateImage task below.
695 bool requires_sync_point = false; 748 bool requires_sync_point = false;
696 gfx::GpuMemoryBufferHandle handle = ShareGpuMemoryBufferToGpuThread( 749 gfx::GpuMemoryBufferHandle handle = ShareGpuMemoryBufferToGpuThread(
697 gpu_memory_buffer->GetHandle(), &requires_sync_point); 750 gpu_memory_buffer->GetHandle(), &requires_sync_point);
698 751
699 SyncPointManager* sync_manager = service_->sync_point_manager();
700 const uint32_t order_num =
701 sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager);
702
703 uint64_t fence_sync = 0; 752 uint64_t fence_sync = 0;
704 if (requires_sync_point) { 753 if (requires_sync_point) {
705 fence_sync = GenerateFenceSyncRelease(); 754 fence_sync = GenerateFenceSyncRelease();
706 755
707 // Previous fence syncs should be flushed already. 756 // Previous fence syncs should be flushed already.
708 DCHECK_EQ(fence_sync - 1, flushed_fence_sync_release_); 757 DCHECK_EQ(fence_sync - 1, flushed_fence_sync_release_);
709 } 758 }
710 759
711 QueueTask(base::Bind( 760 QueueTask(false, base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
712 &InProcessCommandBuffer::CreateImageOnGpuThread, base::Unretained(this), 761 base::Unretained(this), new_id, handle,
713 new_id, handle, gfx::Size(base::checked_cast<int>(width), 762 gfx::Size(base::checked_cast<int>(width),
714 base::checked_cast<int>(height)), 763 base::checked_cast<int>(height)),
715 gpu_memory_buffer->GetFormat(), 764 gpu_memory_buffer->GetFormat(),
716 base::checked_cast<uint32_t>(internalformat), order_num, fence_sync)); 765 base::checked_cast<uint32_t>(internalformat),
766 fence_sync));
717 767
718 if (fence_sync) { 768 if (fence_sync) {
719 flushed_fence_sync_release_ = fence_sync; 769 flushed_fence_sync_release_ = fence_sync;
720 SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(), 770 SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(),
721 GetCommandBufferID(), fence_sync); 771 GetCommandBufferID(), fence_sync);
722 sync_token.SetVerifyFlush(); 772 sync_token.SetVerifyFlush();
723 gpu_memory_buffer_manager_->SetDestructionSyncToken(gpu_memory_buffer, 773 gpu_memory_buffer_manager_->SetDestructionSyncToken(gpu_memory_buffer,
724 sync_token); 774 sync_token);
725 } 775 }
726 776
727 return new_id; 777 return new_id;
728 } 778 }
729 779
730 void InProcessCommandBuffer::CreateImageOnGpuThread( 780 void InProcessCommandBuffer::CreateImageOnGpuThread(
731 int32_t id, 781 int32_t id,
732 const gfx::GpuMemoryBufferHandle& handle, 782 const gfx::GpuMemoryBufferHandle& handle,
733 const gfx::Size& size, 783 const gfx::Size& size,
734 gfx::BufferFormat format, 784 gfx::BufferFormat format,
735 uint32_t internalformat, 785 uint32_t internalformat,
736 uint32_t order_num,
737 uint64_t fence_sync) { 786 uint64_t fence_sync) {
738 ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(),
739 order_num);
740 if (!decoder_) 787 if (!decoder_)
741 return; 788 return;
742 789
743 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); 790 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
744 DCHECK(image_manager); 791 DCHECK(image_manager);
745 if (image_manager->LookupImage(id)) { 792 if (image_manager->LookupImage(id)) {
746 LOG(ERROR) << "Image already exists with same ID."; 793 LOG(ERROR) << "Image already exists with same ID.";
747 return; 794 return;
748 } 795 }
749 796
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
788 } 835 }
789 836
790 if (fence_sync) { 837 if (fence_sync) {
791 sync_point_client_->ReleaseFenceSync(fence_sync); 838 sync_point_client_->ReleaseFenceSync(fence_sync);
792 } 839 }
793 } 840 }
794 841
795 void InProcessCommandBuffer::DestroyImage(int32_t id) { 842 void InProcessCommandBuffer::DestroyImage(int32_t id) {
796 CheckSequencedThread(); 843 CheckSequencedThread();
797 844
798 QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread, 845 QueueTask(false, base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread,
799 base::Unretained(this), id)); 846 base::Unretained(this), id));
800 } 847 }
801 848
802 void InProcessCommandBuffer::DestroyImageOnGpuThread(int32_t id) { 849 void InProcessCommandBuffer::DestroyImageOnGpuThread(int32_t id) {
803 if (!decoder_) 850 if (!decoder_)
804 return; 851 return;
805 852
806 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); 853 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
807 DCHECK(image_manager); 854 DCHECK(image_manager);
808 if (!image_manager->LookupImage(id)) { 855 if (!image_manager->LookupImage(id)) {
809 LOG(ERROR) << "Image with ID doesn't exist."; 856 LOG(ERROR) << "Image with ID doesn't exist.";
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
843 mailbox_manager->PushTextureUpdates(sync_token); 890 mailbox_manager->PushTextureUpdates(sync_token);
844 } 891 }
845 892
846 sync_point_client_->ReleaseFenceSync(release); 893 sync_point_client_->ReleaseFenceSync(release);
847 } 894 }
848 895
849 bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread( 896 bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread(
850 gpu::CommandBufferNamespace namespace_id, 897 gpu::CommandBufferNamespace namespace_id,
851 gpu::CommandBufferId command_buffer_id, 898 gpu::CommandBufferId command_buffer_id,
852 uint64_t release) { 899 uint64_t release) {
900 DCHECK(!waiting_for_sync_point_);
853 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager(); 901 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager();
854 DCHECK(sync_point_manager); 902 DCHECK(sync_point_manager);
855 903
856 scoped_refptr<gpu::SyncPointClientState> release_state = 904 scoped_refptr<gpu::SyncPointClientState> release_state =
857 sync_point_manager->GetSyncPointClientState(namespace_id, 905 sync_point_manager->GetSyncPointClientState(namespace_id,
858 command_buffer_id); 906 command_buffer_id);
859 907
860 if (!release_state) 908 if (!release_state)
861 return true; 909 return true;
862 910
863 if (!release_state->IsFenceSyncReleased(release)) { 911 if (service_->BlockThreadOnWaitSyncToken()) {
864 // Use waitable event which is signalled when the release fence is released. 912 if (!release_state->IsFenceSyncReleased(release)) {
865 sync_point_client_->Wait( 913 // Use waitable event which is signalled when the release fence is
866 release_state.get(), release, 914 // released.
867 base::Bind(&base::WaitableEvent::Signal, 915 sync_point_client_->Wait(
868 base::Unretained(&fence_sync_wait_event_))); 916 release_state.get(), release,
869 fence_sync_wait_event_.Wait(); 917 base::Bind(&base::WaitableEvent::Signal,
918 base::Unretained(&fence_sync_wait_event_)));
919 fence_sync_wait_event_.Wait();
920 }
921
922 gles2::MailboxManager* mailbox_manager =
923 decoder_->GetContextGroup()->mailbox_manager();
924 SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
925 mailbox_manager->PullTextureUpdates(sync_token);
926 return true;
870 } 927 }
871 928
929 if (release_state->IsFenceSyncReleased(release)) {
930 gles2::MailboxManager* mailbox_manager =
931 decoder_->GetContextGroup()->mailbox_manager();
932 SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
933 mailbox_manager->PullTextureUpdates(sync_token);
934 return true;
935 }
936
937 waiting_for_sync_point_ = true;
938 sync_point_client_->Wait(
939 release_state.get(), release,
940 base::Bind(&InProcessCommandBuffer::OnWaitFenceSyncCompleted,
941 gpu_thread_weak_ptr_factory_.GetWeakPtr(), namespace_id,
942 command_buffer_id, release));
943
944 if (!waiting_for_sync_point_)
945 return true;
946
947 executor_->SetScheduled(false);
948 return false;
949 }
950
951 void InProcessCommandBuffer::OnWaitFenceSyncCompleted(
952 CommandBufferNamespace namespace_id,
953 CommandBufferId command_buffer_id,
954 uint64_t release) {
955 DCHECK(waiting_for_sync_point_);
872 gles2::MailboxManager* mailbox_manager = 956 gles2::MailboxManager* mailbox_manager =
873 decoder_->GetContextGroup()->mailbox_manager(); 957 decoder_->GetContextGroup()->mailbox_manager();
874 SyncToken sync_token(namespace_id, 0, command_buffer_id, release); 958 SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
875 mailbox_manager->PullTextureUpdates(sync_token); 959 mailbox_manager->PullTextureUpdates(sync_token);
876 return true; 960 waiting_for_sync_point_ = false;
961 executor_->SetScheduled(true);
962 QueueTask(false, base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
963 gpu_thread_weak_ptr_, last_put_offset_));
877 } 964 }
878 965
879 void InProcessCommandBuffer::DescheduleUntilFinishedOnGpuThread() { 966 void InProcessCommandBuffer::DescheduleUntilFinishedOnGpuThread() {
880 NOTIMPLEMENTED(); 967 if (!service_->BlockThreadOnWaitSyncToken()) {
968 DCHECK(executor_->scheduled());
969 DCHECK(executor_->HasPollingWork());
970
971 executor_->SetScheduled(false);
972 }
881 } 973 }
882 974
883 void InProcessCommandBuffer::RescheduleAfterFinishedOnGpuThread() { 975 void InProcessCommandBuffer::RescheduleAfterFinishedOnGpuThread() {
884 NOTIMPLEMENTED(); 976 if (!service_->BlockThreadOnWaitSyncToken()) {
977 DCHECK(!executor_->scheduled());
978
979 executor_->SetScheduled(true);
980 ProcessTasksOnGpuThread();
981 }
885 } 982 }
886 983
887 void InProcessCommandBuffer::SignalSyncTokenOnGpuThread( 984 void InProcessCommandBuffer::SignalSyncTokenOnGpuThread(
888 const SyncToken& sync_token, 985 const SyncToken& sync_token,
889 const base::Closure& callback) { 986 const base::Closure& callback) {
890 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager(); 987 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager();
891 DCHECK(sync_point_manager); 988 DCHECK(sync_point_manager);
892 989
893 scoped_refptr<gpu::SyncPointClientState> release_state = 990 scoped_refptr<gpu::SyncPointClientState> release_state =
894 sync_point_manager->GetSyncPointClientState( 991 sync_point_manager->GetSyncPointClientState(
895 sync_token.namespace_id(), sync_token.command_buffer_id()); 992 sync_token.namespace_id(), sync_token.command_buffer_id());
896 993
897 if (!release_state) { 994 if (!release_state) {
898 callback.Run(); 995 callback.Run();
899 return; 996 return;
900 } 997 }
901 998
902 sync_point_client_->WaitOutOfOrder( 999 sync_point_client_->WaitOutOfOrder(
903 release_state.get(), sync_token.release_count(), WrapCallback(callback)); 1000 release_state.get(), sync_token.release_count(), WrapCallback(callback));
904 } 1001 }
905 1002
906 void InProcessCommandBuffer::SignalQuery(unsigned query_id, 1003 void InProcessCommandBuffer::SignalQuery(unsigned query_id,
907 const base::Closure& callback) { 1004 const base::Closure& callback) {
908 CheckSequencedThread(); 1005 CheckSequencedThread();
909 QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread, 1006 QueueTask(false, base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
910 base::Unretained(this), query_id, 1007 base::Unretained(this), query_id,
911 WrapCallback(callback))); 1008 WrapCallback(callback)));
912 } 1009 }
913 1010
914 void InProcessCommandBuffer::SignalQueryOnGpuThread( 1011 void InProcessCommandBuffer::SignalQueryOnGpuThread(
915 unsigned query_id, 1012 unsigned query_id,
916 const base::Closure& callback) { 1013 const base::Closure& callback) {
917 gles2::QueryManager* query_manager_ = decoder_->GetQueryManager(); 1014 gles2::QueryManager* query_manager_ = decoder_->GetQueryManager();
918 DCHECK(query_manager_); 1015 DCHECK(query_manager_);
919 1016
920 gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id); 1017 gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id);
921 if (!query) 1018 if (!query)
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
957 return release <= flushed_fence_sync_release_; 1054 return release <= flushed_fence_sync_release_;
958 } 1055 }
959 1056
960 bool InProcessCommandBuffer::IsFenceSyncFlushReceived(uint64_t release) { 1057 bool InProcessCommandBuffer::IsFenceSyncFlushReceived(uint64_t release) {
961 return IsFenceSyncFlushed(release); 1058 return IsFenceSyncFlushed(release);
962 } 1059 }
963 1060
964 void InProcessCommandBuffer::SignalSyncToken(const SyncToken& sync_token, 1061 void InProcessCommandBuffer::SignalSyncToken(const SyncToken& sync_token,
965 const base::Closure& callback) { 1062 const base::Closure& callback) {
966 CheckSequencedThread(); 1063 CheckSequencedThread();
967 QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread, 1064 QueueTask(
968 base::Unretained(this), sync_token, 1065 true,
969 WrapCallback(callback))); 1066 base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread,
1067 base::Unretained(this), sync_token, WrapCallback(callback)));
970 } 1068 }
971 1069
972 bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken( 1070 bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken(
973 const SyncToken* sync_token) { 1071 const SyncToken* sync_token) {
974 return sync_token->namespace_id() == GetNamespaceID(); 1072 return sync_token->namespace_id() == GetNamespaceID();
975 } 1073 }
976 1074
1075 #if defined(OS_WIN)
1076 void InProcessCommandBuffer::DidCreateAcceleratedSurfaceChildWindow(
1077 SurfaceHandle parent_window,
1078 SurfaceHandle child_window) {
1079 // TODO(fsamuel): Implement this.
1080 }
1081 #endif
1082
1083 void InProcessCommandBuffer::DidSwapBuffersComplete(
1084 SwapBuffersCompleteParams params) {
1085 #if defined(OS_MACOSX)
1086 gpu::GpuProcessHostedCALayerTreeParamsMac params_mac;
1087 params_mac.ca_context_id = params.ca_context_id;
1088 params_mac.fullscreen_low_power_ca_context_valid =
1089 params.fullscreen_low_power_ca_context_valid;
1090 params_mac.fullscreen_low_power_ca_context_id =
1091 params.fullscreen_low_power_ca_context_id;
1092 params_mac.io_surface.reset(IOSurfaceLookupFromMachPort(params.io_surface));
1093 params_mac.pixel_size = params.pixel_size;
1094 params_mac.scale_factor = params.scale_factor;
1095 params_mac.responses = std::move(params.in_use_responses);
1096 gpu::GpuProcessHostedCALayerTreeParamsMac* mac_frame_ptr = &params_mac;
1097 #else
1098 gpu::GpuProcessHostedCALayerTreeParamsMac* mac_frame_ptr = nullptr;
1099 #endif
1100 if (!swap_buffers_completion_callback_.is_null()) {
1101 if (!ui::LatencyInfo::Verify(
1102 params.latency_info,
1103 "InProcessCommandBuffer::DidSwapBuffersComplete")) {
1104 swap_buffers_completion_callback_.Run(std::vector<ui::LatencyInfo>(),
1105 params.result, mac_frame_ptr);
1106 } else {
1107 swap_buffers_completion_callback_.Run(params.latency_info, params.result,
1108 mac_frame_ptr);
1109 }
1110 }
1111 }
1112
1113 const gles2::FeatureInfo* InProcessCommandBuffer::GetFeatureInfo() const {
1114 return context_group_->feature_info();
1115 }
1116
1117 void InProcessCommandBuffer::SetLatencyInfoCallback(
1118 const LatencyInfoCallback& callback) {
1119 // TODO(fsamuel): Implement this.
1120 }
1121
1122 void InProcessCommandBuffer::UpdateVSyncParameters(base::TimeTicks timebase,
1123 base::TimeDelta interval) {
1124 if (!update_vsync_parameters_completion_callback_.is_null())
1125 update_vsync_parameters_completion_callback_.Run(timebase, interval);
1126 }
1127
1128 void InProcessCommandBuffer::SetSwapBuffersCompletionCallback(
1129 const SwapBuffersCompletionCallback& callback) {
1130 swap_buffers_completion_callback_ = callback;
1131 }
1132
1133 void InProcessCommandBuffer::SetUpdateVSyncParametersCallback(
1134 const UpdateVSyncParametersCallback& callback) {
1135 update_vsync_parameters_completion_callback_ = callback;
1136 }
1137
977 gpu::error::Error InProcessCommandBuffer::GetLastError() { 1138 gpu::error::Error InProcessCommandBuffer::GetLastError() {
978 CheckSequencedThread(); 1139 CheckSequencedThread();
979 return last_state_.error; 1140 return last_state_.error;
980 } 1141 }
981 1142
982 namespace { 1143 namespace {
983 1144
984 void PostCallback( 1145 void PostCallback(
985 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner, 1146 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
986 const base::Closure& callback) { 1147 const base::Closure& callback) {
(...skipping 21 matching lines...) Expand all
1008 base::Closure callback_on_client_thread = 1169 base::Closure callback_on_client_thread =
1009 base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback)); 1170 base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
1010 base::Closure wrapped_callback = 1171 base::Closure wrapped_callback =
1011 base::Bind(&PostCallback, base::ThreadTaskRunnerHandle::IsSet() 1172 base::Bind(&PostCallback, base::ThreadTaskRunnerHandle::IsSet()
1012 ? base::ThreadTaskRunnerHandle::Get() 1173 ? base::ThreadTaskRunnerHandle::Get()
1013 : nullptr, 1174 : nullptr,
1014 callback_on_client_thread); 1175 callback_on_client_thread);
1015 return wrapped_callback; 1176 return wrapped_callback;
1016 } 1177 }
1017 1178
1018 GpuInProcessThread::GpuInProcessThread(SyncPointManager* sync_point_manager) 1179 InProcessCommandBuffer::GpuTask::GpuTask(const base::Closure& callback,
1019 : base::Thread("GpuThread"), sync_point_manager_(sync_point_manager) { 1180 uint32_t order_number)
1020 Start(); 1181 : callback(callback), order_number(order_number) {}
1021 }
1022 1182
1023 GpuInProcessThread::~GpuInProcessThread() { 1183 InProcessCommandBuffer::GpuTask::~GpuTask() {}
1024 Stop();
1025 }
1026
1027 void GpuInProcessThread::AddRef() const {
1028 base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
1029 }
1030 void GpuInProcessThread::Release() const {
1031 base::RefCountedThreadSafe<GpuInProcessThread>::Release();
1032 }
1033
1034 void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
1035 task_runner()->PostTask(FROM_HERE, task);
1036 }
1037
1038 void GpuInProcessThread::ScheduleDelayedWork(const base::Closure& callback) {
1039 // Match delay with GpuCommandBufferStub.
1040 task_runner()->PostDelayedTask(FROM_HERE, callback,
1041 base::TimeDelta::FromMilliseconds(2));
1042 }
1043
1044 bool GpuInProcessThread::UseVirtualizedGLContexts() {
1045 return false;
1046 }
1047
1048 scoped_refptr<gles2::ShaderTranslatorCache>
1049 GpuInProcessThread::shader_translator_cache() {
1050 if (!shader_translator_cache_.get()) {
1051 shader_translator_cache_ =
1052 new gpu::gles2::ShaderTranslatorCache(gpu_preferences());
1053 }
1054 return shader_translator_cache_;
1055 }
1056
1057 scoped_refptr<gles2::FramebufferCompletenessCache>
1058 GpuInProcessThread::framebuffer_completeness_cache() {
1059 if (!framebuffer_completeness_cache_.get())
1060 framebuffer_completeness_cache_ =
1061 new gpu::gles2::FramebufferCompletenessCache;
1062 return framebuffer_completeness_cache_;
1063 }
1064
1065 SyncPointManager* GpuInProcessThread::sync_point_manager() {
1066 return sync_point_manager_;
1067 }
1068 1184
1069 } // namespace gpu 1185 } // namespace gpu
OLDNEW
« no previous file with comments | « gpu/ipc/in_process_command_buffer.h ('k') | gpu/ipc/service/gpu_channel_manager.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698