Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(484)

Side by Side Diff: gpu/ipc/in_process_command_buffer.cc

Issue 2498053004: Add InProcessContextProvider and update InProcessCommandBuffer (Closed)
Patch Set: Add missing dep in blimp Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2013 The Chromium Authors. All rights reserved. 1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "gpu/ipc/in_process_command_buffer.h" 5 #include "gpu/ipc/in_process_command_buffer.h"
6 6
7 #include <stddef.h> 7 #include <stddef.h>
8 #include <stdint.h> 8 #include <stdint.h>
9 9
10 #include <queue> 10 #include <queue>
11 #include <set> 11 #include <set>
12 #include <utility> 12 #include <utility>
13 13
14 #include "base/bind.h" 14 #include "base/bind.h"
15 #include "base/bind_helpers.h" 15 #include "base/bind_helpers.h"
16 #include "base/command_line.h" 16 #include "base/command_line.h"
17 #include "base/lazy_instance.h" 17 #include "base/lazy_instance.h"
18 #include "base/location.h" 18 #include "base/location.h"
19 #include "base/logging.h" 19 #include "base/logging.h"
20 #include "base/memory/ptr_util.h"
20 #include "base/memory/weak_ptr.h" 21 #include "base/memory/weak_ptr.h"
21 #include "base/numerics/safe_conversions.h" 22 #include "base/numerics/safe_conversions.h"
22 #include "base/sequence_checker.h" 23 #include "base/sequence_checker.h"
23 #include "base/single_thread_task_runner.h" 24 #include "base/single_thread_task_runner.h"
24 #include "base/threading/thread_task_runner_handle.h" 25 #include "base/threading/thread_task_runner_handle.h"
25 #include "gpu/command_buffer/client/gpu_control_client.h" 26 #include "gpu/command_buffer/client/gpu_control_client.h"
26 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" 27 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
27 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h" 28 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
28 #include "gpu/command_buffer/common/sync_token.h" 29 #include "gpu/command_buffer/common/sync_token.h"
29 #include "gpu/command_buffer/service/command_buffer_service.h" 30 #include "gpu/command_buffer/service/command_buffer_service.h"
30 #include "gpu/command_buffer/service/command_executor.h" 31 #include "gpu/command_buffer/service/command_executor.h"
31 #include "gpu/command_buffer/service/context_group.h" 32 #include "gpu/command_buffer/service/context_group.h"
32 #include "gpu/command_buffer/service/gl_context_virtual.h" 33 #include "gpu/command_buffer/service/gl_context_virtual.h"
33 #include "gpu/command_buffer/service/gpu_preferences.h" 34 #include "gpu/command_buffer/service/gpu_preferences.h"
34 #include "gpu/command_buffer/service/image_factory.h" 35 #include "gpu/command_buffer/service/image_factory.h"
35 #include "gpu/command_buffer/service/image_manager.h" 36 #include "gpu/command_buffer/service/image_manager.h"
36 #include "gpu/command_buffer/service/mailbox_manager.h" 37 #include "gpu/command_buffer/service/mailbox_manager.h"
37 #include "gpu/command_buffer/service/memory_program_cache.h" 38 #include "gpu/command_buffer/service/memory_program_cache.h"
38 #include "gpu/command_buffer/service/memory_tracking.h" 39 #include "gpu/command_buffer/service/memory_tracking.h"
39 #include "gpu/command_buffer/service/query_manager.h" 40 #include "gpu/command_buffer/service/query_manager.h"
40 #include "gpu/command_buffer/service/service_utils.h" 41 #include "gpu/command_buffer/service/service_utils.h"
41 #include "gpu/command_buffer/service/sync_point_manager.h" 42 #include "gpu/command_buffer/service/sync_point_manager.h"
42 #include "gpu/command_buffer/service/transfer_buffer_manager.h" 43 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
44 #include "gpu/ipc/gpu_in_process_thread_service.h"
45 #include "gpu/ipc/service/image_transport_surface.h"
43 #include "ui/gfx/geometry/size.h" 46 #include "ui/gfx/geometry/size.h"
44 #include "ui/gl/gl_context.h" 47 #include "ui/gl/gl_context.h"
45 #include "ui/gl/gl_image.h" 48 #include "ui/gl/gl_image.h"
46 #include "ui/gl/gl_image_shared_memory.h" 49 #include "ui/gl/gl_image_shared_memory.h"
47 #include "ui/gl/gl_share_group.h" 50 #include "ui/gl/gl_share_group.h"
48 #include "ui/gl/init/gl_factory.h" 51 #include "ui/gl/init/gl_factory.h"
49 52
50 #if defined(OS_WIN) 53 #if defined(OS_WIN)
51 #include <windows.h> 54 #include <windows.h>
52 #include "base/process/process_handle.h" 55 #include "base/process/process_handle.h"
53 #endif 56 #endif
54 57
58 #if defined(OS_MACOSX)
59 #include "gpu/ipc/client/gpu_process_hosted_ca_layer_tree_params.h"
60 #endif
61
55 namespace gpu { 62 namespace gpu {
56 63
57 namespace { 64 namespace {
58 65
59 base::StaticAtomicSequenceNumber g_next_command_buffer_id; 66 base::StaticAtomicSequenceNumber g_next_command_buffer_id;
60 67
61 template <typename T> 68 template <typename T>
62 static void RunTaskWithResult(base::Callback<T(void)> task, 69 static void RunTaskWithResult(base::Callback<T(void)> task,
63 T* result, 70 T* result,
64 base::WaitableEvent* completion) { 71 base::WaitableEvent* completion) {
65 *result = task.Run(); 72 *result = task.Run();
66 completion->Signal(); 73 completion->Signal();
67 } 74 }
68 75
69 struct ScopedOrderNumberProcessor { 76 class GpuInProcessThreadHolder : public base::Thread {
70 ScopedOrderNumberProcessor(SyncPointOrderData* order_data, uint32_t order_num) 77 public:
71 : order_data_(order_data), order_num_(order_num) { 78 GpuInProcessThreadHolder()
72 order_data_->BeginProcessingOrderNumber(order_num_); 79 : base::Thread("GpuThread"),
80 sync_point_manager_(new SyncPointManager(false)) {
81 Start();
73 } 82 }
74 83
75 ~ScopedOrderNumberProcessor() { 84 ~GpuInProcessThreadHolder() override { Stop(); }
76 order_data_->FinishProcessingOrderNumber(order_num_); 85
86 const scoped_refptr<InProcessCommandBuffer::Service>& GetGpuThreadService() {
87 if (!gpu_thread_service_) {
88 gpu_thread_service_ = new GpuInProcessThreadService(
89 task_runner(), sync_point_manager_.get(), nullptr, nullptr);
90 }
91 return gpu_thread_service_;
77 } 92 }
78 93
79 private: 94 private:
80 SyncPointOrderData* order_data_; 95 std::unique_ptr<SyncPointManager> sync_point_manager_;
81 uint32_t order_num_; 96 scoped_refptr<InProcessCommandBuffer::Service> gpu_thread_service_;
82 };
83
84 struct GpuInProcessThreadHolder {
85 GpuInProcessThreadHolder()
86 : sync_point_manager(new SyncPointManager(false)),
87 gpu_thread(new GpuInProcessThread(sync_point_manager.get())) {}
88 std::unique_ptr<SyncPointManager> sync_point_manager;
89 scoped_refptr<InProcessCommandBuffer::Service> gpu_thread;
90 }; 97 };
91 98
92 base::LazyInstance<GpuInProcessThreadHolder> g_default_service = 99 base::LazyInstance<GpuInProcessThreadHolder> g_default_service =
93 LAZY_INSTANCE_INITIALIZER; 100 LAZY_INSTANCE_INITIALIZER;
94 101
95 class ScopedEvent { 102 class ScopedEvent {
96 public: 103 public:
97 explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {} 104 explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {}
98 ~ScopedEvent() { event_->Signal(); } 105 ~ScopedEvent() { event_->Signal(); }
99 106
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
134 if (service) 141 if (service)
135 return service; 142 return service;
136 143
137 // Call base::ThreadTaskRunnerHandle::IsSet() to ensure that it is 144 // Call base::ThreadTaskRunnerHandle::IsSet() to ensure that it is
138 // instantiated before we create the GPU thread, otherwise shutdown order will 145 // instantiated before we create the GPU thread, otherwise shutdown order will
139 // delete the ThreadTaskRunnerHandle before the GPU thread's message loop, 146 // delete the ThreadTaskRunnerHandle before the GPU thread's message loop,
140 // and when the message loop is shutdown, it will recreate 147 // and when the message loop is shutdown, it will recreate
141 // ThreadTaskRunnerHandle, which will re-add a new task to the AtExitManager, 148 // ThreadTaskRunnerHandle, which will re-add a new task to the AtExitManager,
142 // which causes a deadlock because it's already locked. 149 // which causes a deadlock because it's already locked.
143 base::ThreadTaskRunnerHandle::IsSet(); 150 base::ThreadTaskRunnerHandle::IsSet();
144 return g_default_service.Get().gpu_thread; 151 return g_default_service.Get().GetGpuThreadService();
145 } 152 }
146 153
147 } // anonymous namespace 154 } // anonymous namespace
148 155
149 InProcessCommandBuffer::Service::Service() 156 InProcessCommandBuffer::Service::Service()
150 : gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()) {} 157 : gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()) {}
151 158
152 InProcessCommandBuffer::Service::Service(const GpuPreferences& gpu_preferences) 159 InProcessCommandBuffer::Service::Service(const GpuPreferences& gpu_preferences)
153 : gpu_preferences_(gpu_preferences), 160 : gpu_preferences_(gpu_preferences),
154 gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()) {} 161 gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()) {}
155 162
163 InProcessCommandBuffer::Service::Service(
164 gpu::gles2::MailboxManager* mailbox_manager,
165 scoped_refptr<gl::GLShareGroup> share_group)
166 : gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()),
167 mailbox_manager_(mailbox_manager),
168 share_group_(share_group) {}
169
156 InProcessCommandBuffer::Service::~Service() {} 170 InProcessCommandBuffer::Service::~Service() {}
157 171
158 const gpu::GpuPreferences& InProcessCommandBuffer::Service::gpu_preferences() { 172 const gpu::GpuPreferences& InProcessCommandBuffer::Service::gpu_preferences() {
159 return gpu_preferences_; 173 return gpu_preferences_;
160 } 174 }
161 175
162 const gpu::GpuDriverBugWorkarounds& 176 const gpu::GpuDriverBugWorkarounds&
163 InProcessCommandBuffer::Service::gpu_driver_bug_workarounds() { 177 InProcessCommandBuffer::Service::gpu_driver_bug_workarounds() {
164 return gpu_driver_bug_workarounds_; 178 return gpu_driver_bug_workarounds_;
165 } 179 }
(...skipping 78 matching lines...) Expand 10 before | Expand all | Expand 10 after
244 258
245 if (!MakeCurrent()) 259 if (!MakeCurrent())
246 return; 260 return;
247 261
248 executor_->PutChanged(); 262 executor_->PutChanged();
249 } 263 }
250 264
251 bool InProcessCommandBuffer::Initialize( 265 bool InProcessCommandBuffer::Initialize(
252 scoped_refptr<gl::GLSurface> surface, 266 scoped_refptr<gl::GLSurface> surface,
253 bool is_offscreen, 267 bool is_offscreen,
254 gfx::AcceleratedWidget window, 268 SurfaceHandle window,
255 const gles2::ContextCreationAttribHelper& attribs, 269 const gles2::ContextCreationAttribHelper& attribs,
256 InProcessCommandBuffer* share_group, 270 InProcessCommandBuffer* share_group,
257 GpuMemoryBufferManager* gpu_memory_buffer_manager, 271 GpuMemoryBufferManager* gpu_memory_buffer_manager,
258 ImageFactory* image_factory, 272 ImageFactory* image_factory,
259 scoped_refptr<base::SingleThreadTaskRunner> task_runner) { 273 scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
260 DCHECK(!share_group || service_.get() == share_group->service_.get()); 274 DCHECK(!share_group || service_.get() == share_group->service_.get());
261 275
262 if (surface) { 276 if (surface) {
263 // If a surface is provided, we are running in a webview and should not have 277 // If a surface is provided, we are running in a webview and should not have
264 // a task runner. 278 // a task runner.
(...skipping 14 matching lines...) Expand all
279 &capabilities, share_group, image_factory); 293 &capabilities, share_group, image_factory);
280 294
281 base::Callback<bool(void)> init_task = 295 base::Callback<bool(void)> init_task =
282 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread, 296 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
283 base::Unretained(this), params); 297 base::Unretained(this), params);
284 298
285 base::WaitableEvent completion( 299 base::WaitableEvent completion(
286 base::WaitableEvent::ResetPolicy::MANUAL, 300 base::WaitableEvent::ResetPolicy::MANUAL,
287 base::WaitableEvent::InitialState::NOT_SIGNALED); 301 base::WaitableEvent::InitialState::NOT_SIGNALED);
288 bool result = false; 302 bool result = false;
289 QueueTask( 303 QueueTask(true, base::Bind(&RunTaskWithResult<bool>, init_task, &result,
290 base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion)); 304 &completion));
291 completion.Wait(); 305 completion.Wait();
292 306
293 gpu_memory_buffer_manager_ = gpu_memory_buffer_manager; 307 gpu_memory_buffer_manager_ = gpu_memory_buffer_manager;
294 308
295 if (result) 309 if (result)
296 capabilities_ = capabilities; 310 capabilities_ = capabilities;
297 311
298 return result; 312 return result;
299 } 313 }
300 314
(...skipping 12 matching lines...) Expand all
313 &InProcessCommandBuffer::PumpCommandsOnGpuThread, gpu_thread_weak_ptr_)); 327 &InProcessCommandBuffer::PumpCommandsOnGpuThread, gpu_thread_weak_ptr_));
314 command_buffer->SetParseErrorCallback(base::Bind( 328 command_buffer->SetParseErrorCallback(base::Bind(
315 &InProcessCommandBuffer::OnContextLostOnGpuThread, gpu_thread_weak_ptr_)); 329 &InProcessCommandBuffer::OnContextLostOnGpuThread, gpu_thread_weak_ptr_));
316 330
317 gl_share_group_ = params.context_group ? params.context_group->gl_share_group_ 331 gl_share_group_ = params.context_group ? params.context_group->gl_share_group_
318 : service_->share_group(); 332 : service_->share_group();
319 333
320 bool bind_generates_resource = false; 334 bool bind_generates_resource = false;
321 scoped_refptr<gles2::FeatureInfo> feature_info = 335 scoped_refptr<gles2::FeatureInfo> feature_info =
322 new gles2::FeatureInfo(service_->gpu_driver_bug_workarounds()); 336 new gles2::FeatureInfo(service_->gpu_driver_bug_workarounds());
323 decoder_.reset(gles2::GLES2Decoder::Create( 337
338 context_group_ =
324 params.context_group 339 params.context_group
325 ? params.context_group->decoder_->GetContextGroup() 340 ? params.context_group->decoder_->GetContextGroup()
326 : new gles2::ContextGroup( 341 : new gles2::ContextGroup(
327 service_->gpu_preferences(), service_->mailbox_manager(), NULL, 342 service_->gpu_preferences(), service_->mailbox_manager(), NULL,
328 service_->shader_translator_cache(), 343 service_->shader_translator_cache(),
329 service_->framebuffer_completeness_cache(), feature_info, 344 service_->framebuffer_completeness_cache(), feature_info,
330 bind_generates_resource, nullptr, nullptr))); 345 bind_generates_resource, nullptr, nullptr);
346
347 decoder_.reset(gles2::GLES2Decoder::Create(context_group_.get()));
331 348
332 executor_.reset(new CommandExecutor(command_buffer.get(), decoder_.get(), 349 executor_.reset(new CommandExecutor(command_buffer.get(), decoder_.get(),
333 decoder_.get())); 350 decoder_.get()));
334 command_buffer->SetGetBufferChangeCallback(base::Bind( 351 command_buffer->SetGetBufferChangeCallback(base::Bind(
335 &CommandExecutor::SetGetBuffer, base::Unretained(executor_.get()))); 352 &CommandExecutor::SetGetBuffer, base::Unretained(executor_.get())));
336 command_buffer_ = std::move(command_buffer); 353 command_buffer_ = std::move(command_buffer);
337 354
338 decoder_->set_engine(executor_.get()); 355 decoder_->set_engine(executor_.get());
339 356
340 if (!surface_.get()) { 357 if (!surface_.get()) {
341 if (params.is_offscreen) 358 if (params.is_offscreen) {
342 surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size()); 359 surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
343 else 360 } else {
344 surface_ = gl::init::CreateViewGLSurface(params.window); 361 surface_ = ImageTransportSurface::CreateNativeSurface(
362 gpu_thread_weak_ptr_factory_.GetWeakPtr(), params.window,
363 gl::GLSurface::SURFACE_DEFAULT);
364 if (!surface_ || !surface_->Initialize(gl::GLSurface::SURFACE_DEFAULT)) {
365 surface_ = nullptr;
366 DLOG(ERROR) << "Failed to create surface.";
367 return false;
368 }
369 }
345 } 370 }
346 371
347 if (!surface_.get()) { 372 if (!surface_.get()) {
348 LOG(ERROR) << "Could not create GLSurface."; 373 LOG(ERROR) << "Could not create GLSurface.";
349 DestroyOnGpuThread(); 374 DestroyOnGpuThread();
350 return false; 375 return false;
351 } 376 }
352 377
353 sync_point_order_data_ = SyncPointOrderData::Create(); 378 sync_point_order_data_ = SyncPointOrderData::Create();
354 sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient( 379 sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient(
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after
437 void InProcessCommandBuffer::Destroy() { 462 void InProcessCommandBuffer::Destroy() {
438 CheckSequencedThread(); 463 CheckSequencedThread();
439 client_thread_weak_ptr_factory_.InvalidateWeakPtrs(); 464 client_thread_weak_ptr_factory_.InvalidateWeakPtrs();
440 gpu_control_client_ = nullptr; 465 gpu_control_client_ = nullptr;
441 base::WaitableEvent completion( 466 base::WaitableEvent completion(
442 base::WaitableEvent::ResetPolicy::MANUAL, 467 base::WaitableEvent::ResetPolicy::MANUAL,
443 base::WaitableEvent::InitialState::NOT_SIGNALED); 468 base::WaitableEvent::InitialState::NOT_SIGNALED);
444 bool result = false; 469 bool result = false;
445 base::Callback<bool(void)> destroy_task = base::Bind( 470 base::Callback<bool(void)> destroy_task = base::Bind(
446 &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this)); 471 &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
447 QueueTask( 472 QueueTask(true, base::Bind(&RunTaskWithResult<bool>, destroy_task, &result,
448 base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion)); 473 &completion));
449 completion.Wait(); 474 completion.Wait();
450 } 475 }
451 476
452 bool InProcessCommandBuffer::DestroyOnGpuThread() { 477 bool InProcessCommandBuffer::DestroyOnGpuThread() {
453 CheckSequencedThread(); 478 CheckSequencedThread();
454 gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs(); 479 gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
455 command_buffer_.reset(); 480 command_buffer_.reset();
456 // Clean up GL resources if possible. 481 // Clean up GL resources if possible.
457 bool have_context = context_.get() && context_->MakeCurrent(surface_.get()); 482 bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
458 if (decoder_) { 483 if (decoder_) {
459 decoder_->Destroy(have_context); 484 decoder_->Destroy(have_context);
460 decoder_.reset(); 485 decoder_.reset();
461 } 486 }
462 context_ = nullptr; 487 context_ = nullptr;
463 surface_ = nullptr; 488 surface_ = nullptr;
464 sync_point_client_ = nullptr; 489 sync_point_client_ = nullptr;
465 if (sync_point_order_data_) { 490 if (sync_point_order_data_) {
466 sync_point_order_data_->Destroy(); 491 sync_point_order_data_->Destroy();
467 sync_point_order_data_ = nullptr; 492 sync_point_order_data_ = nullptr;
468 } 493 }
469 gl_share_group_ = nullptr; 494 gl_share_group_ = nullptr;
495 std::queue<std::unique_ptr<GpuTask>> empty;
496 task_queue_.swap(empty);
470 497
471 return true; 498 return true;
472 } 499 }
473 500
474 void InProcessCommandBuffer::CheckSequencedThread() { 501 void InProcessCommandBuffer::CheckSequencedThread() {
475 DCHECK(!sequence_checker_ || sequence_checker_->CalledOnValidSequence()); 502 DCHECK(!sequence_checker_ || sequence_checker_->CalledOnValidSequence());
476 } 503 }
477 504
478 void InProcessCommandBuffer::OnContextLostOnGpuThread() { 505 void InProcessCommandBuffer::OnContextLostOnGpuThread() {
479 if (!origin_task_runner_) 506 if (!origin_task_runner_)
(...skipping 17 matching lines...) Expand all
497 } 524 }
498 525
499 CommandBuffer::State InProcessCommandBuffer::GetStateFast() { 526 CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
500 CheckSequencedThread(); 527 CheckSequencedThread();
501 base::AutoLock lock(state_after_last_flush_lock_); 528 base::AutoLock lock(state_after_last_flush_lock_);
502 if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U) 529 if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
503 last_state_ = state_after_last_flush_; 530 last_state_ = state_after_last_flush_;
504 return last_state_; 531 return last_state_;
505 } 532 }
506 533
534 void InProcessCommandBuffer::QueueTask(bool out_of_order,
535 const base::Closure& task) {
536 if (out_of_order) {
537 service_->ScheduleTask(task);
538 return;
539 }
540 base::AutoLock lock(task_queue_lock_);
541 SyncPointManager* sync_manager = service_->sync_point_manager();
542 uint32_t order_num =
543 sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager);
544 task_queue_.push(base::MakeUnique<GpuTask>(task, order_num));
545 service_->ScheduleTask(base::Bind(
546 &InProcessCommandBuffer::ProcessTasksOnGpuThread, gpu_thread_weak_ptr_));
547 }
548
549 void InProcessCommandBuffer::ProcessTasksOnGpuThread() {
550 while (executor_->scheduled()) {
551 base::AutoLock lock(task_queue_lock_);
552 if (task_queue_.empty())
553 break;
554 GpuTask* task = task_queue_.front().get();
555 sync_point_order_data_->BeginProcessingOrderNumber(task->order_number);
556 task->callback.Run();
557 if (!executor_->scheduled()) {
558 sync_point_order_data_->PauseProcessingOrderNumber(task->order_number);
559 return;
560 }
561 sync_point_order_data_->FinishProcessingOrderNumber(task->order_number);
562 task_queue_.pop();
563 }
564 }
565
507 CommandBuffer::State InProcessCommandBuffer::GetLastState() { 566 CommandBuffer::State InProcessCommandBuffer::GetLastState() {
508 CheckSequencedThread(); 567 CheckSequencedThread();
509 return last_state_; 568 return last_state_;
510 } 569 }
511 570
512 int32_t InProcessCommandBuffer::GetLastToken() { 571 int32_t InProcessCommandBuffer::GetLastToken() {
513 CheckSequencedThread(); 572 CheckSequencedThread();
514 GetStateFast(); 573 GetStateFast();
515 return last_state_.token; 574 return last_state_.token;
516 } 575 }
517 576
518 void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset, 577 void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset) {
519 uint32_t order_num) {
520 CheckSequencedThread(); 578 CheckSequencedThread();
521 ScopedEvent handle_flush(&flush_event_); 579 ScopedEvent handle_flush(&flush_event_);
522 base::AutoLock lock(command_buffer_lock_); 580 base::AutoLock lock(command_buffer_lock_);
523 581
524 { 582 {
525 ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(),
526 order_num);
527 command_buffer_->Flush(put_offset); 583 command_buffer_->Flush(put_offset);
528 { 584 {
529 // Update state before signaling the flush event. 585 // Update state before signaling the flush event.
530 base::AutoLock lock(state_after_last_flush_lock_); 586 base::AutoLock lock(state_after_last_flush_lock_);
531 state_after_last_flush_ = command_buffer_->GetLastState(); 587 state_after_last_flush_ = command_buffer_->GetLastState();
532 } 588 }
533
534 // Currently the in process command buffer does not support being
535 // descheduled, if it does we would need to back off on calling the finish
536 // processing number function until the message is rescheduled and finished
537 // processing. This DCHECK is to enforce this.
538 DCHECK(error::IsError(state_after_last_flush_.error) ||
539 put_offset == state_after_last_flush_.get_offset);
540 } 589 }
541 590
542 // If we've processed all pending commands but still have pending queries, 591 // If we've processed all pending commands but still have pending queries,
543 // pump idle work until the query is passed. 592 // pump idle work until the query is passed.
544 if (put_offset == state_after_last_flush_.get_offset && 593 if (put_offset == state_after_last_flush_.get_offset &&
545 (executor_->HasMoreIdleWork() || executor_->HasPendingQueries())) { 594 (executor_->HasMoreIdleWork() || executor_->HasPendingQueries())) {
546 ScheduleDelayedWorkOnGpuThread(); 595 ScheduleDelayedWorkOnGpuThread();
547 } 596 }
548 } 597 }
549 598
(...skipping 21 matching lines...) Expand all
571 } 620 }
572 621
573 void InProcessCommandBuffer::Flush(int32_t put_offset) { 622 void InProcessCommandBuffer::Flush(int32_t put_offset) {
574 CheckSequencedThread(); 623 CheckSequencedThread();
575 if (last_state_.error != gpu::error::kNoError) 624 if (last_state_.error != gpu::error::kNoError)
576 return; 625 return;
577 626
578 if (last_put_offset_ == put_offset) 627 if (last_put_offset_ == put_offset)
579 return; 628 return;
580 629
581 SyncPointManager* sync_manager = service_->sync_point_manager();
582 const uint32_t order_num =
583 sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager);
584 last_put_offset_ = put_offset; 630 last_put_offset_ = put_offset;
585 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, 631 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
586 gpu_thread_weak_ptr_, put_offset, order_num); 632 gpu_thread_weak_ptr_, put_offset);
587 QueueTask(task); 633 QueueTask(false, task);
588 634
589 flushed_fence_sync_release_ = next_fence_sync_release_ - 1; 635 flushed_fence_sync_release_ = next_fence_sync_release_ - 1;
590 } 636 }
591 637
592 void InProcessCommandBuffer::OrderingBarrier(int32_t put_offset) { 638 void InProcessCommandBuffer::OrderingBarrier(int32_t put_offset) {
593 Flush(put_offset); 639 Flush(put_offset);
594 } 640 }
595 641
596 void InProcessCommandBuffer::WaitForTokenInRange(int32_t start, int32_t end) { 642 void InProcessCommandBuffer::WaitForTokenInRange(int32_t start, int32_t end) {
597 CheckSequencedThread(); 643 CheckSequencedThread();
598 while (!InRange(start, end, GetLastToken()) && 644 while (!InRange(start, end, GetLastToken()) &&
599 last_state_.error == gpu::error::kNoError) 645 last_state_.error == gpu::error::kNoError) {
600 flush_event_.Wait(); 646 flush_event_.Wait();
647 }
601 } 648 }
602 649
603 void InProcessCommandBuffer::WaitForGetOffsetInRange(int32_t start, 650 void InProcessCommandBuffer::WaitForGetOffsetInRange(int32_t start,
604 int32_t end) { 651 int32_t end) {
605 CheckSequencedThread(); 652 CheckSequencedThread();
606 653
607 GetStateFast(); 654 GetStateFast();
608 while (!InRange(start, end, last_state_.get_offset) && 655 while (!InRange(start, end, last_state_.get_offset) &&
609 last_state_.error == gpu::error::kNoError) { 656 last_state_.error == gpu::error::kNoError) {
610 flush_event_.Wait(); 657 flush_event_.Wait();
611 GetStateFast(); 658 GetStateFast();
612 } 659 }
613 } 660 }
614 661
615 void InProcessCommandBuffer::SetGetBuffer(int32_t shm_id) { 662 void InProcessCommandBuffer::SetGetBuffer(int32_t shm_id) {
616 CheckSequencedThread(); 663 CheckSequencedThread();
617 if (last_state_.error != gpu::error::kNoError) 664 if (last_state_.error != gpu::error::kNoError)
618 return; 665 return;
619 666
620 base::WaitableEvent completion( 667 base::WaitableEvent completion(
621 base::WaitableEvent::ResetPolicy::MANUAL, 668 base::WaitableEvent::ResetPolicy::MANUAL,
622 base::WaitableEvent::InitialState::NOT_SIGNALED); 669 base::WaitableEvent::InitialState::NOT_SIGNALED);
623 base::Closure task = 670 base::Closure task =
624 base::Bind(&InProcessCommandBuffer::SetGetBufferOnGpuThread, 671 base::Bind(&InProcessCommandBuffer::SetGetBufferOnGpuThread,
625 base::Unretained(this), shm_id, &completion); 672 base::Unretained(this), shm_id, &completion);
626 QueueTask(task); 673 QueueTask(false, task);
627 completion.Wait(); 674 completion.Wait();
628 675
629 { 676 {
630 base::AutoLock lock(state_after_last_flush_lock_); 677 base::AutoLock lock(state_after_last_flush_lock_);
631 state_after_last_flush_ = command_buffer_->GetLastState(); 678 state_after_last_flush_ = command_buffer_->GetLastState();
632 } 679 }
633 } 680 }
634 681
635 void InProcessCommandBuffer::SetGetBufferOnGpuThread( 682 void InProcessCommandBuffer::SetGetBufferOnGpuThread(
636 int32_t shm_id, 683 int32_t shm_id,
(...skipping 11 matching lines...) Expand all
648 base::AutoLock lock(command_buffer_lock_); 695 base::AutoLock lock(command_buffer_lock_);
649 return command_buffer_->CreateTransferBuffer(size, id); 696 return command_buffer_->CreateTransferBuffer(size, id);
650 } 697 }
651 698
652 void InProcessCommandBuffer::DestroyTransferBuffer(int32_t id) { 699 void InProcessCommandBuffer::DestroyTransferBuffer(int32_t id) {
653 CheckSequencedThread(); 700 CheckSequencedThread();
654 base::Closure task = 701 base::Closure task =
655 base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread, 702 base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
656 base::Unretained(this), id); 703 base::Unretained(this), id);
657 704
658 QueueTask(task); 705 QueueTask(false, task);
659 } 706 }
660 707
661 void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32_t id) { 708 void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32_t id) {
662 base::AutoLock lock(command_buffer_lock_); 709 base::AutoLock lock(command_buffer_lock_);
663 command_buffer_->DestroyTransferBuffer(id); 710 command_buffer_->DestroyTransferBuffer(id);
664 } 711 }
665 712
666 void InProcessCommandBuffer::SetGpuControlClient(GpuControlClient* client) { 713 void InProcessCommandBuffer::SetGpuControlClient(GpuControlClient* client) {
667 gpu_control_client_ = client; 714 gpu_control_client_ = client;
668 } 715 }
(...skipping 20 matching lines...) Expand all
689 DCHECK(gpu::IsImageFormatCompatibleWithGpuMemoryBufferFormat( 736 DCHECK(gpu::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
690 internalformat, gpu_memory_buffer->GetFormat())); 737 internalformat, gpu_memory_buffer->GetFormat()));
691 738
692 // This handle is owned by the GPU thread and must be passed to it or it 739 // This handle is owned by the GPU thread and must be passed to it or it
693 // will leak. In other words, do not early out on error between here and the 740 // will leak. In other words, do not early out on error between here and the
694 // queuing of the CreateImage task below. 741 // queuing of the CreateImage task below.
695 bool requires_sync_point = false; 742 bool requires_sync_point = false;
696 gfx::GpuMemoryBufferHandle handle = ShareGpuMemoryBufferToGpuThread( 743 gfx::GpuMemoryBufferHandle handle = ShareGpuMemoryBufferToGpuThread(
697 gpu_memory_buffer->GetHandle(), &requires_sync_point); 744 gpu_memory_buffer->GetHandle(), &requires_sync_point);
698 745
699 SyncPointManager* sync_manager = service_->sync_point_manager();
700 const uint32_t order_num =
701 sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager);
702
703 uint64_t fence_sync = 0; 746 uint64_t fence_sync = 0;
704 if (requires_sync_point) { 747 if (requires_sync_point) {
705 fence_sync = GenerateFenceSyncRelease(); 748 fence_sync = GenerateFenceSyncRelease();
706 749
707 // Previous fence syncs should be flushed already. 750 // Previous fence syncs should be flushed already.
708 DCHECK_EQ(fence_sync - 1, flushed_fence_sync_release_); 751 DCHECK_EQ(fence_sync - 1, flushed_fence_sync_release_);
709 } 752 }
710 753
711 QueueTask(base::Bind( 754 QueueTask(false, base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
712 &InProcessCommandBuffer::CreateImageOnGpuThread, base::Unretained(this), 755 base::Unretained(this), new_id, handle,
713 new_id, handle, gfx::Size(base::checked_cast<int>(width), 756 gfx::Size(base::checked_cast<int>(width),
714 base::checked_cast<int>(height)), 757 base::checked_cast<int>(height)),
715 gpu_memory_buffer->GetFormat(), 758 gpu_memory_buffer->GetFormat(),
716 base::checked_cast<uint32_t>(internalformat), order_num, fence_sync)); 759 base::checked_cast<uint32_t>(internalformat),
760 fence_sync));
717 761
718 if (fence_sync) { 762 if (fence_sync) {
719 flushed_fence_sync_release_ = fence_sync; 763 flushed_fence_sync_release_ = fence_sync;
720 SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(), 764 SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(),
721 GetCommandBufferID(), fence_sync); 765 GetCommandBufferID(), fence_sync);
722 sync_token.SetVerifyFlush(); 766 sync_token.SetVerifyFlush();
723 gpu_memory_buffer_manager_->SetDestructionSyncToken(gpu_memory_buffer, 767 gpu_memory_buffer_manager_->SetDestructionSyncToken(gpu_memory_buffer,
724 sync_token); 768 sync_token);
725 } 769 }
726 770
727 return new_id; 771 return new_id;
728 } 772 }
729 773
730 void InProcessCommandBuffer::CreateImageOnGpuThread( 774 void InProcessCommandBuffer::CreateImageOnGpuThread(
731 int32_t id, 775 int32_t id,
732 const gfx::GpuMemoryBufferHandle& handle, 776 const gfx::GpuMemoryBufferHandle& handle,
733 const gfx::Size& size, 777 const gfx::Size& size,
734 gfx::BufferFormat format, 778 gfx::BufferFormat format,
735 uint32_t internalformat, 779 uint32_t internalformat,
736 uint32_t order_num,
737 uint64_t fence_sync) { 780 uint64_t fence_sync) {
738 ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(),
739 order_num);
740 if (!decoder_) 781 if (!decoder_)
741 return; 782 return;
742 783
743 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); 784 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
744 DCHECK(image_manager); 785 DCHECK(image_manager);
745 if (image_manager->LookupImage(id)) { 786 if (image_manager->LookupImage(id)) {
746 LOG(ERROR) << "Image already exists with same ID."; 787 LOG(ERROR) << "Image already exists with same ID.";
747 return; 788 return;
748 } 789 }
749 790
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
788 } 829 }
789 830
790 if (fence_sync) { 831 if (fence_sync) {
791 sync_point_client_->ReleaseFenceSync(fence_sync); 832 sync_point_client_->ReleaseFenceSync(fence_sync);
792 } 833 }
793 } 834 }
794 835
795 void InProcessCommandBuffer::DestroyImage(int32_t id) { 836 void InProcessCommandBuffer::DestroyImage(int32_t id) {
796 CheckSequencedThread(); 837 CheckSequencedThread();
797 838
798 QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread, 839 QueueTask(false, base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread,
799 base::Unretained(this), id)); 840 base::Unretained(this), id));
800 } 841 }
801 842
802 void InProcessCommandBuffer::DestroyImageOnGpuThread(int32_t id) { 843 void InProcessCommandBuffer::DestroyImageOnGpuThread(int32_t id) {
803 if (!decoder_) 844 if (!decoder_)
804 return; 845 return;
805 846
806 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); 847 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
807 DCHECK(image_manager); 848 DCHECK(image_manager);
808 if (!image_manager->LookupImage(id)) { 849 if (!image_manager->LookupImage(id)) {
809 LOG(ERROR) << "Image with ID doesn't exist."; 850 LOG(ERROR) << "Image with ID doesn't exist.";
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
843 mailbox_manager->PushTextureUpdates(sync_token); 884 mailbox_manager->PushTextureUpdates(sync_token);
844 } 885 }
845 886
846 sync_point_client_->ReleaseFenceSync(release); 887 sync_point_client_->ReleaseFenceSync(release);
847 } 888 }
848 889
849 bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread( 890 bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread(
850 gpu::CommandBufferNamespace namespace_id, 891 gpu::CommandBufferNamespace namespace_id,
851 gpu::CommandBufferId command_buffer_id, 892 gpu::CommandBufferId command_buffer_id,
852 uint64_t release) { 893 uint64_t release) {
894 DCHECK(!waiting_for_sync_point_);
853 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager(); 895 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager();
854 DCHECK(sync_point_manager); 896 DCHECK(sync_point_manager);
855 897
856 scoped_refptr<gpu::SyncPointClientState> release_state = 898 scoped_refptr<gpu::SyncPointClientState> release_state =
857 sync_point_manager->GetSyncPointClientState(namespace_id, 899 sync_point_manager->GetSyncPointClientState(namespace_id,
858 command_buffer_id); 900 command_buffer_id);
859 901
860 if (!release_state) 902 if (!release_state)
861 return true; 903 return true;
862 904
863 if (!release_state->IsFenceSyncReleased(release)) { 905 if (release_state->IsFenceSyncReleased(release)) {
906 gles2::MailboxManager* mailbox_manager =
907 decoder_->GetContextGroup()->mailbox_manager();
908 SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
909 mailbox_manager->PullTextureUpdates(sync_token);
910 return true;
911 }
912
913 if (service_->BlockThreadOnWaitSyncToken()) {
864 // Use waitable event which is signalled when the release fence is released. 914 // Use waitable event which is signalled when the release fence is released.
865 sync_point_client_->Wait( 915 sync_point_client_->Wait(
866 release_state.get(), release, 916 release_state.get(), release,
867 base::Bind(&base::WaitableEvent::Signal, 917 base::Bind(&base::WaitableEvent::Signal,
868 base::Unretained(&fence_sync_wait_event_))); 918 base::Unretained(&fence_sync_wait_event_)));
869 fence_sync_wait_event_.Wait(); 919 fence_sync_wait_event_.Wait();
920 return true;
870 } 921 }
871 922
923 waiting_for_sync_point_ = true;
924 sync_point_client_->Wait(
925 release_state.get(), release,
926 base::Bind(&InProcessCommandBuffer::OnWaitFenceSyncCompleted,
927 gpu_thread_weak_ptr_factory_.GetWeakPtr(), namespace_id,
928 command_buffer_id, release));
929
930 if (!waiting_for_sync_point_)
931 return true;
932
933 executor_->SetScheduled(false);
934 return false;
935 }
936
937 void InProcessCommandBuffer::OnWaitFenceSyncCompleted(
938 CommandBufferNamespace namespace_id,
939 CommandBufferId command_buffer_id,
940 uint64_t release) {
941 DCHECK(waiting_for_sync_point_);
872 gles2::MailboxManager* mailbox_manager = 942 gles2::MailboxManager* mailbox_manager =
873 decoder_->GetContextGroup()->mailbox_manager(); 943 decoder_->GetContextGroup()->mailbox_manager();
874 SyncToken sync_token(namespace_id, 0, command_buffer_id, release); 944 SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
875 mailbox_manager->PullTextureUpdates(sync_token); 945 mailbox_manager->PullTextureUpdates(sync_token);
876 return true; 946 waiting_for_sync_point_ = false;
947 executor_->SetScheduled(true);
948 QueueTask(false, base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
949 gpu_thread_weak_ptr_, last_put_offset_));
877 } 950 }
878 951
879 void InProcessCommandBuffer::DescheduleUntilFinishedOnGpuThread() { 952 void InProcessCommandBuffer::DescheduleUntilFinishedOnGpuThread() {
880 NOTIMPLEMENTED(); 953 DCHECK(executor_->scheduled());
954 DCHECK(executor_->HasPollingWork());
955
956 executor_->SetScheduled(false);
881 } 957 }
882 958
883 void InProcessCommandBuffer::RescheduleAfterFinishedOnGpuThread() { 959 void InProcessCommandBuffer::RescheduleAfterFinishedOnGpuThread() {
884 NOTIMPLEMENTED(); 960 DCHECK(!executor_->scheduled());
961
962 executor_->SetScheduled(true);
963 ProcessTasksOnGpuThread();
885 } 964 }
886 965
887 void InProcessCommandBuffer::SignalSyncTokenOnGpuThread( 966 void InProcessCommandBuffer::SignalSyncTokenOnGpuThread(
888 const SyncToken& sync_token, 967 const SyncToken& sync_token,
889 const base::Closure& callback) { 968 const base::Closure& callback) {
890 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager(); 969 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager();
891 DCHECK(sync_point_manager); 970 DCHECK(sync_point_manager);
892 971
893 scoped_refptr<gpu::SyncPointClientState> release_state = 972 scoped_refptr<gpu::SyncPointClientState> release_state =
894 sync_point_manager->GetSyncPointClientState( 973 sync_point_manager->GetSyncPointClientState(
895 sync_token.namespace_id(), sync_token.command_buffer_id()); 974 sync_token.namespace_id(), sync_token.command_buffer_id());
896 975
897 if (!release_state) { 976 if (!release_state) {
898 callback.Run(); 977 callback.Run();
899 return; 978 return;
900 } 979 }
901 980
902 sync_point_client_->WaitOutOfOrder( 981 sync_point_client_->WaitOutOfOrder(
903 release_state.get(), sync_token.release_count(), WrapCallback(callback)); 982 release_state.get(), sync_token.release_count(), WrapCallback(callback));
904 } 983 }
905 984
906 void InProcessCommandBuffer::SignalQuery(unsigned query_id, 985 void InProcessCommandBuffer::SignalQuery(unsigned query_id,
907 const base::Closure& callback) { 986 const base::Closure& callback) {
908 CheckSequencedThread(); 987 CheckSequencedThread();
909 QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread, 988 QueueTask(false, base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
910 base::Unretained(this), query_id, 989 base::Unretained(this), query_id,
911 WrapCallback(callback))); 990 WrapCallback(callback)));
912 } 991 }
913 992
914 void InProcessCommandBuffer::SignalQueryOnGpuThread( 993 void InProcessCommandBuffer::SignalQueryOnGpuThread(
915 unsigned query_id, 994 unsigned query_id,
916 const base::Closure& callback) { 995 const base::Closure& callback) {
917 gles2::QueryManager* query_manager_ = decoder_->GetQueryManager(); 996 gles2::QueryManager* query_manager_ = decoder_->GetQueryManager();
918 DCHECK(query_manager_); 997 DCHECK(query_manager_);
919 998
920 gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id); 999 gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id);
921 if (!query) 1000 if (!query)
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
957 return release <= flushed_fence_sync_release_; 1036 return release <= flushed_fence_sync_release_;
958 } 1037 }
959 1038
960 bool InProcessCommandBuffer::IsFenceSyncFlushReceived(uint64_t release) { 1039 bool InProcessCommandBuffer::IsFenceSyncFlushReceived(uint64_t release) {
961 return IsFenceSyncFlushed(release); 1040 return IsFenceSyncFlushed(release);
962 } 1041 }
963 1042
964 void InProcessCommandBuffer::SignalSyncToken(const SyncToken& sync_token, 1043 void InProcessCommandBuffer::SignalSyncToken(const SyncToken& sync_token,
965 const base::Closure& callback) { 1044 const base::Closure& callback) {
966 CheckSequencedThread(); 1045 CheckSequencedThread();
967 QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread, 1046 QueueTask(
968 base::Unretained(this), sync_token, 1047 true,
969 WrapCallback(callback))); 1048 base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread,
1049 base::Unretained(this), sync_token, WrapCallback(callback)));
970 } 1050 }
971 1051
972 bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken( 1052 bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken(
973 const SyncToken* sync_token) { 1053 const SyncToken* sync_token) {
974 return sync_token->namespace_id() == GetNamespaceID(); 1054 return sync_token->namespace_id() == GetNamespaceID();
975 } 1055 }
976 1056
1057 #if defined(OS_WIN)
1058 void InProcessCommandBuffer::DidCreateAcceleratedSurfaceChildWindow(
1059 SurfaceHandle parent_window,
1060 SurfaceHandle child_window) {
1061 // TODO(fsamuel): Implement this.
1062 }
1063 #endif
1064
1065 void InProcessCommandBuffer::DidSwapBuffersComplete(
1066 SwapBuffersCompleteParams params) {
1067 #if defined(OS_MACOSX)
1068 gpu::GpuProcessHostedCALayerTreeParamsMac params_mac;
1069 params_mac.ca_context_id = params.ca_context_id;
1070 params_mac.fullscreen_low_power_ca_context_valid =
1071 params.fullscreen_low_power_ca_context_valid;
1072 params_mac.fullscreen_low_power_ca_context_id =
1073 params.fullscreen_low_power_ca_context_id;
1074 params_mac.io_surface.reset(IOSurfaceLookupFromMachPort(params.io_surface));
1075 params_mac.pixel_size = params.pixel_size;
1076 params_mac.scale_factor = params.scale_factor;
1077 params_mac.responses = std::move(params.in_use_responses);
1078 gpu::GpuProcessHostedCALayerTreeParamsMac* mac_frame_ptr = &params_mac;
1079 #else
1080 gpu::GpuProcessHostedCALayerTreeParamsMac* mac_frame_ptr = nullptr;
1081 #endif
1082 if (!swap_buffers_completion_callback_.is_null()) {
1083 if (!ui::LatencyInfo::Verify(
1084 params.latency_info,
1085 "InProcessCommandBuffer::DidSwapBuffersComplete")) {
1086 swap_buffers_completion_callback_.Run(std::vector<ui::LatencyInfo>(),
1087 params.result, mac_frame_ptr);
1088 } else {
1089 swap_buffers_completion_callback_.Run(params.latency_info, params.result,
1090 mac_frame_ptr);
1091 }
1092 }
1093 }
1094
1095 const gles2::FeatureInfo* InProcessCommandBuffer::GetFeatureInfo() const {
1096 return context_group_->feature_info();
1097 }
1098
1099 void InProcessCommandBuffer::SetLatencyInfoCallback(
1100 const LatencyInfoCallback& callback) {
1101 // TODO(fsamuel): Implement this.
1102 }
1103
1104 void InProcessCommandBuffer::UpdateVSyncParameters(base::TimeTicks timebase,
1105 base::TimeDelta interval) {
1106 if (!update_vsync_parameters_completion_callback_.is_null())
1107 update_vsync_parameters_completion_callback_.Run(timebase, interval);
1108 }
1109
1110 void InProcessCommandBuffer::SetSwapBuffersCompletionCallback(
1111 const SwapBuffersCompletionCallback& callback) {
1112 swap_buffers_completion_callback_ = callback;
1113 }
1114
1115 void InProcessCommandBuffer::SetUpdateVSyncParametersCallback(
1116 const UpdateVSyncParametersCallback& callback) {
1117 update_vsync_parameters_completion_callback_ = callback;
1118 }
1119
977 gpu::error::Error InProcessCommandBuffer::GetLastError() { 1120 gpu::error::Error InProcessCommandBuffer::GetLastError() {
978 CheckSequencedThread(); 1121 CheckSequencedThread();
979 return last_state_.error; 1122 return last_state_.error;
980 } 1123 }
981 1124
982 namespace { 1125 namespace {
983 1126
984 void PostCallback( 1127 void PostCallback(
985 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner, 1128 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
986 const base::Closure& callback) { 1129 const base::Closure& callback) {
(...skipping 21 matching lines...) Expand all
1008 base::Closure callback_on_client_thread = 1151 base::Closure callback_on_client_thread =
1009 base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback)); 1152 base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
1010 base::Closure wrapped_callback = 1153 base::Closure wrapped_callback =
1011 base::Bind(&PostCallback, base::ThreadTaskRunnerHandle::IsSet() 1154 base::Bind(&PostCallback, base::ThreadTaskRunnerHandle::IsSet()
1012 ? base::ThreadTaskRunnerHandle::Get() 1155 ? base::ThreadTaskRunnerHandle::Get()
1013 : nullptr, 1156 : nullptr,
1014 callback_on_client_thread); 1157 callback_on_client_thread);
1015 return wrapped_callback; 1158 return wrapped_callback;
1016 } 1159 }
1017 1160
1018 GpuInProcessThread::GpuInProcessThread(SyncPointManager* sync_point_manager) 1161 InProcessCommandBuffer::GpuTask::GpuTask(const base::Closure& callback,
1019 : base::Thread("GpuThread"), sync_point_manager_(sync_point_manager) { 1162 uint32_t order_number)
1020 Start(); 1163 : callback(callback), order_number(order_number) {}
1021 }
1022 1164
1023 GpuInProcessThread::~GpuInProcessThread() { 1165 InProcessCommandBuffer::GpuTask::~GpuTask() {}
1024 Stop();
1025 }
1026
1027 void GpuInProcessThread::AddRef() const {
1028 base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
1029 }
1030 void GpuInProcessThread::Release() const {
1031 base::RefCountedThreadSafe<GpuInProcessThread>::Release();
1032 }
1033
1034 void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
1035 task_runner()->PostTask(FROM_HERE, task);
1036 }
1037
1038 void GpuInProcessThread::ScheduleDelayedWork(const base::Closure& callback) {
1039 // Match delay with GpuCommandBufferStub.
1040 task_runner()->PostDelayedTask(FROM_HERE, callback,
1041 base::TimeDelta::FromMilliseconds(2));
1042 }
1043
1044 bool GpuInProcessThread::UseVirtualizedGLContexts() {
1045 return false;
1046 }
1047
1048 scoped_refptr<gles2::ShaderTranslatorCache>
1049 GpuInProcessThread::shader_translator_cache() {
1050 if (!shader_translator_cache_.get()) {
1051 shader_translator_cache_ =
1052 new gpu::gles2::ShaderTranslatorCache(gpu_preferences());
1053 }
1054 return shader_translator_cache_;
1055 }
1056
1057 scoped_refptr<gles2::FramebufferCompletenessCache>
1058 GpuInProcessThread::framebuffer_completeness_cache() {
1059 if (!framebuffer_completeness_cache_.get())
1060 framebuffer_completeness_cache_ =
1061 new gpu::gles2::FramebufferCompletenessCache;
1062 return framebuffer_completeness_cache_;
1063 }
1064
1065 SyncPointManager* GpuInProcessThread::sync_point_manager() {
1066 return sync_point_manager_;
1067 }
1068 1166
1069 } // namespace gpu 1167 } // namespace gpu
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698