OLD | NEW |
---|---|
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "gpu/ipc/in_process_command_buffer.h" | 5 #include "gpu/ipc/in_process_command_buffer.h" |
6 | 6 |
7 #include <stddef.h> | 7 #include <stddef.h> |
8 #include <stdint.h> | 8 #include <stdint.h> |
9 | 9 |
10 #include <queue> | 10 #include <queue> |
11 #include <set> | 11 #include <set> |
12 #include <utility> | 12 #include <utility> |
13 | 13 |
14 #include "base/bind.h" | 14 #include "base/bind.h" |
15 #include "base/bind_helpers.h" | 15 #include "base/bind_helpers.h" |
16 #include "base/command_line.h" | 16 #include "base/command_line.h" |
17 #include "base/lazy_instance.h" | 17 #include "base/lazy_instance.h" |
18 #include "base/location.h" | 18 #include "base/location.h" |
19 #include "base/logging.h" | 19 #include "base/logging.h" |
20 #include "base/memory/ptr_util.h" | |
20 #include "base/memory/weak_ptr.h" | 21 #include "base/memory/weak_ptr.h" |
21 #include "base/numerics/safe_conversions.h" | 22 #include "base/numerics/safe_conversions.h" |
22 #include "base/sequence_checker.h" | 23 #include "base/sequence_checker.h" |
23 #include "base/single_thread_task_runner.h" | 24 #include "base/single_thread_task_runner.h" |
24 #include "base/threading/thread_task_runner_handle.h" | 25 #include "base/threading/thread_task_runner_handle.h" |
25 #include "gpu/command_buffer/client/gpu_control_client.h" | 26 #include "gpu/command_buffer/client/gpu_control_client.h" |
26 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" | 27 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" |
27 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h" | 28 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h" |
28 #include "gpu/command_buffer/common/sync_token.h" | 29 #include "gpu/command_buffer/common/sync_token.h" |
29 #include "gpu/command_buffer/service/command_buffer_service.h" | 30 #include "gpu/command_buffer/service/command_buffer_service.h" |
30 #include "gpu/command_buffer/service/command_executor.h" | 31 #include "gpu/command_buffer/service/command_executor.h" |
31 #include "gpu/command_buffer/service/context_group.h" | 32 #include "gpu/command_buffer/service/context_group.h" |
32 #include "gpu/command_buffer/service/gl_context_virtual.h" | 33 #include "gpu/command_buffer/service/gl_context_virtual.h" |
33 #include "gpu/command_buffer/service/gpu_preferences.h" | 34 #include "gpu/command_buffer/service/gpu_preferences.h" |
34 #include "gpu/command_buffer/service/image_factory.h" | 35 #include "gpu/command_buffer/service/image_factory.h" |
35 #include "gpu/command_buffer/service/image_manager.h" | 36 #include "gpu/command_buffer/service/image_manager.h" |
36 #include "gpu/command_buffer/service/mailbox_manager.h" | 37 #include "gpu/command_buffer/service/mailbox_manager.h" |
37 #include "gpu/command_buffer/service/memory_program_cache.h" | 38 #include "gpu/command_buffer/service/memory_program_cache.h" |
38 #include "gpu/command_buffer/service/memory_tracking.h" | 39 #include "gpu/command_buffer/service/memory_tracking.h" |
39 #include "gpu/command_buffer/service/query_manager.h" | 40 #include "gpu/command_buffer/service/query_manager.h" |
40 #include "gpu/command_buffer/service/service_utils.h" | 41 #include "gpu/command_buffer/service/service_utils.h" |
41 #include "gpu/command_buffer/service/sync_point_manager.h" | 42 #include "gpu/command_buffer/service/sync_point_manager.h" |
42 #include "gpu/command_buffer/service/transfer_buffer_manager.h" | 43 #include "gpu/command_buffer/service/transfer_buffer_manager.h" |
44 #include "gpu/ipc/service/image_transport_surface.h" | |
43 #include "ui/gfx/geometry/size.h" | 45 #include "ui/gfx/geometry/size.h" |
44 #include "ui/gl/gl_context.h" | 46 #include "ui/gl/gl_context.h" |
45 #include "ui/gl/gl_image.h" | 47 #include "ui/gl/gl_image.h" |
46 #include "ui/gl/gl_image_shared_memory.h" | 48 #include "ui/gl/gl_image_shared_memory.h" |
47 #include "ui/gl/gl_share_group.h" | 49 #include "ui/gl/gl_share_group.h" |
48 #include "ui/gl/init/gl_factory.h" | 50 #include "ui/gl/init/gl_factory.h" |
49 | 51 |
50 #if defined(OS_WIN) | 52 #if defined(OS_WIN) |
51 #include <windows.h> | 53 #include <windows.h> |
52 #include "base/process/process_handle.h" | 54 #include "base/process/process_handle.h" |
53 #endif | 55 #endif |
54 | 56 |
55 namespace gpu { | 57 namespace gpu { |
56 | 58 |
57 namespace { | 59 namespace { |
58 | 60 |
59 base::StaticAtomicSequenceNumber g_next_command_buffer_id; | 61 base::StaticAtomicSequenceNumber g_next_command_buffer_id; |
60 | 62 |
61 template <typename T> | 63 template <typename T> |
62 static void RunTaskWithResult(base::Callback<T(void)> task, | 64 static void RunTaskWithResult(base::Callback<T(void)> task, |
63 T* result, | 65 T* result, |
64 base::WaitableEvent* completion) { | 66 base::WaitableEvent* completion) { |
65 *result = task.Run(); | 67 *result = task.Run(); |
66 completion->Signal(); | 68 completion->Signal(); |
67 } | 69 } |
68 | 70 |
69 struct ScopedOrderNumberProcessor { | |
70 ScopedOrderNumberProcessor(SyncPointOrderData* order_data, uint32_t order_num) | |
71 : order_data_(order_data), order_num_(order_num) { | |
72 order_data_->BeginProcessingOrderNumber(order_num_); | |
73 } | |
74 | |
75 ~ScopedOrderNumberProcessor() { | |
76 order_data_->FinishProcessingOrderNumber(order_num_); | |
77 } | |
78 | |
79 private: | |
80 SyncPointOrderData* order_data_; | |
81 uint32_t order_num_; | |
82 }; | |
83 | |
84 struct GpuInProcessThreadHolder { | 71 struct GpuInProcessThreadHolder { |
85 GpuInProcessThreadHolder() | 72 GpuInProcessThreadHolder() |
86 : sync_point_manager(new SyncPointManager(false)), | 73 : sync_point_manager(new SyncPointManager(false)), |
87 gpu_thread(new GpuInProcessThread(sync_point_manager.get())) {} | 74 gpu_thread(new GpuInProcessThread(sync_point_manager.get())) {} |
88 std::unique_ptr<SyncPointManager> sync_point_manager; | 75 std::unique_ptr<SyncPointManager> sync_point_manager; |
89 scoped_refptr<InProcessCommandBuffer::Service> gpu_thread; | 76 scoped_refptr<InProcessCommandBuffer::Service> gpu_thread; |
90 }; | 77 }; |
91 | 78 |
92 base::LazyInstance<GpuInProcessThreadHolder> g_default_service = | 79 base::LazyInstance<GpuInProcessThreadHolder> g_default_service = |
93 LAZY_INSTANCE_INITIALIZER; | 80 LAZY_INSTANCE_INITIALIZER; |
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
157 | 144 |
158 const gpu::GpuPreferences& InProcessCommandBuffer::Service::gpu_preferences() { | 145 const gpu::GpuPreferences& InProcessCommandBuffer::Service::gpu_preferences() { |
159 return gpu_preferences_; | 146 return gpu_preferences_; |
160 } | 147 } |
161 | 148 |
162 const gpu::GpuDriverBugWorkarounds& | 149 const gpu::GpuDriverBugWorkarounds& |
163 InProcessCommandBuffer::Service::gpu_driver_bug_workarounds() { | 150 InProcessCommandBuffer::Service::gpu_driver_bug_workarounds() { |
164 return gpu_driver_bug_workarounds_; | 151 return gpu_driver_bug_workarounds_; |
165 } | 152 } |
166 | 153 |
167 scoped_refptr<gl::GLShareGroup> InProcessCommandBuffer::Service::share_group() { | |
168 if (!share_group_.get()) | |
169 share_group_ = new gl::GLShareGroup(); | |
170 return share_group_; | |
171 } | |
172 | |
173 scoped_refptr<gles2::MailboxManager> | 154 scoped_refptr<gles2::MailboxManager> |
174 InProcessCommandBuffer::Service::mailbox_manager() { | 155 InProcessCommandBuffer::Service::mailbox_manager() { |
175 if (!mailbox_manager_.get()) { | 156 if (!mailbox_manager_.get()) { |
176 mailbox_manager_ = gles2::MailboxManager::Create(gpu_preferences()); | 157 mailbox_manager_ = gles2::MailboxManager::Create(gpu_preferences()); |
177 } | 158 } |
178 return mailbox_manager_; | 159 return mailbox_manager_; |
179 } | 160 } |
180 | 161 |
181 gpu::gles2::ProgramCache* InProcessCommandBuffer::Service::program_cache() { | 162 gpu::gles2::ProgramCache* InProcessCommandBuffer::Service::program_cache() { |
182 if (!program_cache_.get() && | 163 if (!program_cache_.get() && |
(...skipping 96 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
279 &capabilities, share_group, image_factory); | 260 &capabilities, share_group, image_factory); |
280 | 261 |
281 base::Callback<bool(void)> init_task = | 262 base::Callback<bool(void)> init_task = |
282 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread, | 263 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread, |
283 base::Unretained(this), params); | 264 base::Unretained(this), params); |
284 | 265 |
285 base::WaitableEvent completion( | 266 base::WaitableEvent completion( |
286 base::WaitableEvent::ResetPolicy::MANUAL, | 267 base::WaitableEvent::ResetPolicy::MANUAL, |
287 base::WaitableEvent::InitialState::NOT_SIGNALED); | 268 base::WaitableEvent::InitialState::NOT_SIGNALED); |
288 bool result = false; | 269 bool result = false; |
289 QueueTask( | 270 QueueTask(true, base::Bind(&RunTaskWithResult<bool>, init_task, &result, |
290 base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion)); | 271 &completion)); |
291 completion.Wait(); | 272 completion.Wait(); |
292 | 273 |
293 gpu_memory_buffer_manager_ = gpu_memory_buffer_manager; | 274 gpu_memory_buffer_manager_ = gpu_memory_buffer_manager; |
294 | 275 |
295 if (result) | 276 if (result) |
296 capabilities_ = capabilities; | 277 capabilities_ = capabilities; |
297 | 278 |
298 return result; | 279 return result; |
299 } | 280 } |
300 | 281 |
301 bool InProcessCommandBuffer::InitializeOnGpuThread( | 282 bool InProcessCommandBuffer::InitializeOnGpuThread( |
302 const InitializeOnGpuThreadParams& params) { | 283 const InitializeOnGpuThreadParams& params) { |
303 CheckSequencedThread(); | 284 CheckSequencedThread(); |
304 gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr(); | 285 gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr(); |
305 | 286 |
306 TransferBufferManager* manager = new TransferBufferManager(nullptr); | 287 TransferBufferManager* manager = new TransferBufferManager(nullptr); |
307 transfer_buffer_manager_ = manager; | 288 transfer_buffer_manager_ = manager; |
308 manager->Initialize(); | 289 manager->Initialize(); |
309 | 290 |
310 std::unique_ptr<CommandBufferService> command_buffer( | 291 std::unique_ptr<CommandBufferService> command_buffer( |
311 new CommandBufferService(transfer_buffer_manager_.get())); | 292 new CommandBufferService(transfer_buffer_manager_.get())); |
312 command_buffer->SetPutOffsetChangeCallback(base::Bind( | 293 command_buffer->SetPutOffsetChangeCallback(base::Bind( |
313 &InProcessCommandBuffer::PumpCommandsOnGpuThread, gpu_thread_weak_ptr_)); | 294 &InProcessCommandBuffer::PumpCommandsOnGpuThread, gpu_thread_weak_ptr_)); |
314 command_buffer->SetParseErrorCallback(base::Bind( | 295 command_buffer->SetParseErrorCallback(base::Bind( |
315 &InProcessCommandBuffer::OnContextLostOnGpuThread, gpu_thread_weak_ptr_)); | 296 &InProcessCommandBuffer::OnContextLostOnGpuThread, gpu_thread_weak_ptr_)); |
316 | 297 |
317 gl_share_group_ = params.context_group ? params.context_group->gl_share_group_ | 298 gl_share_group_ = params.context_group ? params.context_group->gl_share_group_ |
318 : service_->share_group(); | 299 : service_->GetShareGroup(); |
319 | 300 |
320 bool bind_generates_resource = false; | 301 bool bind_generates_resource = false; |
321 scoped_refptr<gles2::FeatureInfo> feature_info = | 302 scoped_refptr<gles2::FeatureInfo> feature_info = |
322 new gles2::FeatureInfo(service_->gpu_driver_bug_workarounds()); | 303 new gles2::FeatureInfo(service_->gpu_driver_bug_workarounds()); |
323 decoder_.reset(gles2::GLES2Decoder::Create( | 304 |
305 context_group_ = | |
324 params.context_group | 306 params.context_group |
325 ? params.context_group->decoder_->GetContextGroup() | 307 ? params.context_group->decoder_->GetContextGroup() |
326 : new gles2::ContextGroup( | 308 : new gles2::ContextGroup( |
327 service_->gpu_preferences(), service_->mailbox_manager(), NULL, | 309 service_->gpu_preferences(), service_->mailbox_manager(), NULL, |
328 service_->shader_translator_cache(), | 310 service_->shader_translator_cache(), |
329 service_->framebuffer_completeness_cache(), feature_info, | 311 service_->framebuffer_completeness_cache(), feature_info, |
330 bind_generates_resource, nullptr, nullptr))); | 312 bind_generates_resource, nullptr, nullptr); |
313 | |
314 decoder_.reset(gles2::GLES2Decoder::Create(context_group_.get())); | |
331 | 315 |
332 executor_.reset(new CommandExecutor(command_buffer.get(), decoder_.get(), | 316 executor_.reset(new CommandExecutor(command_buffer.get(), decoder_.get(), |
333 decoder_.get())); | 317 decoder_.get())); |
334 command_buffer->SetGetBufferChangeCallback(base::Bind( | 318 command_buffer->SetGetBufferChangeCallback(base::Bind( |
335 &CommandExecutor::SetGetBuffer, base::Unretained(executor_.get()))); | 319 &CommandExecutor::SetGetBuffer, base::Unretained(executor_.get()))); |
336 command_buffer_ = std::move(command_buffer); | 320 command_buffer_ = std::move(command_buffer); |
337 | 321 |
338 decoder_->set_engine(executor_.get()); | 322 decoder_->set_engine(executor_.get()); |
339 | 323 |
340 if (!surface_.get()) { | 324 if (!surface_.get()) { |
341 if (params.is_offscreen) | 325 surface_ = ImageTransportSurface::CreateNativeSurface( |
342 surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size()); | 326 gpu_thread_weak_ptr_factory_.GetWeakPtr(), params.window, |
piman
2016/11/15 21:57:32
We need to keep the offscreen path, and only use ImageTransportSurface for the onscreen case.
Fady Samuel
2016/11/16 00:33:35
Done.
| |
343 else | 327 gl::GLSurface::SURFACE_DEFAULT); |
344 surface_ = gl::init::CreateViewGLSurface(params.window); | 328 if (!surface_ || !surface_->Initialize(gl::GLSurface::SURFACE_DEFAULT)) { |
329 surface_ = nullptr; | |
330 DLOG(ERROR) << "Failed to create surface."; | |
331 return false; | |
332 } | |
345 } | 333 } |
346 | 334 |
347 if (!surface_.get()) { | 335 if (!surface_.get()) { |
348 LOG(ERROR) << "Could not create GLSurface."; | 336 LOG(ERROR) << "Could not create GLSurface."; |
349 DestroyOnGpuThread(); | 337 DestroyOnGpuThread(); |
350 return false; | 338 return false; |
351 } | 339 } |
352 | 340 |
353 sync_point_order_data_ = SyncPointOrderData::Create(); | 341 sync_point_order_data_ = SyncPointOrderData::Create(); |
354 sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient( | 342 sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient( |
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
437 void InProcessCommandBuffer::Destroy() { | 425 void InProcessCommandBuffer::Destroy() { |
438 CheckSequencedThread(); | 426 CheckSequencedThread(); |
439 client_thread_weak_ptr_factory_.InvalidateWeakPtrs(); | 427 client_thread_weak_ptr_factory_.InvalidateWeakPtrs(); |
440 gpu_control_client_ = nullptr; | 428 gpu_control_client_ = nullptr; |
441 base::WaitableEvent completion( | 429 base::WaitableEvent completion( |
442 base::WaitableEvent::ResetPolicy::MANUAL, | 430 base::WaitableEvent::ResetPolicy::MANUAL, |
443 base::WaitableEvent::InitialState::NOT_SIGNALED); | 431 base::WaitableEvent::InitialState::NOT_SIGNALED); |
444 bool result = false; | 432 bool result = false; |
445 base::Callback<bool(void)> destroy_task = base::Bind( | 433 base::Callback<bool(void)> destroy_task = base::Bind( |
446 &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this)); | 434 &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this)); |
447 QueueTask( | 435 QueueTask(false, base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, |
448 base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion)); | 436 &completion)); |
449 completion.Wait(); | 437 completion.Wait(); |
450 } | 438 } |
451 | 439 |
452 bool InProcessCommandBuffer::DestroyOnGpuThread() { | 440 bool InProcessCommandBuffer::DestroyOnGpuThread() { |
453 CheckSequencedThread(); | 441 CheckSequencedThread(); |
454 gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs(); | 442 gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs(); |
455 command_buffer_.reset(); | 443 command_buffer_.reset(); |
456 // Clean up GL resources if possible. | 444 // Clean up GL resources if possible. |
457 bool have_context = context_.get() && context_->MakeCurrent(surface_.get()); | 445 bool have_context = context_.get() && context_->MakeCurrent(surface_.get()); |
458 if (decoder_) { | 446 if (decoder_) { |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
497 } | 485 } |
498 | 486 |
499 CommandBuffer::State InProcessCommandBuffer::GetStateFast() { | 487 CommandBuffer::State InProcessCommandBuffer::GetStateFast() { |
500 CheckSequencedThread(); | 488 CheckSequencedThread(); |
501 base::AutoLock lock(state_after_last_flush_lock_); | 489 base::AutoLock lock(state_after_last_flush_lock_); |
502 if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U) | 490 if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U) |
503 last_state_ = state_after_last_flush_; | 491 last_state_ = state_after_last_flush_; |
504 return last_state_; | 492 return last_state_; |
505 } | 493 } |
506 | 494 |
495 void InProcessCommandBuffer::QueueTask(bool out_of_order, | |
496 const base::Closure& task) { | |
497 if (out_of_order) { | |
498 service_->ScheduleTask(task); | |
499 return; | |
500 } | |
501 base::AutoLock lock(task_queue_lock_); | |
502 SyncPointManager* sync_manager = service_->sync_point_manager(); | |
503 uint32_t order_num = | |
504 sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager); | |
505 task_queue_.push(base::MakeUnique<GpuTask>(task, order_num)); | |
506 service_->ScheduleTask( | |
507 base::Bind(&InProcessCommandBuffer::ProcessTasksOnGpuThread, | |
508 base::Unretained(this))); | |
509 } | |
510 | |
511 void InProcessCommandBuffer::ProcessTasksOnGpuThread() { | |
512 while (executor_->scheduled()) { | |
513 base::AutoLock lock(task_queue_lock_); | |
514 if (task_queue_.empty()) | |
515 break; | |
516 GpuTask* task = task_queue_.front().get(); | |
517 sync_point_order_data_->BeginProcessingOrderNumber(task->order_number); | |
518 task->callback.Run(); | |
519 if (!executor_->scheduled()) { | |
520 sync_point_order_data_->PauseProcessingOrderNumber(task->order_number); | |
521 return; | |
522 } | |
523 sync_point_order_data_->FinishProcessingOrderNumber(task->order_number); | |
524 task_queue_.pop(); | |
525 } | |
526 } | |
527 | |
507 CommandBuffer::State InProcessCommandBuffer::GetLastState() { | 528 CommandBuffer::State InProcessCommandBuffer::GetLastState() { |
508 CheckSequencedThread(); | 529 CheckSequencedThread(); |
509 return last_state_; | 530 return last_state_; |
510 } | 531 } |
511 | 532 |
512 int32_t InProcessCommandBuffer::GetLastToken() { | 533 int32_t InProcessCommandBuffer::GetLastToken() { |
513 CheckSequencedThread(); | 534 CheckSequencedThread(); |
514 GetStateFast(); | 535 GetStateFast(); |
515 return last_state_.token; | 536 return last_state_.token; |
516 } | 537 } |
517 | 538 |
518 void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset, | 539 void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset) { |
519 uint32_t order_num) { | |
520 CheckSequencedThread(); | 540 CheckSequencedThread(); |
521 ScopedEvent handle_flush(&flush_event_); | 541 ScopedEvent handle_flush(&flush_event_); |
522 base::AutoLock lock(command_buffer_lock_); | 542 base::AutoLock lock(command_buffer_lock_); |
523 | 543 |
524 { | 544 { |
525 ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(), | |
526 order_num); | |
527 command_buffer_->Flush(put_offset); | 545 command_buffer_->Flush(put_offset); |
528 { | 546 { |
529 // Update state before signaling the flush event. | 547 // Update state before signaling the flush event. |
530 base::AutoLock lock(state_after_last_flush_lock_); | 548 base::AutoLock lock(state_after_last_flush_lock_); |
531 state_after_last_flush_ = command_buffer_->GetLastState(); | 549 state_after_last_flush_ = command_buffer_->GetLastState(); |
532 } | 550 } |
533 | |
534 // Currently the in process command buffer does not support being | |
535 // descheduled, if it does we would need to back off on calling the finish | |
536 // processing number function until the message is rescheduled and finished | |
537 // processing. This DCHECK is to enforce this. | |
538 DCHECK(error::IsError(state_after_last_flush_.error) || | |
539 put_offset == state_after_last_flush_.get_offset); | |
540 } | 551 } |
541 | 552 |
542 // If we've processed all pending commands but still have pending queries, | 553 // If we've processed all pending commands but still have pending queries, |
543 // pump idle work until the query is passed. | 554 // pump idle work until the query is passed. |
544 if (put_offset == state_after_last_flush_.get_offset && | 555 if (put_offset == state_after_last_flush_.get_offset && |
545 (executor_->HasMoreIdleWork() || executor_->HasPendingQueries())) { | 556 (executor_->HasMoreIdleWork() || executor_->HasPendingQueries())) { |
546 ScheduleDelayedWorkOnGpuThread(); | 557 ScheduleDelayedWorkOnGpuThread(); |
547 } | 558 } |
548 } | 559 } |
549 | 560 |
(...skipping 21 matching lines...) Expand all Loading... | |
571 } | 582 } |
572 | 583 |
573 void InProcessCommandBuffer::Flush(int32_t put_offset) { | 584 void InProcessCommandBuffer::Flush(int32_t put_offset) { |
574 CheckSequencedThread(); | 585 CheckSequencedThread(); |
575 if (last_state_.error != gpu::error::kNoError) | 586 if (last_state_.error != gpu::error::kNoError) |
576 return; | 587 return; |
577 | 588 |
578 if (last_put_offset_ == put_offset) | 589 if (last_put_offset_ == put_offset) |
579 return; | 590 return; |
580 | 591 |
581 SyncPointManager* sync_manager = service_->sync_point_manager(); | |
582 const uint32_t order_num = | |
583 sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager); | |
584 last_put_offset_ = put_offset; | 592 last_put_offset_ = put_offset; |
585 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, | 593 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, |
586 gpu_thread_weak_ptr_, put_offset, order_num); | 594 gpu_thread_weak_ptr_, put_offset); |
587 QueueTask(task); | 595 QueueTask(false, task); |
588 | 596 |
589 flushed_fence_sync_release_ = next_fence_sync_release_ - 1; | 597 flushed_fence_sync_release_ = next_fence_sync_release_ - 1; |
590 } | 598 } |
591 | 599 |
592 void InProcessCommandBuffer::OrderingBarrier(int32_t put_offset) { | 600 void InProcessCommandBuffer::OrderingBarrier(int32_t put_offset) { |
593 Flush(put_offset); | 601 Flush(put_offset); |
594 } | 602 } |
595 | 603 |
596 void InProcessCommandBuffer::WaitForTokenInRange(int32_t start, int32_t end) { | 604 void InProcessCommandBuffer::WaitForTokenInRange(int32_t start, int32_t end) { |
597 CheckSequencedThread(); | 605 CheckSequencedThread(); |
598 while (!InRange(start, end, GetLastToken()) && | 606 while (!InRange(start, end, GetLastToken()) && |
599 last_state_.error == gpu::error::kNoError) | 607 last_state_.error == gpu::error::kNoError) { |
600 flush_event_.Wait(); | 608 flush_event_.Wait(); |
609 } | |
601 } | 610 } |
602 | 611 |
603 void InProcessCommandBuffer::WaitForGetOffsetInRange(int32_t start, | 612 void InProcessCommandBuffer::WaitForGetOffsetInRange(int32_t start, |
604 int32_t end) { | 613 int32_t end) { |
605 CheckSequencedThread(); | 614 CheckSequencedThread(); |
606 | 615 |
607 GetStateFast(); | 616 GetStateFast(); |
608 while (!InRange(start, end, last_state_.get_offset) && | 617 while (!InRange(start, end, last_state_.get_offset) && |
609 last_state_.error == gpu::error::kNoError) { | 618 last_state_.error == gpu::error::kNoError) { |
610 flush_event_.Wait(); | 619 flush_event_.Wait(); |
611 GetStateFast(); | 620 GetStateFast(); |
612 } | 621 } |
613 } | 622 } |
614 | 623 |
615 void InProcessCommandBuffer::SetGetBuffer(int32_t shm_id) { | 624 void InProcessCommandBuffer::SetGetBuffer(int32_t shm_id) { |
616 CheckSequencedThread(); | 625 CheckSequencedThread(); |
617 if (last_state_.error != gpu::error::kNoError) | 626 if (last_state_.error != gpu::error::kNoError) |
618 return; | 627 return; |
619 | 628 |
620 base::WaitableEvent completion( | 629 base::WaitableEvent completion( |
621 base::WaitableEvent::ResetPolicy::MANUAL, | 630 base::WaitableEvent::ResetPolicy::MANUAL, |
622 base::WaitableEvent::InitialState::NOT_SIGNALED); | 631 base::WaitableEvent::InitialState::NOT_SIGNALED); |
623 base::Closure task = | 632 base::Closure task = |
624 base::Bind(&InProcessCommandBuffer::SetGetBufferOnGpuThread, | 633 base::Bind(&InProcessCommandBuffer::SetGetBufferOnGpuThread, |
625 base::Unretained(this), shm_id, &completion); | 634 base::Unretained(this), shm_id, &completion); |
626 QueueTask(task); | 635 QueueTask(false, task); |
627 completion.Wait(); | 636 completion.Wait(); |
628 | 637 |
629 { | 638 { |
630 base::AutoLock lock(state_after_last_flush_lock_); | 639 base::AutoLock lock(state_after_last_flush_lock_); |
631 state_after_last_flush_ = command_buffer_->GetLastState(); | 640 state_after_last_flush_ = command_buffer_->GetLastState(); |
632 } | 641 } |
633 } | 642 } |
634 | 643 |
635 void InProcessCommandBuffer::SetGetBufferOnGpuThread( | 644 void InProcessCommandBuffer::SetGetBufferOnGpuThread( |
636 int32_t shm_id, | 645 int32_t shm_id, |
(...skipping 11 matching lines...) Expand all Loading... | |
648 base::AutoLock lock(command_buffer_lock_); | 657 base::AutoLock lock(command_buffer_lock_); |
649 return command_buffer_->CreateTransferBuffer(size, id); | 658 return command_buffer_->CreateTransferBuffer(size, id); |
650 } | 659 } |
651 | 660 |
652 void InProcessCommandBuffer::DestroyTransferBuffer(int32_t id) { | 661 void InProcessCommandBuffer::DestroyTransferBuffer(int32_t id) { |
653 CheckSequencedThread(); | 662 CheckSequencedThread(); |
654 base::Closure task = | 663 base::Closure task = |
655 base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread, | 664 base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread, |
656 base::Unretained(this), id); | 665 base::Unretained(this), id); |
657 | 666 |
658 QueueTask(task); | 667 QueueTask(false, task); |
659 } | 668 } |
660 | 669 |
661 void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32_t id) { | 670 void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32_t id) { |
662 base::AutoLock lock(command_buffer_lock_); | 671 base::AutoLock lock(command_buffer_lock_); |
663 command_buffer_->DestroyTransferBuffer(id); | 672 command_buffer_->DestroyTransferBuffer(id); |
664 } | 673 } |
665 | 674 |
666 void InProcessCommandBuffer::SetGpuControlClient(GpuControlClient* client) { | 675 void InProcessCommandBuffer::SetGpuControlClient(GpuControlClient* client) { |
667 gpu_control_client_ = client; | 676 gpu_control_client_ = client; |
668 } | 677 } |
(...skipping 20 matching lines...) Expand all Loading... | |
689 DCHECK(gpu::IsImageFormatCompatibleWithGpuMemoryBufferFormat( | 698 DCHECK(gpu::IsImageFormatCompatibleWithGpuMemoryBufferFormat( |
690 internalformat, gpu_memory_buffer->GetFormat())); | 699 internalformat, gpu_memory_buffer->GetFormat())); |
691 | 700 |
692 // This handle is owned by the GPU thread and must be passed to it or it | 701 // This handle is owned by the GPU thread and must be passed to it or it |
693 // will leak. In otherwords, do not early out on error between here and the | 702 // will leak. In otherwords, do not early out on error between here and the |
694 // queuing of the CreateImage task below. | 703 // queuing of the CreateImage task below. |
695 bool requires_sync_point = false; | 704 bool requires_sync_point = false; |
696 gfx::GpuMemoryBufferHandle handle = ShareGpuMemoryBufferToGpuThread( | 705 gfx::GpuMemoryBufferHandle handle = ShareGpuMemoryBufferToGpuThread( |
697 gpu_memory_buffer->GetHandle(), &requires_sync_point); | 706 gpu_memory_buffer->GetHandle(), &requires_sync_point); |
698 | 707 |
699 SyncPointManager* sync_manager = service_->sync_point_manager(); | |
700 const uint32_t order_num = | |
701 sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager); | |
702 | |
703 uint64_t fence_sync = 0; | 708 uint64_t fence_sync = 0; |
704 if (requires_sync_point) { | 709 if (requires_sync_point) { |
705 fence_sync = GenerateFenceSyncRelease(); | 710 fence_sync = GenerateFenceSyncRelease(); |
706 | 711 |
707 // Previous fence syncs should be flushed already. | 712 // Previous fence syncs should be flushed already. |
708 DCHECK_EQ(fence_sync - 1, flushed_fence_sync_release_); | 713 DCHECK_EQ(fence_sync - 1, flushed_fence_sync_release_); |
709 } | 714 } |
710 | 715 |
711 QueueTask(base::Bind( | 716 QueueTask(false, base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread, |
712 &InProcessCommandBuffer::CreateImageOnGpuThread, base::Unretained(this), | 717 base::Unretained(this), new_id, handle, |
713 new_id, handle, gfx::Size(base::checked_cast<int>(width), | 718 gfx::Size(base::checked_cast<int>(width), |
714 base::checked_cast<int>(height)), | 719 base::checked_cast<int>(height)), |
715 gpu_memory_buffer->GetFormat(), | 720 gpu_memory_buffer->GetFormat(), |
716 base::checked_cast<uint32_t>(internalformat), order_num, fence_sync)); | 721 base::checked_cast<uint32_t>(internalformat), |
722 fence_sync)); | |
717 | 723 |
718 if (fence_sync) { | 724 if (fence_sync) { |
719 flushed_fence_sync_release_ = fence_sync; | 725 flushed_fence_sync_release_ = fence_sync; |
720 SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(), | 726 SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(), |
721 GetCommandBufferID(), fence_sync); | 727 GetCommandBufferID(), fence_sync); |
722 sync_token.SetVerifyFlush(); | 728 sync_token.SetVerifyFlush(); |
723 gpu_memory_buffer_manager_->SetDestructionSyncToken(gpu_memory_buffer, | 729 gpu_memory_buffer_manager_->SetDestructionSyncToken(gpu_memory_buffer, |
724 sync_token); | 730 sync_token); |
725 } | 731 } |
726 | 732 |
727 return new_id; | 733 return new_id; |
728 } | 734 } |
729 | 735 |
730 void InProcessCommandBuffer::CreateImageOnGpuThread( | 736 void InProcessCommandBuffer::CreateImageOnGpuThread( |
731 int32_t id, | 737 int32_t id, |
732 const gfx::GpuMemoryBufferHandle& handle, | 738 const gfx::GpuMemoryBufferHandle& handle, |
733 const gfx::Size& size, | 739 const gfx::Size& size, |
734 gfx::BufferFormat format, | 740 gfx::BufferFormat format, |
735 uint32_t internalformat, | 741 uint32_t internalformat, |
736 uint32_t order_num, | |
737 uint64_t fence_sync) { | 742 uint64_t fence_sync) { |
738 ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(), | |
739 order_num); | |
740 if (!decoder_) | 743 if (!decoder_) |
741 return; | 744 return; |
742 | 745 |
743 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); | 746 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); |
744 DCHECK(image_manager); | 747 DCHECK(image_manager); |
745 if (image_manager->LookupImage(id)) { | 748 if (image_manager->LookupImage(id)) { |
746 LOG(ERROR) << "Image already exists with same ID."; | 749 LOG(ERROR) << "Image already exists with same ID."; |
747 return; | 750 return; |
748 } | 751 } |
749 | 752 |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
788 } | 791 } |
789 | 792 |
790 if (fence_sync) { | 793 if (fence_sync) { |
791 sync_point_client_->ReleaseFenceSync(fence_sync); | 794 sync_point_client_->ReleaseFenceSync(fence_sync); |
792 } | 795 } |
793 } | 796 } |
794 | 797 |
795 void InProcessCommandBuffer::DestroyImage(int32_t id) { | 798 void InProcessCommandBuffer::DestroyImage(int32_t id) { |
796 CheckSequencedThread(); | 799 CheckSequencedThread(); |
797 | 800 |
798 QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread, | 801 QueueTask(false, base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread, |
799 base::Unretained(this), id)); | 802 base::Unretained(this), id)); |
800 } | 803 } |
801 | 804 |
802 void InProcessCommandBuffer::DestroyImageOnGpuThread(int32_t id) { | 805 void InProcessCommandBuffer::DestroyImageOnGpuThread(int32_t id) { |
803 if (!decoder_) | 806 if (!decoder_) |
804 return; | 807 return; |
805 | 808 |
806 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); | 809 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); |
807 DCHECK(image_manager); | 810 DCHECK(image_manager); |
808 if (!image_manager->LookupImage(id)) { | 811 if (!image_manager->LookupImage(id)) { |
809 LOG(ERROR) << "Image with ID doesn't exist."; | 812 LOG(ERROR) << "Image with ID doesn't exist."; |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
843 mailbox_manager->PushTextureUpdates(sync_token); | 846 mailbox_manager->PushTextureUpdates(sync_token); |
844 } | 847 } |
845 | 848 |
846 sync_point_client_->ReleaseFenceSync(release); | 849 sync_point_client_->ReleaseFenceSync(release); |
847 } | 850 } |
848 | 851 |
849 bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread( | 852 bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread( |
850 gpu::CommandBufferNamespace namespace_id, | 853 gpu::CommandBufferNamespace namespace_id, |
851 gpu::CommandBufferId command_buffer_id, | 854 gpu::CommandBufferId command_buffer_id, |
852 uint64_t release) { | 855 uint64_t release) { |
856 DCHECK(!waiting_for_sync_point_); | |
853 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager(); | 857 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager(); |
854 DCHECK(sync_point_manager); | 858 DCHECK(sync_point_manager); |
855 | 859 |
856 scoped_refptr<gpu::SyncPointClientState> release_state = | 860 scoped_refptr<gpu::SyncPointClientState> release_state = |
857 sync_point_manager->GetSyncPointClientState(namespace_id, | 861 sync_point_manager->GetSyncPointClientState(namespace_id, |
858 command_buffer_id); | 862 command_buffer_id); |
859 | 863 |
860 if (!release_state) | 864 if (!release_state) |
861 return true; | 865 return true; |
862 | 866 |
863 if (!release_state->IsFenceSyncReleased(release)) { | 867 if (release_state->IsFenceSyncReleased(release)) { |
868 gles2::MailboxManager* mailbox_manager = | |
869 decoder_->GetContextGroup()->mailbox_manager(); | |
870 SyncToken sync_token(namespace_id, 0, command_buffer_id, release); | |
871 mailbox_manager->PullTextureUpdates(sync_token); | |
872 return true; | |
873 } | |
874 | |
875 if (service_->BlockThreadOnWaitSyncToken()) { | |
864 // Use waitable event which is signalled when the release fence is released. | 876 // Use waitable event which is signalled when the release fence is released. |
865 sync_point_client_->Wait( | 877 sync_point_client_->Wait( |
866 release_state.get(), release, | 878 release_state.get(), release, |
867 base::Bind(&base::WaitableEvent::Signal, | 879 base::Bind(&base::WaitableEvent::Signal, |
868 base::Unretained(&fence_sync_wait_event_))); | 880 base::Unretained(&fence_sync_wait_event_))); |
869 fence_sync_wait_event_.Wait(); | 881 fence_sync_wait_event_.Wait(); |
882 return true; | |
870 } | 883 } |
871 | 884 |
885 waiting_for_sync_point_ = true; | |
886 sync_point_client_->Wait( | |
887 release_state.get(), release, | |
888 base::Bind(&InProcessCommandBuffer::OnWaitFenceSyncCompleted, | |
889 gpu_thread_weak_ptr_factory_.GetWeakPtr(), namespace_id, | |
890 command_buffer_id, release)); | |
891 | |
892 if (!waiting_for_sync_point_) | |
893 return true; | |
894 | |
895 executor_->SetScheduled(false); | |
896 return false; | |
897 } | |
898 | |
899 void InProcessCommandBuffer::OnWaitFenceSyncCompleted( | |
900 CommandBufferNamespace namespace_id, | |
901 CommandBufferId command_buffer_id, | |
902 uint64_t release) { | |
903 DCHECK(waiting_for_sync_point_); | |
872 gles2::MailboxManager* mailbox_manager = | 904 gles2::MailboxManager* mailbox_manager = |
873 decoder_->GetContextGroup()->mailbox_manager(); | 905 decoder_->GetContextGroup()->mailbox_manager(); |
874 SyncToken sync_token(namespace_id, 0, command_buffer_id, release); | 906 SyncToken sync_token(namespace_id, 0, command_buffer_id, release); |
875 mailbox_manager->PullTextureUpdates(sync_token); | 907 mailbox_manager->PullTextureUpdates(sync_token); |
876 return true; | 908 waiting_for_sync_point_ = false; |
909 executor_->SetScheduled(true); | |
910 QueueTask(false, base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, | |
911 gpu_thread_weak_ptr_, last_put_offset_)); | |
877 } | 912 } |
878 | 913 |
879 void InProcessCommandBuffer::DescheduleUntilFinishedOnGpuThread() { | 914 void InProcessCommandBuffer::DescheduleUntilFinishedOnGpuThread() { |
880 NOTIMPLEMENTED(); | 915 DCHECK(executor_->scheduled()); |
916 DCHECK(executor_->HasPollingWork()); | |
917 | |
918 executor_->SetScheduled(false); | |
881 } | 919 } |
882 | 920 |
883 void InProcessCommandBuffer::RescheduleAfterFinishedOnGpuThread() { | 921 void InProcessCommandBuffer::RescheduleAfterFinishedOnGpuThread() { |
884 NOTIMPLEMENTED(); | 922 DCHECK(!executor_->scheduled()); |
923 | |
924 executor_->SetScheduled(true); | |
925 ProcessTasksOnGpuThread(); | |
885 } | 926 } |
886 | 927 |
887 void InProcessCommandBuffer::SignalSyncTokenOnGpuThread( | 928 void InProcessCommandBuffer::SignalSyncTokenOnGpuThread( |
888 const SyncToken& sync_token, | 929 const SyncToken& sync_token, |
889 const base::Closure& callback) { | 930 const base::Closure& callback) { |
890 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager(); | 931 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager(); |
891 DCHECK(sync_point_manager); | 932 DCHECK(sync_point_manager); |
892 | 933 |
893 scoped_refptr<gpu::SyncPointClientState> release_state = | 934 scoped_refptr<gpu::SyncPointClientState> release_state = |
894 sync_point_manager->GetSyncPointClientState( | 935 sync_point_manager->GetSyncPointClientState( |
895 sync_token.namespace_id(), sync_token.command_buffer_id()); | 936 sync_token.namespace_id(), sync_token.command_buffer_id()); |
896 | 937 |
897 if (!release_state) { | 938 if (!release_state) { |
898 callback.Run(); | 939 callback.Run(); |
899 return; | 940 return; |
900 } | 941 } |
901 | 942 |
902 sync_point_client_->WaitOutOfOrder( | 943 sync_point_client_->WaitOutOfOrder( |
903 release_state.get(), sync_token.release_count(), WrapCallback(callback)); | 944 release_state.get(), sync_token.release_count(), WrapCallback(callback)); |
904 } | 945 } |
905 | 946 |
906 void InProcessCommandBuffer::SignalQuery(unsigned query_id, | 947 void InProcessCommandBuffer::SignalQuery(unsigned query_id, |
907 const base::Closure& callback) { | 948 const base::Closure& callback) { |
908 CheckSequencedThread(); | 949 CheckSequencedThread(); |
909 QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread, | 950 QueueTask(false, base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread, |
910 base::Unretained(this), query_id, | 951 base::Unretained(this), query_id, |
911 WrapCallback(callback))); | 952 WrapCallback(callback))); |
912 } | 953 } |
913 | 954 |
914 void InProcessCommandBuffer::SignalQueryOnGpuThread( | 955 void InProcessCommandBuffer::SignalQueryOnGpuThread( |
915 unsigned query_id, | 956 unsigned query_id, |
916 const base::Closure& callback) { | 957 const base::Closure& callback) { |
917 gles2::QueryManager* query_manager_ = decoder_->GetQueryManager(); | 958 gles2::QueryManager* query_manager_ = decoder_->GetQueryManager(); |
918 DCHECK(query_manager_); | 959 DCHECK(query_manager_); |
919 | 960 |
920 gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id); | 961 gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id); |
921 if (!query) | 962 if (!query) |
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
957 return release <= flushed_fence_sync_release_; | 998 return release <= flushed_fence_sync_release_; |
958 } | 999 } |
959 | 1000 |
960 bool InProcessCommandBuffer::IsFenceSyncFlushReceived(uint64_t release) { | 1001 bool InProcessCommandBuffer::IsFenceSyncFlushReceived(uint64_t release) { |
961 return IsFenceSyncFlushed(release); | 1002 return IsFenceSyncFlushed(release); |
962 } | 1003 } |
963 | 1004 |
964 void InProcessCommandBuffer::SignalSyncToken(const SyncToken& sync_token, | 1005 void InProcessCommandBuffer::SignalSyncToken(const SyncToken& sync_token, |
965 const base::Closure& callback) { | 1006 const base::Closure& callback) { |
966 CheckSequencedThread(); | 1007 CheckSequencedThread(); |
967 QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread, | 1008 QueueTask( |
968 base::Unretained(this), sync_token, | 1009 true, |
969 WrapCallback(callback))); | 1010 base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread, |
1011 base::Unretained(this), sync_token, WrapCallback(callback))); | |
970 } | 1012 } |
971 | 1013 |
972 bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken( | 1014 bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken( |
973 const SyncToken* sync_token) { | 1015 const SyncToken* sync_token) { |
974 return sync_token->namespace_id() == GetNamespaceID(); | 1016 return sync_token->namespace_id() == GetNamespaceID(); |
975 } | 1017 } |
976 | 1018 |
1019 void InProcessCommandBuffer::DidSwapBuffersComplete( | |
1020 SwapBuffersCompleteParams params) { | |
1021 #if defined(OS_MACOSX) | |
1022 gpu::GpuProcessHostedCALayerTreeParamsMac params_mac; | |
1023 params_mac.ca_context_id = params.ca_context_id; | |
1024 params_mac.fullscreen_low_power_ca_context_valid = | |
1025 params.fullscreen_low_power_ca_context_valid; | |
1026 params_mac.fullscreen_low_power_ca_context_id = | |
1027 params.fullscreen_low_power_ca_context_id; | |
1028 params_mac.io_surface.reset(IOSurfaceLookupFromMachPort(params.io_surface)); | |
1029 params_mac.pixel_size = params.pixel_size; | |
1030 params_mac.scale_factor = params.scale_factor; | |
1031 params_mac.responses = std::move(params.in_use_responses); | |
1032 gpu::GpuProcessHostedCALayerTreeParamsMac* mac_frame_ptr = ¶ms_mac; | |
1033 #else | |
1034 gpu::GpuProcessHostedCALayerTreeParamsMac* mac_frame_ptr = nullptr; | |
1035 #endif | |
1036 if (!swap_buffers_completion_callback_.is_null()) { | |
1037 if (!ui::LatencyInfo::Verify( | |
1038 params.latency_info, | |
1039 "InProcessCommandBuffer::DidSwapBuffersComplete")) { | |
1040 swap_buffers_completion_callback_.Run(std::vector<ui::LatencyInfo>(), | |
1041 params.result, mac_frame_ptr); | |
1042 } else { | |
1043 swap_buffers_completion_callback_.Run(params.latency_info, params.result, | |
1044 mac_frame_ptr); | |
1045 } | |
1046 } | |
1047 } | |
1048 | |
1049 const gles2::FeatureInfo* InProcessCommandBuffer::GetFeatureInfo() const { | |
1050 return context_group_->feature_info(); | |
1051 } | |
1052 | |
1053 void InProcessCommandBuffer::SetLatencyInfoCallback( | |
1054 const LatencyInfoCallback& callback) { | |
1055 // TODO(fsamuel): Implement this. | |
1056 } | |
1057 | |
1058 void InProcessCommandBuffer::UpdateVSyncParameters(base::TimeTicks timebase, | |
1059 base::TimeDelta interval) { | |
1060 if (!update_vsync_parameters_completion_callback_.is_null()) | |
1061 update_vsync_parameters_completion_callback_.Run(timebase, interval); | |
1062 } | |
1063 | |
1064 void InProcessCommandBuffer::SetSwapBuffersCompletionCallback( | |
1065 const SwapBuffersCompletionCallback& callback) { | |
1066 swap_buffers_completion_callback_ = callback; | |
1067 } | |
1068 | |
1069 void InProcessCommandBuffer::SetUpdateVSyncParametersCallback( | |
1070 const UpdateVSyncParametersCallback& callback) { | |
1071 update_vsync_parameters_completion_callback_ = callback; | |
1072 } | |
1073 | |
977 gpu::error::Error InProcessCommandBuffer::GetLastError() { | 1074 gpu::error::Error InProcessCommandBuffer::GetLastError() { |
978 CheckSequencedThread(); | 1075 CheckSequencedThread(); |
979 return last_state_.error; | 1076 return last_state_.error; |
980 } | 1077 } |
981 | 1078 |
982 namespace { | 1079 namespace { |
983 | 1080 |
984 void PostCallback( | 1081 void PostCallback( |
985 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner, | 1082 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner, |
986 const base::Closure& callback) { | 1083 const base::Closure& callback) { |
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1059 if (!framebuffer_completeness_cache_.get()) | 1156 if (!framebuffer_completeness_cache_.get()) |
1060 framebuffer_completeness_cache_ = | 1157 framebuffer_completeness_cache_ = |
1061 new gpu::gles2::FramebufferCompletenessCache; | 1158 new gpu::gles2::FramebufferCompletenessCache; |
1062 return framebuffer_completeness_cache_; | 1159 return framebuffer_completeness_cache_; |
1063 } | 1160 } |
1064 | 1161 |
1065 SyncPointManager* GpuInProcessThread::sync_point_manager() { | 1162 SyncPointManager* GpuInProcessThread::sync_point_manager() { |
1066 return sync_point_manager_; | 1163 return sync_point_manager_; |
1067 } | 1164 } |
1068 | 1165 |
1166 const scoped_refptr<gl::GLShareGroup>& GpuInProcessThread::GetShareGroup() | |
1167 const { | |
1168 return share_group_; | |
1169 } | |
1170 | |
1171 bool GpuInProcessThread::BlockThreadOnWaitSyncToken() const { | |
1172 return true; | |
piman
2016/11/15 21:57:32
I think this should be false here, but set to true
Fady Samuel
2016/11/16 00:33:35
Done.
| |
1173 } | |
1174 | |
1175 InProcessCommandBuffer::GpuTask::GpuTask(const base::Closure& callback, | |
1176 uint32_t order_number) | |
1177 : callback(callback), order_number(order_number) {} | |
1178 | |
1179 InProcessCommandBuffer::GpuTask::~GpuTask() {} | |
1180 | |
1069 } // namespace gpu | 1181 } // namespace gpu |
OLD | NEW |