OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if defined(OS_WIN) | 5 #if defined(OS_WIN) |
6 #include <windows.h> | 6 #include <windows.h> |
7 #endif | 7 #endif |
8 | 8 |
9 #include "content/common/gpu/gpu_channel.h" | 9 #include "content/common/gpu/gpu_channel.h" |
10 | 10 |
11 #include <queue> | 11 #include <queue> |
12 #include <vector> | 12 #include <vector> |
13 | 13 |
14 #include "base/bind.h" | 14 #include "base/bind.h" |
15 #include "base/command_line.h" | 15 #include "base/command_line.h" |
16 #include "base/message_loop/message_loop_proxy.h" | 16 #include "base/location.h" |
| 17 #include "base/single_thread_task_runner.h" |
17 #include "base/stl_util.h" | 18 #include "base/stl_util.h" |
18 #include "base/strings/string_util.h" | 19 #include "base/strings/string_util.h" |
| 20 #include "base/thread_task_runner_handle.h" |
19 #include "base/timer/timer.h" | 21 #include "base/timer/timer.h" |
20 #include "base/trace_event/trace_event.h" | 22 #include "base/trace_event/trace_event.h" |
21 #include "content/common/gpu/gpu_channel_manager.h" | 23 #include "content/common/gpu/gpu_channel_manager.h" |
22 #include "content/common/gpu/gpu_memory_buffer_factory.h" | 24 #include "content/common/gpu/gpu_memory_buffer_factory.h" |
23 #include "content/common/gpu/gpu_messages.h" | 25 #include "content/common/gpu/gpu_messages.h" |
24 #include "content/public/common/content_switches.h" | 26 #include "content/public/common/content_switches.h" |
25 #include "gpu/command_buffer/common/mailbox.h" | 27 #include "gpu/command_buffer/common/mailbox.h" |
26 #include "gpu/command_buffer/common/value_state.h" | 28 #include "gpu/command_buffer/common/value_state.h" |
27 #include "gpu/command_buffer/service/gpu_scheduler.h" | 29 #include "gpu/command_buffer/service/gpu_scheduler.h" |
28 #include "gpu/command_buffer/service/image_factory.h" | 30 #include "gpu/command_buffer/service/image_factory.h" |
(...skipping 41 matching lines...)
70 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO | 72 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO |
71 // thread, generating the sync point ID and responding immediately, and then | 73 // thread, generating the sync point ID and responding immediately, and then |
72 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message | 74 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message |
73 // into the channel's queue. | 75 // into the channel's queue. |
74 // - it generates mailbox names for clients of the GPU process on the IO thread. | 76 // - it generates mailbox names for clients of the GPU process on the IO thread. |
75 class GpuChannelMessageFilter : public IPC::MessageFilter { | 77 class GpuChannelMessageFilter : public IPC::MessageFilter { |
76 public: | 78 public: |
77 GpuChannelMessageFilter( | 79 GpuChannelMessageFilter( |
78 base::WeakPtr<GpuChannel> gpu_channel, | 80 base::WeakPtr<GpuChannel> gpu_channel, |
79 scoped_refptr<gpu::SyncPointManager> sync_point_manager, | 81 scoped_refptr<gpu::SyncPointManager> sync_point_manager, |
80 scoped_refptr<base::MessageLoopProxy> message_loop, | 82 scoped_refptr<base::SingleThreadTaskRunner> task_runner, |
81 bool future_sync_points) | 83 bool future_sync_points) |
82 : preemption_state_(IDLE), | 84 : preemption_state_(IDLE), |
83 gpu_channel_(gpu_channel), | 85 gpu_channel_(gpu_channel), |
84 sender_(NULL), | 86 sender_(NULL), |
85 sync_point_manager_(sync_point_manager), | 87 sync_point_manager_(sync_point_manager), |
86 message_loop_(message_loop), | 88 task_runner_(task_runner), |
87 messages_forwarded_to_channel_(0), | 89 messages_forwarded_to_channel_(0), |
88 a_stub_is_descheduled_(false), | 90 a_stub_is_descheduled_(false), |
89 future_sync_points_(future_sync_points) {} | 91 future_sync_points_(future_sync_points) {} |
90 | 92 |
91 void OnFilterAdded(IPC::Sender* sender) override { | 93 void OnFilterAdded(IPC::Sender* sender) override { |
92 DCHECK(!sender_); | 94 DCHECK(!sender_); |
93 sender_ = sender; | 95 sender_ = sender; |
94 } | 96 } |
95 | 97 |
96 void OnFilterRemoved() override { | 98 void OnFilterRemoved() override { |
(...skipping 23 matching lines...)
120 } | 122 } |
121 if (!future_sync_points_ && !get<0>(retire)) { | 123 if (!future_sync_points_ && !get<0>(retire)) { |
122 LOG(ERROR) << "Untrusted contexts can't create future sync points"; | 124 LOG(ERROR) << "Untrusted contexts can't create future sync points"; |
123 reply->set_reply_error(); | 125 reply->set_reply_error(); |
124 Send(reply); | 126 Send(reply); |
125 return true; | 127 return true; |
126 } | 128 } |
127 uint32 sync_point = sync_point_manager_->GenerateSyncPoint(); | 129 uint32 sync_point = sync_point_manager_->GenerateSyncPoint(); |
128 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point); | 130 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point); |
129 Send(reply); | 131 Send(reply); |
130 message_loop_->PostTask( | 132 task_runner_->PostTask( |
131 FROM_HERE, | 133 FROM_HERE, |
132 base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread, | 134 base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread, |
133 gpu_channel_, | 135 gpu_channel_, sync_point_manager_, message.routing_id(), |
134 sync_point_manager_, | 136 get<0>(retire), sync_point)); |
135 message.routing_id(), | |
136 get<0>(retire), | |
137 sync_point)); | |
138 handled = true; | 137 handled = true; |
139 } | 138 } |
140 | 139 |
141 // All other messages get processed by the GpuChannel. | 140 // All other messages get processed by the GpuChannel. |
142 messages_forwarded_to_channel_++; | 141 messages_forwarded_to_channel_++; |
143 if (preempting_flag_.get()) | 142 if (preempting_flag_.get()) |
144 pending_messages_.push(PendingMessage(messages_forwarded_to_channel_)); | 143 pending_messages_.push(PendingMessage(messages_forwarded_to_channel_)); |
145 UpdatePreemptionState(); | 144 UpdatePreemptionState(); |
146 | 145 |
147 return handled; | 146 return handled; |
(...skipping 228 matching lines...)
376 } | 375 } |
377 } | 376 } |
378 manager->RetireSyncPoint(sync_point); | 377 manager->RetireSyncPoint(sync_point); |
379 } | 378 } |
380 | 379 |
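The class comment near the top of GpuChannelMessageFilter describes this flow: generate a sync point ID and reply from the IO thread right away, then post only the retirement to the GPU main thread so it stays ordered with the channel's other messages. Below is a condensed sketch of that shape, not the exact handler; OnInsertSyncPoint and retire_immediately are illustrative stand-ins for the code in OnMessageReceived and get<0>(retire).

    // IO thread: generate the ID and unblock the waiting client immediately.
    bool OnInsertSyncPoint(const IPC::Message& message,
                           IPC::Message* reply,
                           bool retire_immediately) {
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      // Only the retirement goes to the GPU main thread, where it is ordered
      // with the rest of the channel's queued messages.
      task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread,
                     gpu_channel_, sync_point_manager_, message.routing_id(),
                     retire_immediately, sync_point));
      return true;
    }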
381 // NOTE: this weak pointer is never dereferenced on the IO thread, it's only | 380 // NOTE: this weak pointer is never dereferenced on the IO thread, it's only |
382 // passed through - therefore the WeakPtr assumptions are respected. | 381 // passed through - therefore the WeakPtr assumptions are respected. |
383 base::WeakPtr<GpuChannel> gpu_channel_; | 382 base::WeakPtr<GpuChannel> gpu_channel_; |
384 IPC::Sender* sender_; | 383 IPC::Sender* sender_; |
385 scoped_refptr<gpu::SyncPointManager> sync_point_manager_; | 384 scoped_refptr<gpu::SyncPointManager> sync_point_manager_; |
386 scoped_refptr<base::MessageLoopProxy> message_loop_; | 385 scoped_refptr<base::SingleThreadTaskRunner> task_runner_; |
387 scoped_refptr<gpu::PreemptionFlag> preempting_flag_; | 386 scoped_refptr<gpu::PreemptionFlag> preempting_flag_; |
388 | 387 |
389 std::queue<PendingMessage> pending_messages_; | 388 std::queue<PendingMessage> pending_messages_; |
390 | 389 |
391 // Count of the number of IPCs forwarded to the GpuChannel. | 390 // Count of the number of IPCs forwarded to the GpuChannel. |
392 uint64 messages_forwarded_to_channel_; | 391 uint64 messages_forwarded_to_channel_; |
393 | 392 |
394 base::OneShotTimer<GpuChannelMessageFilter> timer_; | 393 base::OneShotTimer<GpuChannelMessageFilter> timer_; |
395 | 394 |
396 bool a_stub_is_descheduled_; | 395 bool a_stub_is_descheduled_; |
(...skipping 33 matching lines...)
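The NOTE above about gpu_channel_ leans on base::WeakPtr's threading rule: a WeakPtr may be copied and bound into a closure on any thread, but it must only be dereferenced (or invalidated) on the thread it was created on. A minimal sketch of that pass-through pattern follows; HandleOnMainThread and ForwardToMainThread are hypothetical names, and only the PostTask/Bind calls and the WeakPtr usage mirror the code above.

    // GPU main thread side: the only place the WeakPtr is dereferenced.
    void HandleOnMainThread(base::WeakPtr<GpuChannel> channel, int payload) {
      if (!channel.get())
        return;  // The channel was destroyed before this task ran.
      // channel->... is safe here because this runs on the channel's thread.
    }

    // IO thread side: the WeakPtr is only copied into the closure, which is
    // what keeps the NOTE above true.
    void GpuChannelMessageFilter::ForwardToMainThread(int payload) {
      task_runner_->PostTask(
          FROM_HERE, base::Bind(&HandleOnMainThread, gpu_channel_, payload));
    }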
430 subscription_ref_set_->AddObserver(this); | 429 subscription_ref_set_->AddObserver(this); |
431 } | 430 } |
432 | 431 |
433 GpuChannel::~GpuChannel() { | 432 GpuChannel::~GpuChannel() { |
434 STLDeleteElements(&deferred_messages_); | 433 STLDeleteElements(&deferred_messages_); |
435 subscription_ref_set_->RemoveObserver(this); | 434 subscription_ref_set_->RemoveObserver(this); |
436 if (preempting_flag_.get()) | 435 if (preempting_flag_.get()) |
437 preempting_flag_->Reset(); | 436 preempting_flag_->Reset(); |
438 } | 437 } |
439 | 438 |
440 void GpuChannel::Init(base::MessageLoopProxy* io_message_loop, | 439 void GpuChannel::Init(base::SingleThreadTaskRunner* io_task_runner, |
441 base::WaitableEvent* shutdown_event) { | 440 base::WaitableEvent* shutdown_event) { |
442 DCHECK(!channel_.get()); | 441 DCHECK(!channel_.get()); |
443 | 442 |
444 // Map renderer ID to a (single) channel to that process. | 443 // Map renderer ID to a (single) channel to that process. |
445 channel_ = IPC::SyncChannel::Create(channel_id_, | 444 channel_ = |
446 IPC::Channel::MODE_SERVER, | 445 IPC::SyncChannel::Create(channel_id_, IPC::Channel::MODE_SERVER, this, |
447 this, | 446 io_task_runner, false, shutdown_event); |
448 io_message_loop, | |
449 false, | |
450 shutdown_event); | |
451 | 447 |
452 filter_ = | 448 filter_ = new GpuChannelMessageFilter( |
453 new GpuChannelMessageFilter(weak_factory_.GetWeakPtr(), | 449 weak_factory_.GetWeakPtr(), gpu_channel_manager_->sync_point_manager(), |
454 gpu_channel_manager_->sync_point_manager(), | 450 base::ThreadTaskRunnerHandle::Get(), allow_future_sync_points_); |
455 base::MessageLoopProxy::current(), | 451 io_task_runner_ = io_task_runner; |
456 allow_future_sync_points_); | |
457 io_message_loop_ = io_message_loop; | |
458 channel_->AddFilter(filter_.get()); | 452 channel_->AddFilter(filter_.get()); |
459 pending_valuebuffer_state_ = new gpu::ValueStateMap(); | 453 pending_valuebuffer_state_ = new gpu::ValueStateMap(); |
460 } | 454 } |
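This hunk shows the substitution the whole CL is making: base::MessageLoopProxy becomes base::SingleThreadTaskRunner, and base::ThreadTaskRunnerHandle::Get() replaces base::MessageLoopProxy::current() as the way to capture the current thread. A minimal before/after sketch, with hypothetical DoWork/PostWorkToCurrentThread helpers; only the base types and calls are real.

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/memory/ref_counted.h"
    #include "base/single_thread_task_runner.h"
    #include "base/thread_task_runner_handle.h"

    void DoWork() {}  // Stand-in for the real work.

    // Old style, removed by this change:
    //   scoped_refptr<base::MessageLoopProxy> loop =
    //       base::MessageLoopProxy::current();
    //   loop->PostTask(FROM_HERE, base::Bind(&DoWork));

    // New style: the same thread, expressed as a SingleThreadTaskRunner.
    void PostWorkToCurrentThread() {
      scoped_refptr<base::SingleThreadTaskRunner> task_runner =
          base::ThreadTaskRunnerHandle::Get();
      task_runner->PostTask(FROM_HERE, base::Bind(&DoWork));
    }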
461 | 455 |
462 std::string GpuChannel::GetChannelName() { | 456 std::string GpuChannel::GetChannelName() { |
463 return channel_id_; | 457 return channel_id_; |
464 } | 458 } |
465 | 459 |
466 #if defined(OS_POSIX) | 460 #if defined(OS_POSIX) |
467 base::ScopedFD GpuChannel::TakeRendererFileDescriptor() { | 461 base::ScopedFD GpuChannel::TakeRendererFileDescriptor() { |
(...skipping 65 matching lines...)
533 } | 527 } |
534 | 528 |
535 void GpuChannel::OnScheduled() { | 529 void GpuChannel::OnScheduled() { |
536 if (handle_messages_scheduled_) | 530 if (handle_messages_scheduled_) |
537 return; | 531 return; |
538 // Post a task to handle any deferred messages. The deferred message queue is | 532 // Post a task to handle any deferred messages. The deferred message queue is |
539 // not emptied here, which ensures that OnMessageReceived will continue to | 533 // not emptied here, which ensures that OnMessageReceived will continue to |
540 // defer newly received messages until the ones in the queue have all been | 534 // defer newly received messages until the ones in the queue have all been |
541 // handled by HandleMessage. HandleMessage is invoked as a | 535 // handled by HandleMessage. HandleMessage is invoked as a |
542 // task to prevent reentrancy. | 536 // task to prevent reentrancy. |
543 base::MessageLoop::current()->PostTask( | 537 base::ThreadTaskRunnerHandle::Get()->PostTask( |
544 FROM_HERE, | 538 FROM_HERE, |
545 base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr())); | 539 base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr())); |
546 handle_messages_scheduled_ = true; | 540 handle_messages_scheduled_ = true; |
547 } | 541 } |
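The comment in OnScheduled above is the reason HandleMessage is bounced through a PostTask instead of being called directly: messages keep being deferred until the posted drain actually runs, and the indirection prevents a handler from re-entering the drain. A self-contained sketch of that defer-and-drain shape; DeferredQueueSketch and its members are illustrative, not the real GpuChannel types.

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/memory/weak_ptr.h"
    #include "base/thread_task_runner_handle.h"

    class DeferredQueueSketch {
     public:
      DeferredQueueSketch() : drain_scheduled_(false), weak_factory_(this) {}

      // Called when new work arrives or when the queue becomes runnable again.
      void ScheduleDrain() {
        if (drain_scheduled_)
          return;
        drain_scheduled_ = true;
        // Posting (instead of draining inline) prevents reentrancy: a handler
        // that makes the queue runnable cannot start draining it recursively.
        base::ThreadTaskRunnerHandle::Get()->PostTask(
            FROM_HERE, base::Bind(&DeferredQueueSketch::Drain,
                                  weak_factory_.GetWeakPtr()));
      }

     private:
      void Drain() {
        drain_scheduled_ = false;
        // Dispatch queued work here; anything arriving in the meantime was
        // deferred and will be picked up by the next scheduled drain.
      }

      bool drain_scheduled_;
      base::WeakPtrFactory<DeferredQueueSketch> weak_factory_;
    };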
548 | 542 |
549 void GpuChannel::StubSchedulingChanged(bool scheduled) { | 543 void GpuChannel::StubSchedulingChanged(bool scheduled) { |
550 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0; | 544 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0; |
551 if (scheduled) { | 545 if (scheduled) { |
552 num_stubs_descheduled_--; | 546 num_stubs_descheduled_--; |
553 OnScheduled(); | 547 OnScheduled(); |
554 } else { | 548 } else { |
555 num_stubs_descheduled_++; | 549 num_stubs_descheduled_++; |
556 } | 550 } |
557 DCHECK_LE(num_stubs_descheduled_, stubs_.size()); | 551 DCHECK_LE(num_stubs_descheduled_, stubs_.size()); |
558 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0; | 552 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0; |
559 | 553 |
560 if (a_stub_is_descheduled != a_stub_was_descheduled) { | 554 if (a_stub_is_descheduled != a_stub_was_descheduled) { |
561 if (preempting_flag_.get()) { | 555 if (preempting_flag_.get()) { |
562 io_message_loop_->PostTask( | 556 io_task_runner_->PostTask( |
563 FROM_HERE, | 557 FROM_HERE, |
564 base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState, | 558 base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState, |
565 filter_, | 559 filter_, a_stub_is_descheduled)); |
566 a_stub_is_descheduled)); | |
567 } | 560 } |
568 } | 561 } |
569 } | 562 } |
570 | 563 |
571 CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer( | 564 CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer( |
572 const gfx::GLSurfaceHandle& window, | 565 const gfx::GLSurfaceHandle& window, |
573 int32 surface_id, | 566 int32 surface_id, |
574 const GPUCreateCommandBufferConfig& init_params, | 567 const GPUCreateCommandBufferConfig& init_params, |
575 int32 route_id) { | 568 int32 route_id) { |
576 TRACE_EVENT1("gpu", | 569 TRACE_EVENT1("gpu", |
(...skipping 58 matching lines...)
635 return router_.AddRoute(route_id, listener); | 628 return router_.AddRoute(route_id, listener); |
636 } | 629 } |
637 | 630 |
638 void GpuChannel::RemoveRoute(int32 route_id) { | 631 void GpuChannel::RemoveRoute(int32 route_id) { |
639 router_.RemoveRoute(route_id); | 632 router_.RemoveRoute(route_id); |
640 } | 633 } |
641 | 634 |
642 gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() { | 635 gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() { |
643 if (!preempting_flag_.get()) { | 636 if (!preempting_flag_.get()) { |
644 preempting_flag_ = new gpu::PreemptionFlag; | 637 preempting_flag_ = new gpu::PreemptionFlag; |
645 io_message_loop_->PostTask( | 638 io_task_runner_->PostTask( |
646 FROM_HERE, base::Bind( | 639 FROM_HERE, |
| 640 base::Bind( |
647 &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState, | 641 &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState, |
648 filter_, preempting_flag_, num_stubs_descheduled_ > 0)); | 642 filter_, preempting_flag_, num_stubs_descheduled_ > 0)); |
649 } | 643 } |
650 return preempting_flag_.get(); | 644 return preempting_flag_.get(); |
651 } | 645 } |
652 | 646 |
653 void GpuChannel::SetPreemptByFlag( | 647 void GpuChannel::SetPreemptByFlag( |
654 scoped_refptr<gpu::PreemptionFlag> preempted_flag) { | 648 scoped_refptr<gpu::PreemptionFlag> preempted_flag) { |
655 preempted_flag_ = preempted_flag; | 649 preempted_flag_ = preempted_flag; |
656 | 650 |
(...skipping 132 matching lines...)
789 // stub, we need to make sure to reschedule the GpuChannel here. | 783 // stub, we need to make sure to reschedule the GpuChannel here. |
790 if (need_reschedule) { | 784 if (need_reschedule) { |
791 // This stub won't get a chance to reschedule, so update the count now. | 785 // This stub won't get a chance to reschedule, so update the count now. |
792 StubSchedulingChanged(true); | 786 StubSchedulingChanged(true); |
793 } | 787 } |
794 } | 788 } |
795 | 789 |
796 void GpuChannel::MessageProcessed() { | 790 void GpuChannel::MessageProcessed() { |
797 messages_processed_++; | 791 messages_processed_++; |
798 if (preempting_flag_.get()) { | 792 if (preempting_flag_.get()) { |
799 io_message_loop_->PostTask( | 793 io_task_runner_->PostTask( |
800 FROM_HERE, | 794 FROM_HERE, base::Bind(&GpuChannelMessageFilter::MessageProcessed, |
801 base::Bind(&GpuChannelMessageFilter::MessageProcessed, | 795 filter_, messages_processed_)); |
802 filter_, | |
803 messages_processed_)); | |
804 } | 796 } |
805 } | 797 } |
806 | 798 |
807 void GpuChannel::CacheShader(const std::string& key, | 799 void GpuChannel::CacheShader(const std::string& key, |
808 const std::string& shader) { | 800 const std::string& shader) { |
809 gpu_channel_manager_->Send( | 801 gpu_channel_manager_->Send( |
810 new GpuHostMsg_CacheShader(client_id_, key, shader)); | 802 new GpuHostMsg_CacheShader(client_id_, key, shader)); |
811 } | 803 } |
812 | 804 |
813 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { | 805 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { |
(...skipping 42 matching lines...)
856 } | 848 } |
857 } | 849 } |
858 } | 850 } |
859 | 851 |
860 void GpuChannel::HandleUpdateValueState( | 852 void GpuChannel::HandleUpdateValueState( |
861 unsigned int target, const gpu::ValueState& state) { | 853 unsigned int target, const gpu::ValueState& state) { |
862 pending_valuebuffer_state_->UpdateState(target, state); | 854 pending_valuebuffer_state_->UpdateState(target, state); |
863 } | 855 } |
864 | 856 |
865 } // namespace content | 857 } // namespace content |