Chromium Code Reviews

Diff: content/common/gpu/gpu_channel.cc

Issue 1135943005: Revert of content/common: Remove use of MessageLoopProxy and deprecated MessageLoop APIs (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 7 months ago
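
For context, the revert below swaps the task-posting API used throughout this file back from base::SingleThreadTaskRunner / base::ThreadTaskRunnerHandle to the older base::MessageLoopProxy. A minimal sketch of the two equivalent call styles, outside the patch itself; DoWork() and PostToCurrentThread() are hypothetical names used only for illustration:

// Not part of the patch: the two ways of posting a task back to the
// current thread that this revert switches between.
#include "base/bind.h"
#include "base/location.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"

void DoWork() {}

void PostToCurrentThread() {
  // Style restored by this revert: the deprecated MessageLoopProxy.
  base::MessageLoopProxy::current()->PostTask(FROM_HERE,
                                              base::Bind(&DoWork));

  // Style the reverted CL had introduced: the SingleThreadTaskRunner
  // bound to the current thread.
  base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                base::Bind(&DoWork));
}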
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #if defined(OS_WIN)
 #include <windows.h>
 #endif

 #include "content/common/gpu/gpu_channel.h"

 #include <queue>
 #include <vector>

 #include "base/bind.h"
 #include "base/command_line.h"
-#include "base/location.h"
-#include "base/single_thread_task_runner.h"
+#include "base/message_loop/message_loop_proxy.h"
 #include "base/stl_util.h"
 #include "base/strings/string_util.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/timer/timer.h"
 #include "base/trace_event/trace_event.h"
 #include "content/common/gpu/gpu_channel_manager.h"
 #include "content/common/gpu/gpu_memory_buffer_factory.h"
 #include "content/common/gpu/gpu_messages.h"
 #include "content/public/common/content_switches.h"
 #include "gpu/command_buffer/common/mailbox.h"
 #include "gpu/command_buffer/common/value_state.h"
 #include "gpu/command_buffer/service/gpu_scheduler.h"
 #include "gpu/command_buffer/service/image_factory.h"
(...skipping 41 matching lines...)
 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
 //   thread, generating the sync point ID and responding immediately, and then
 //   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
 //   into the channel's queue.
 // - it generates mailbox names for clients of the GPU process on the IO thread.
 class GpuChannelMessageFilter : public IPC::MessageFilter {
  public:
   GpuChannelMessageFilter(
       base::WeakPtr<GpuChannel> gpu_channel,
       scoped_refptr<gpu::SyncPointManager> sync_point_manager,
-      scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+      scoped_refptr<base::MessageLoopProxy> message_loop,
       bool future_sync_points)
       : preemption_state_(IDLE),
         gpu_channel_(gpu_channel),
         sender_(NULL),
         sync_point_manager_(sync_point_manager),
-        task_runner_(task_runner),
+        message_loop_(message_loop),
         messages_forwarded_to_channel_(0),
         a_stub_is_descheduled_(false),
         future_sync_points_(future_sync_points) {}

   void OnFilterAdded(IPC::Sender* sender) override {
     DCHECK(!sender_);
     sender_ = sender;
   }

   void OnFilterRemoved() override {
(...skipping 23 matching lines...)
       }
       if (!future_sync_points_ && !get<0>(retire)) {
         LOG(ERROR) << "Untrusted contexts can't create future sync points";
         reply->set_reply_error();
         Send(reply);
         return true;
       }
       uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
       GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
       Send(reply);
-      task_runner_->PostTask(
+      message_loop_->PostTask(
           FROM_HERE,
           base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread,
-                     gpu_channel_, sync_point_manager_, message.routing_id(),
-                     get<0>(retire), sync_point));
+                     gpu_channel_,
+                     sync_point_manager_,
+                     message.routing_id(),
+                     get<0>(retire),
+                     sync_point));
       handled = true;
     }

     // All other messages get processed by the GpuChannel.
     messages_forwarded_to_channel_++;
     if (preempting_flag_.get())
       pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
     UpdatePreemptionState();

     return handled;
(...skipping 228 matching lines...)
       }
     }
     manager->RetireSyncPoint(sync_point);
   }

   // NOTE: this weak pointer is never dereferenced on the IO thread, it's only
   // passed through - therefore the WeakPtr assumptions are respected.
   base::WeakPtr<GpuChannel> gpu_channel_;
   IPC::Sender* sender_;
   scoped_refptr<gpu::SyncPointManager> sync_point_manager_;
-  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+  scoped_refptr<base::MessageLoopProxy> message_loop_;
   scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

   std::queue<PendingMessage> pending_messages_;

   // Count of the number of IPCs forwarded to the GpuChannel.
   uint64 messages_forwarded_to_channel_;

   base::OneShotTimer<GpuChannelMessageFilter> timer_;

   bool a_stub_is_descheduled_;
(...skipping 33 matching lines...)
   subscription_ref_set_->AddObserver(this);
 }

 GpuChannel::~GpuChannel() {
   STLDeleteElements(&deferred_messages_);
   subscription_ref_set_->RemoveObserver(this);
   if (preempting_flag_.get())
     preempting_flag_->Reset();
 }

-void GpuChannel::Init(base::SingleThreadTaskRunner* io_task_runner,
+void GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
                       base::WaitableEvent* shutdown_event) {
   DCHECK(!channel_.get());

   // Map renderer ID to a (single) channel to that process.
-  channel_ =
-      IPC::SyncChannel::Create(channel_id_, IPC::Channel::MODE_SERVER, this,
-                               io_task_runner, false, shutdown_event);
+  channel_ = IPC::SyncChannel::Create(channel_id_,
+                                      IPC::Channel::MODE_SERVER,
+                                      this,
+                                      io_message_loop,
+                                      false,
+                                      shutdown_event);

-  filter_ = new GpuChannelMessageFilter(
-      weak_factory_.GetWeakPtr(), gpu_channel_manager_->sync_point_manager(),
-      base::ThreadTaskRunnerHandle::Get(), allow_future_sync_points_);
-  io_task_runner_ = io_task_runner;
+  filter_ =
+      new GpuChannelMessageFilter(weak_factory_.GetWeakPtr(),
+                                  gpu_channel_manager_->sync_point_manager(),
+                                  base::MessageLoopProxy::current(),
+                                  allow_future_sync_points_);
+  io_message_loop_ = io_message_loop;
   channel_->AddFilter(filter_.get());
   pending_valuebuffer_state_ = new gpu::ValueStateMap();
 }

 std::string GpuChannel::GetChannelName() {
   return channel_id_;
 }

 #if defined(OS_POSIX)
 base::ScopedFD GpuChannel::TakeRendererFileDescriptor() {
(...skipping 65 matching lines...)
 }

 void GpuChannel::OnScheduled() {
   if (handle_messages_scheduled_)
     return;
   // Post a task to handle any deferred messages. The deferred message queue is
   // not emptied here, which ensures that OnMessageReceived will continue to
   // defer newly received messages until the ones in the queue have all been
   // handled by HandleMessage. HandleMessage is invoked as a
   // task to prevent reentrancy.
-  base::ThreadTaskRunnerHandle::Get()->PostTask(
+  base::MessageLoop::current()->PostTask(
       FROM_HERE,
       base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
   handle_messages_scheduled_ = true;
 }

 void GpuChannel::StubSchedulingChanged(bool scheduled) {
   bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
   if (scheduled) {
     num_stubs_descheduled_--;
     OnScheduled();
   } else {
     num_stubs_descheduled_++;
   }
   DCHECK_LE(num_stubs_descheduled_, stubs_.size());
   bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

   if (a_stub_is_descheduled != a_stub_was_descheduled) {
     if (preempting_flag_.get()) {
-      io_task_runner_->PostTask(
+      io_message_loop_->PostTask(
           FROM_HERE,
           base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
-                     filter_, a_stub_is_descheduled));
+                     filter_,
+                     a_stub_is_descheduled));
     }
   }
 }

 CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer(
     const gfx::GLSurfaceHandle& window,
     int32 surface_id,
     const GPUCreateCommandBufferConfig& init_params,
     int32 route_id) {
   TRACE_EVENT1("gpu",
(...skipping 58 matching lines...)
   return router_.AddRoute(route_id, listener);
 }

 void GpuChannel::RemoveRoute(int32 route_id) {
   router_.RemoveRoute(route_id);
 }

 gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
   if (!preempting_flag_.get()) {
     preempting_flag_ = new gpu::PreemptionFlag;
-    io_task_runner_->PostTask(
-        FROM_HERE,
-        base::Bind(
+    io_message_loop_->PostTask(
+        FROM_HERE, base::Bind(
             &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
             filter_, preempting_flag_, num_stubs_descheduled_ > 0));
   }
   return preempting_flag_.get();
 }

 void GpuChannel::SetPreemptByFlag(
     scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
   preempted_flag_ = preempted_flag;

(...skipping 132 matching lines...)
   // stub, we need to make sure to reschedule the GpuChannel here.
   if (need_reschedule) {
     // This stub won't get a chance to reschedule, so update the count now.
     StubSchedulingChanged(true);
   }
 }

 void GpuChannel::MessageProcessed() {
   messages_processed_++;
   if (preempting_flag_.get()) {
-    io_task_runner_->PostTask(
-        FROM_HERE, base::Bind(&GpuChannelMessageFilter::MessageProcessed,
-                              filter_, messages_processed_));
+    io_message_loop_->PostTask(
+        FROM_HERE,
+        base::Bind(&GpuChannelMessageFilter::MessageProcessed,
+                   filter_,
+                   messages_processed_));
   }
 }

 void GpuChannel::CacheShader(const std::string& key,
                              const std::string& shader) {
   gpu_channel_manager_->Send(
       new GpuHostMsg_CacheShader(client_id_, key, shader));
 }

 void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
(...skipping 42 matching lines...)
       }
     }
 }

 void GpuChannel::HandleUpdateValueState(
     unsigned int target, const gpu::ValueState& state) {
   pending_valuebuffer_state_->UpdateState(target, state);
 }

 }  // namespace content
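
The comment at the top of GpuChannelMessageFilter in the diff above describes the sync point flow: the filter answers GpuCommandBufferMsg_InsertSyncPoint on the IO thread immediately, then posts the retire step to the main thread via the stored message loop. A simplified sketch of that pattern, assuming hypothetical names (SyncPointSource, HandleInsertSyncPointOnIoThread, and the reply callback); it is not the actual Chromium implementation:

// Not Chromium code: a sketch of "reply on the IO thread, retire on the
// main thread", using the MessageLoopProxy style this revert restores.
#include <stdint.h>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop_proxy.h"

class SyncPointSource {  // Hypothetical stand-in for gpu::SyncPointManager.
 public:
  uint32_t Generate() { return next_++; }          // Cheap; runs on the IO thread.
  void Retire(uint32_t sync_point) { /* wake waiters for |sync_point| */ }

 private:
  uint32_t next_ = 1;
};

void HandleInsertSyncPointOnIoThread(
    SyncPointSource* sync_points,
    scoped_refptr<base::MessageLoopProxy> main_loop,
    const base::Callback<void(uint32_t)>& reply) {
  uint32_t sync_point = sync_points->Generate();
  // Reply right away so the renderer is not blocked on the GPU main thread.
  reply.Run(sync_point);
  // The ordering-sensitive retire work happens later, on the main thread.
  main_loop->PostTask(FROM_HERE,
                      base::Bind(&SyncPointSource::Retire,
                                 base::Unretained(sync_points), sync_point));
}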