Chromium Code Reviews

Unified diff: content/common/gpu/gpu_channel.cc

Issue 1365563002: Make channel preemption not require view contexts for hookup (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@wakeup_gpu
Patch Set: . Created 5 years, 3 months ago
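Change summary (reviewer note, not part of the patch): the preemption flag is no longer created lazily by GpuChannel::GetPreemptionFlag() and attached to the message filter on the IO thread via SetPreemptingFlagAndSchedulingState(); instead it is passed into the GpuChannel and GpuChannelMessageFilter constructors, so preemption can be hooked up without a view context. Below is a minimal standalone sketch of that constructor-injection pattern; the types are simplified stand-ins, not the actual Chromium classes, and the flag's owner (presumably GpuChannelManager in the real code) is only assumed here.

// Sketch: the owner creates the preemption flag once and hands it to the
// channel (and its filter) at construction time, so no later hookup call
// and no view context are needed. Simplified stand-in types only.
#include <memory>

struct PreemptionFlag {
  bool raised = false;  // stand-in for gpu::PreemptionFlag's state
};

class MessageFilter {
 public:
  explicit MessageFilter(PreemptionFlag* preempting_flag)
      : preempting_flag_(preempting_flag) {}

  void Preempt() {
    if (preempting_flag_)
      preempting_flag_->raised = true;
  }

 private:
  PreemptionFlag* preempting_flag_;  // not owned
};

class Channel {
 public:
  explicit Channel(PreemptionFlag* preempting_flag)
      : preempting_flag_(preempting_flag),
        filter_(new MessageFilter(preempting_flag)) {}

  MessageFilter* filter() { return filter_.get(); }

 private:
  PreemptionFlag* preempting_flag_;  // not owned
  std::unique_ptr<MessageFilter> filter_;
};

int main() {
  // The owner creates the flag up front and passes it to every channel.
  PreemptionFlag flag;
  Channel channel(&flag);
  channel.filter()->Preempt();
  return flag.raised ? 0 : 1;
}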
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/common/gpu/gpu_channel.h"

 #if defined(OS_WIN)
 #include <windows.h>
 #endif

(...skipping 204 matching lines...)
   channel_messages_.push_back(msg.release());
   if (!had_messages)
     ScheduleHandleMessage();
 }

 GpuChannelMessageFilter::GpuChannelMessageFilter(
     const base::WeakPtr<GpuChannel>& gpu_channel,
     GpuChannelMessageQueue* message_queue,
     gpu::SyncPointManager* sync_point_manager,
     base::SingleThreadTaskRunner* task_runner,
+    gpu::PreemptionFlag* preempting_flag,
     bool future_sync_points)
     : preemption_state_(IDLE),
       gpu_channel_(gpu_channel),
       message_queue_(message_queue),
       sender_(nullptr),
       peer_pid_(base::kNullProcessId),
       sync_point_manager_(sync_point_manager),
       task_runner_(task_runner),
+      preempting_flag_(preempting_flag),
       a_stub_is_descheduled_(false),
       future_sync_points_(future_sync_points) {}

 GpuChannelMessageFilter::~GpuChannelMessageFilter() {}

 void GpuChannelMessageFilter::OnFilterAdded(IPC::Sender* sender) {
   DCHECK(!sender_);
   sender_ = sender;
   timer_ = make_scoped_ptr(new base::OneShotTimer<GpuChannelMessageFilter>);
   for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
(...skipping 118 matching lines...)
   }

   UpdatePreemptionState();
   return handled;
 }

 void GpuChannelMessageFilter::OnMessageProcessed() {
   UpdatePreemptionState();
 }

-void GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState(
-    gpu::PreemptionFlag* preempting_flag,
-    bool a_stub_is_descheduled) {
-  preempting_flag_ = preempting_flag;
-  a_stub_is_descheduled_ = a_stub_is_descheduled;
-}
-
 void GpuChannelMessageFilter::UpdateStubSchedulingState(
     bool a_stub_is_descheduled) {
   a_stub_is_descheduled_ = a_stub_is_descheduled;
   UpdatePreemptionState();
 }

 bool GpuChannelMessageFilter::Send(IPC::Message* message) {
   return sender_->Send(message);
 }

(...skipping 154 matching lines...)
 }

 bool GpuChannel::StreamState::HasRoutes() const {
   return !routes_.empty();
 }

 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                        GpuWatchdog* watchdog,
                        gfx::GLShareGroup* share_group,
                        gpu::gles2::MailboxManager* mailbox,
+                       gpu::PreemptionFlag* preempting_flag,
                        base::SingleThreadTaskRunner* task_runner,
                        base::SingleThreadTaskRunner* io_task_runner,
                        int client_id,
                        uint64_t client_tracing_id,
-                       bool software,
                        bool allow_future_sync_points,
                        bool allow_real_time_streams)
     : gpu_channel_manager_(gpu_channel_manager),
       channel_id_(IPC::Channel::GenerateVerifiedChannelID("gpu")),
+      preempting_flag_(preempting_flag),
       client_id_(client_id),
       client_tracing_id_(client_tracing_id),
       task_runner_(task_runner),
       io_task_runner_(io_task_runner),
-      share_group_(share_group ? share_group : new gfx::GLShareGroup),
-      mailbox_manager_(mailbox
-                           ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox)
-                           : gpu::gles2::MailboxManager::Create()),
+      share_group_(share_group),
+      mailbox_manager_(mailbox),
       subscription_ref_set_(new gpu::gles2::SubscriptionRefSet),
       pending_valuebuffer_state_(new gpu::ValueStateMap),
       watchdog_(watchdog),
-      software_(software),
       num_stubs_descheduled_(0),
       allow_future_sync_points_(allow_future_sync_points),
       allow_real_time_streams_(allow_real_time_streams),
       weak_factory_(this) {
   DCHECK(gpu_channel_manager);
   DCHECK(client_id);

   message_queue_ =
       GpuChannelMessageQueue::Create(weak_factory_.GetWeakPtr(), task_runner);

   filter_ = new GpuChannelMessageFilter(
       weak_factory_.GetWeakPtr(), message_queue_.get(),
       gpu_channel_manager_->sync_point_manager(), task_runner_.get(),
-      allow_future_sync_points_);
+      preempting_flag, allow_future_sync_points_);

   subscription_ref_set_->AddObserver(this);
 }

 GpuChannel::~GpuChannel() {
   // Clear stubs first because of dependencies.
   stubs_.clear();

   message_queue_->DeleteAndDisableMessages(gpu_channel_manager_);

(...skipping 90 matching lines...)
       io_task_runner_->PostTask(
           FROM_HERE,
           base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                      filter_, a_stub_is_descheduled));
     }
   }
 }

 CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer(
     const gfx::GLSurfaceHandle& window,
-    int32 surface_id,
     const GPUCreateCommandBufferConfig& init_params,
     int32 route_id) {
-  TRACE_EVENT1("gpu",
-               "GpuChannel::CreateViewCommandBuffer",
-               "surface_id",
-               surface_id);
+  TRACE_EVENT1("gpu", "GpuChannel::CreateViewCommandBuffer", "route_id",
+               route_id);

   int32 share_group_id = init_params.share_group_id;
   GpuCommandBufferStub* share_group = stubs_.get(share_group_id);

   if (!share_group && share_group_id != MSG_ROUTING_NONE)
     return CREATE_COMMAND_BUFFER_FAILED;

   int32 stream_id = init_params.stream_id;
   GpuStreamPriority stream_priority = init_params.stream_priority;

(...skipping 12 matching lines...)
   }

   // Virtualize compositor contexts on OS X to prevent performance regressions
   // when enabling FCM.
   // http://crbug.com/180463
   bool use_virtualized_gl_context = false;
 #if defined(OS_MACOSX)
   use_virtualized_gl_context = true;
 #endif

+  bool offscreen = false;
   scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
       this, task_runner_.get(), share_group, window, mailbox_manager_.get(),
       subscription_ref_set_.get(), pending_valuebuffer_state_.get(),
       gfx::Size(), disallowed_features_, init_params.attribs,
       init_params.gpu_preference, use_virtualized_gl_context, stream_id,
-      route_id, surface_id, watchdog_, software_, init_params.active_url));
+      route_id, offscreen, watchdog_, init_params.active_url));

   if (preempted_flag_.get())
     stub->SetPreemptByFlag(preempted_flag_);

   if (!router_.AddRoute(route_id, stub.get())) {
     DLOG(ERROR) << "GpuChannel::CreateViewCommandBuffer(): "
                    "failed to add route";
     return CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST;
   }

(...skipping 23 matching lines...)
 }

 bool GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
   return router_.AddRoute(route_id, listener);
 }

 void GpuChannel::RemoveRoute(int32 route_id) {
   router_.RemoveRoute(route_id);
 }

-gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
-  if (!preempting_flag_.get()) {
-    preempting_flag_ = new gpu::PreemptionFlag;
-    io_task_runner_->PostTask(
-        FROM_HERE,
-        base::Bind(
-            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
-            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
-  }
-  return preempting_flag_.get();
-}
-
 void GpuChannel::SetPreemptByFlag(
     scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
+  DCHECK(stubs_.empty());
   preempted_flag_ = preempted_flag;
-
-  for (auto& kv : stubs_)
-    kv.second->SetPreemptByFlag(preempted_flag_);
 }

 void GpuChannel::OnDestroy() {
   TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
   gpu_channel_manager_->RemoveChannel(client_id_);
 }

 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
   bool handled = true;
   IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
(...skipping 147 matching lines...)
   }

   auto stream_it = streams_.find(stream_id);
   if (stream_it != streams_.end() &&
       stream_priority != GpuStreamPriority::INHERIT &&
       stream_priority != stream_it->second.priority()) {
     *succeeded = false;
     return;
   }

+  bool offscreen = true;
   scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
       this, task_runner_.get(), share_group, gfx::GLSurfaceHandle(),
       mailbox_manager_.get(), subscription_ref_set_.get(),
       pending_valuebuffer_state_.get(), size, disallowed_features_,
       init_params.attribs, init_params.gpu_preference, false,
-      init_params.stream_id, route_id, 0, watchdog_, software_,
+      init_params.stream_id, route_id, offscreen, watchdog_,
       init_params.active_url));

   if (preempted_flag_.get())
     stub->SetPreemptByFlag(preempted_flag_);

   if (!router_.AddRoute(route_id, stub.get())) {
     DLOG(ERROR) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
                    "failed to add route";
     *succeeded = false;
     return;
(...skipping 107 matching lines...)
     }
   }
 }

 void GpuChannel::HandleUpdateValueState(
     unsigned int target, const gpu::ValueState& state) {
   pending_valuebuffer_state_->UpdateState(target, state);
 }

 }  // namespace content