Chromium Code Reviews

Side by Side Diff: gpu/ipc/service/gpu_channel.cc

Issue 2870333003: gpu: Remove gpu channel filter and queue from header. (Closed)
Patch Set: Created 3 years, 7 months ago
1 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 2 // Use of this source code is governed by a BSD-style license that can be
3 3 // found in the LICENSE file.
4 4
5 5 #include "gpu/ipc/service/gpu_channel.h"
6 6
7 7 #include <utility>
8 8
9 9 #if defined(OS_WIN)
10 10 #include <windows.h>
(...skipping 54 matching lines...)
65 65 // below this threshold.
66 66 const int64_t kStopPreemptThresholdMs = kVsyncIntervalMs;
67 67
68 68 CommandBufferId GenerateCommandBufferId(int channel_id, int32_t route_id) {
69 69   return CommandBufferId::FromUnsafeValue(
70 70       (static_cast<uint64_t>(channel_id) << 32) | route_id);
71 71 }
72 72
73 73 }  // anonymous namespace
74 74
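GenerateCommandBufferId above packs the channel id into the upper 32 bits and the route id into the lower 32 bits of the 64-bit command buffer id. A standalone sketch of that packing follows; the Unpack* helpers and main() are hypothetical additions used only to make the bit layout explicit, not part of this patch.

// Standalone illustration of the id packing used by GenerateCommandBufferId.
// The Unpack* helpers and main() are hypothetical and exist only for this
// sketch; route ids are assumed to be non-negative, as in practice.
#include <cstdint>
#include <cstdio>

uint64_t PackCommandBufferId(int channel_id, int32_t route_id) {
  // Upper 32 bits: channel id; lower 32 bits: route id.
  return (static_cast<uint64_t>(channel_id) << 32) | route_id;
}

int UnpackChannelId(uint64_t id) {
  return static_cast<int>(id >> 32);
}

int32_t UnpackRouteId(uint64_t id) {
  return static_cast<int32_t>(id & 0xffffffffu);
}

int main() {
  uint64_t id = PackCommandBufferId(/*channel_id=*/7, /*route_id=*/42);
  // Prints: id=0x70000002a channel=7 route=42
  std::printf("id=0x%llx channel=%d route=%d\n",
              static_cast<unsigned long long>(id), UnpackChannelId(id),
              UnpackRouteId(id));
  return 0;
}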
75 struct GpuChannelMessage {
76 IPC::Message message;
77 uint32_t order_number;
78 base::TimeTicks time_received;
79
80 GpuChannelMessage(const IPC::Message& msg,
81 uint32_t order_num,
82 base::TimeTicks ts)
83 : message(msg), order_number(order_num), time_received(ts) {}
84
85 private:
86 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessage);
87 };
88
89 // This message queue counts and timestamps each message forwarded to the
90 // channel so that we can preempt other channels if a message takes too long to
91 // process. To guarantee fairness, we must wait a minimum amount of time before
92 // preempting and we limit the amount of time that we can preempt in one shot
93 // (see constants above).
94 class GpuChannelMessageQueue
95 : public base::RefCountedThreadSafe<GpuChannelMessageQueue> {
96 public:
97 GpuChannelMessageQueue(
98 GpuChannel* channel,
99 scoped_refptr<SyncPointOrderData> sync_point_order_data,
100 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
101 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
102 scoped_refptr<PreemptionFlag> preempting_flag,
103 scoped_refptr<PreemptionFlag> preempted_flag);
104
105 void Destroy();
106
107 SequenceId sequence_id() const {
108 return sync_point_order_data_->sequence_id();
109 }
110
111 bool IsScheduled() const;
112 void SetScheduled(bool scheduled);
113
114 // Should be called before a message begins to be processed. Returns false if
115 // there are no messages to process.
116 const GpuChannelMessage* BeginMessageProcessing();
117 // Should be called if a message began processing but did not finish.
118 void PauseMessageProcessing();
119 // Should be called if a message is completely processed. Returns true if
120 // there are more messages to process.
121 void FinishMessageProcessing();
122
123 void PushBackMessage(const IPC::Message& message);
124
125 private:
126 enum PreemptionState {
127 // Either there's no other channel to preempt, there are no messages
128 // pending processing, or we just finished preempting and have to wait
129 // before preempting again.
130 IDLE,
131 // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
132 WAITING,
133 // We can preempt whenever any IPC processing takes more than
134 // kPreemptWaitTimeMs.
135 CHECKING,
136 // We are currently preempting (i.e. no stub is descheduled).
137 PREEMPTING,
138 // We would like to preempt, but some stub is descheduled.
139 WOULD_PREEMPT_DESCHEDULED,
140 };
141
142 friend class base::RefCountedThreadSafe<GpuChannelMessageQueue>;
143
144 ~GpuChannelMessageQueue();
145
146 void PostHandleMessageOnQueue();
147
148 void UpdatePreemptionState();
149 void UpdatePreemptionStateHelper();
150
151 void UpdateStateIdle();
152 void UpdateStateWaiting();
153 void UpdateStateChecking();
154 void UpdateStatePreempting();
155 void UpdateStateWouldPreemptDescheduled();
156
157 void TransitionToIdle();
158 void TransitionToWaiting();
159 void TransitionToChecking();
160 void TransitionToPreempting();
161 void TransitionToWouldPreemptDescheduled();
162
163 bool ShouldTransitionToIdle() const;
164
165 // These can be accessed from both IO and main threads and are protected by
166 // |channel_lock_|.
167 bool scheduled_ = true;
168 GpuChannel* channel_ = nullptr; // set to nullptr on Destroy
169 std::deque<std::unique_ptr<GpuChannelMessage>> channel_messages_;
170 bool handle_message_post_task_pending_ = false;
171 mutable base::Lock channel_lock_;
172
173 // The following are accessed on the IO thread only.
174 // No lock is necessary for preemption state because it's only accessed on the
175 // IO thread.
176 PreemptionState preemption_state_ = IDLE;
177 // Maximum amount of time that we can spend in PREEMPTING.
178 // It is reset when we transition to IDLE.
179 base::TimeDelta max_preemption_time_;
180 // This timer is used and runs tasks on the IO thread.
181 std::unique_ptr<base::OneShotTimer> timer_;
182 base::ThreadChecker io_thread_checker_;
183
184 // Keeps track of sync point related state such as message order numbers.
185 scoped_refptr<SyncPointOrderData> sync_point_order_data_;
186
187 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
188 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
189 scoped_refptr<PreemptionFlag> preempting_flag_;
190 scoped_refptr<PreemptionFlag> preempted_flag_;
191
192 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue);
193 };
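One way to read the PreemptionState machine declared above: WAITING enforces the minimum wait (kPreemptWaitTimeMs) before the queue even considers preempting, CHECKING promotes to PREEMPTING once IPC processing takes longer than that wait, and PREEMPTING is capped by max_preemption_time_ before dropping back to IDLE. Below is a standalone, simplified sketch of those transitions under those assumptions; it ignores the timer plumbing and the WOULD_PREEMPT_DESCHEDULED case, and every name in it is invented for illustration (std::chrono stands in for base::TimeDelta).

// A hypothetical, standalone reduction of the PreemptionState transitions
// described above. The real queue drives this from a base::OneShotTimer on
// the IO thread and additionally handles WOULD_PREEMPT_DESCHEDULED; none of
// the names below exist in this patch.
#include <chrono>

enum class SketchState { kIdle, kWaiting, kChecking, kPreempting };

struct PreemptionSketch {
  using Millis = std::chrono::milliseconds;

  SketchState state = SketchState::kIdle;

  void Update(bool has_pending_messages,
              Millis oldest_message_age,  // age of oldest unprocessed message
              Millis time_in_state,       // time since the last transition
              Millis preempt_wait_time,   // minimum wait before preempting
              Millis max_preempt_time) {  // cap on one preemption burst
    switch (state) {
      case SketchState::kIdle:
        if (has_pending_messages)
          state = SketchState::kWaiting;
        break;
      case SketchState::kWaiting:
        // Fairness: wait a minimum amount of time before considering
        // preemption at all.
        if (time_in_state >= preempt_wait_time)
          state = SketchState::kChecking;
        break;
      case SketchState::kChecking:
        if (!has_pending_messages)
          state = SketchState::kIdle;
        else if (oldest_message_age >= preempt_wait_time)
          state = SketchState::kPreempting;  // would raise preempting_flag_
        break;
      case SketchState::kPreempting:
        // Limit how long other channels stay preempted in one shot; the
        // budget (max_preemption_time_ in the real code) is reset on IDLE.
        if (!has_pending_messages || time_in_state >= max_preempt_time)
          state = SketchState::kIdle;  // would lower preempting_flag_
        break;
    }
  }
};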
194
195 // This filter does the following:
196 // - handles the Nop message used for verifying sync tokens on the IO thread
197 // - forwards messages to child message filters
198 // - posts control and out of order messages to the main thread
199 // - forwards other messages to the message queue or the scheduler
200 class GPU_EXPORT GpuChannelMessageFilter : public IPC::MessageFilter {
201 public:
202 GpuChannelMessageFilter(
203 GpuChannel* gpu_channel,
204 Scheduler* scheduler,
205 scoped_refptr<GpuChannelMessageQueue> message_queue,
206 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner);
207
208 // Methods called on main thread.
209 void Destroy();
210
211 // Called when scheduler is enabled.
212 void AddRoute(int32_t route_id, SequenceId sequence_id);
213 void RemoveRoute(int32_t route_id);
214
215 // Methods called on IO thread.
216 // IPC::MessageFilter implementation.
217 void OnFilterAdded(IPC::Channel* channel) override;
218 void OnFilterRemoved() override;
219 void OnChannelConnected(int32_t peer_pid) override;
220 void OnChannelError() override;
221 void OnChannelClosing() override;
222 bool OnMessageReceived(const IPC::Message& message) override;
223
224 void AddChannelFilter(scoped_refptr<IPC::MessageFilter> filter);
225 void RemoveChannelFilter(scoped_refptr<IPC::MessageFilter> filter);
226
227 private:
228 ~GpuChannelMessageFilter() override;
229
230 bool MessageErrorHandler(const IPC::Message& message, const char* error_msg);
231
232 IPC::Channel* ipc_channel_ = nullptr;
233 base::ProcessId peer_pid_ = base::kNullProcessId;
234 std::vector<scoped_refptr<IPC::MessageFilter>> channel_filters_;
235
236 GpuChannel* gpu_channel_ = nullptr;
237 // Map of route id to scheduler sequence id.
238 base::flat_map<int32_t, SequenceId> route_sequences_;
239 mutable base::Lock gpu_channel_lock_;
240
241 Scheduler* scheduler_;
242 scoped_refptr<GpuChannelMessageQueue> message_queue_;
243 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
244
245 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageFilter);
246 };
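The comment above fixes the order in which an incoming IPC is offered to handlers: the sync-token Nop is answered directly on the IO thread, child filters get the next chance, control and out-of-order messages go to the main thread, and everything else goes to the scheduler or the message queue. A purely illustrative, standalone condensation of that ordering follows; all types and names in it are invented, and the real logic is OnMessageReceived further down in this file.

#include <functional>
#include <vector>

// Invented types that stand in for IPC::Message and the child filter list,
// used only to show the dispatch priority described by the class comment.
struct FakeMessage {
  bool is_nop = false;                     // GpuChannelMsg_Nop
  bool is_control_or_out_of_order = false;
};

enum class Disposition { kRepliedOnIo, kTakenByChildFilter, kPostedToMain, kForwarded };

Disposition Dispatch(
    const FakeMessage& msg,
    const std::vector<std::function<bool(const FakeMessage&)>>& child_filters) {
  if (msg.is_nop)
    return Disposition::kRepliedOnIo;  // reply immediately on the IO thread
  for (const auto& filter : child_filters) {
    if (filter(msg))
      return Disposition::kTakenByChildFilter;
  }
  if (msg.is_control_or_out_of_order)
    return Disposition::kPostedToMain;  // handled on the main thread
  return Disposition::kForwarded;  // to the scheduler or GpuChannelMessageQueue
}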
247
75 248 SyncChannelFilteredSender::SyncChannelFilteredSender(
76 249     IPC::ChannelHandle channel_handle,
77 250     IPC::Listener* listener,
78 251     scoped_refptr<base::SingleThreadTaskRunner> ipc_task_runner,
79 252     base::WaitableEvent* shutdown_event)
80 253     : channel_(IPC::SyncChannel::Create(channel_handle,
81 254                                         IPC::Channel::MODE_SERVER,
82 255                                         listener,
83 256                                         ipc_task_runner,
84 257                                         false,
(...skipping 423 matching lines...)
508 681   DCHECK(ipc_channel_);
509 682
510 683   if (!gpu_channel_)
511 684     return MessageErrorHandler(message, "Channel destroyed");
512 685
513 686   if (message.should_unblock() || message.is_reply())
514 687     return MessageErrorHandler(message, "Unexpected message type");
515 688
516 689   if (message.type() == GpuChannelMsg_Nop::ID) {
517 690     IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
518     Send(reply);                           691     ipc_channel_->Send(reply);
519 692     return true;
520 693   }
521 694
522 695   for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
523 696     if (filter->OnMessageReceived(message))
524 697       return true;
525 698   }
526 699
527 700   base::AutoLock auto_lock(gpu_channel_lock_);
528 701   if (!gpu_channel_)
(...skipping 25 matching lines...)
554 727                        gpu_channel_->AsWeakPtr(), message),
555 728         sync_token_fences);
556 729   } else {
557 730     // Message queue takes care of PostTask.
558 731     message_queue_->PushBackMessage(message);
559 732   }
560 733
561 734   return true;
562 735 }
563 736
564 bool GpuChannelMessageFilter::Send(IPC::Message* message) {
565 return ipc_channel_->Send(message);
566 }
567
568 737 bool GpuChannelMessageFilter::MessageErrorHandler(const IPC::Message& message,
569 738                                                   const char* error_msg) {
570 739   DLOG(ERROR) << error_msg;
571 740   if (message.is_sync()) {
572 741     IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
573 742     reply->set_reply_error();
574     Send(reply);                           743     ipc_channel_->Send(reply);
575 744   }
576 745   return true;
577 746 }
578 747
579 748 // Definitions for constructor and destructor of this interface are needed to
580 749 // avoid MSVC LNK2019.
581 750 FilteredSender::FilteredSender() = default;
582 751
583 752 FilteredSender::~FilteredSender() = default;
584 753
(...skipping 206 matching lines...)
791 960   if (stub && (stub->HasUnprocessedCommands() || !stub->IsScheduled())) {
792 961     DCHECK((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID == msg.type() ||
793 962            (uint32_t)GpuCommandBufferMsg_WaitSyncToken::ID == msg.type());
794 963     DCHECK_EQ(stub->IsScheduled(), message_queue_->IsScheduled());
795 964     message_queue_->PauseMessageProcessing();
796 965   } else {
797 966     message_queue_->FinishMessageProcessing();
798 967   }
799 968 }
800 969
970 void GpuChannel::HandleMessageForTesting(const IPC::Message& msg) {
971 // Message filter gets message first on IO thread.
972 filter_->OnMessageReceived(msg);
973 }
974
801 975 void GpuChannel::HandleMessageHelper(const IPC::Message& msg) {
802 976   int32_t routing_id = msg.routing_id();
803 977
804 978   bool handled = false;
805 979   if (routing_id == MSG_ROUTING_CONTROL) {
806 980     handled = OnControlMessageReceived(msg);
807 981   } else {
808 982     handled = router_.RouteMessage(msg);
809 983   }
810 984
(...skipping 206 matching lines...)
1017 1191
1018 1192   return manager->gpu_memory_buffer_factory()
1019 1193       ->AsImageFactory()
1020 1194       ->CreateImageForGpuMemoryBuffer(handle, size, format, internalformat,
1021 1195                                       client_id_, surface_handle);
1022 1196     }
1023 1197   }
1024 1198 }
1025 1199
1026 1200 }  // namespace gpu