Chromium Code Reviews

Side by Side Diff: gpu/ipc/service/gpu_channel.h

Issue 2440093003: WIP GPU scheduler + delayed activation / tile draw
Patch Set: SignalSyncToken -> IsFenceSyncReleased | Created 4 years ago
OLD | NEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef GPU_IPC_SERVICE_GPU_CHANNEL_H_ 5 #ifndef GPU_IPC_SERVICE_GPU_CHANNEL_H_
6 #define GPU_IPC_SERVICE_GPU_CHANNEL_H_ 6 #define GPU_IPC_SERVICE_GPU_CHANNEL_H_
7 7
8 #include <stddef.h> 8 #include <stddef.h>
9 #include <stdint.h> 9 #include <stdint.h>
10 10
11 #include <deque>
11 #include <memory> 12 #include <memory>
12 #include <string> 13 #include <string>
14 #include <unordered_map>
15 #include <vector>
13 16
14 #include "base/containers/hash_tables.h" 17 #include "base/containers/hash_tables.h"
15 #include "base/containers/scoped_ptr_hash_map.h" 18 #include "base/containers/scoped_ptr_hash_map.h"
16 #include "base/macros.h" 19 #include "base/macros.h"
17 #include "base/memory/ref_counted.h" 20 #include "base/memory/ref_counted.h"
18 #include "base/memory/weak_ptr.h" 21 #include "base/memory/weak_ptr.h"
19 #include "base/process/process.h" 22 #include "base/process/process.h"
20 #include "base/threading/thread_checker.h" 23 #include "base/threading/thread_checker.h"
21 #include "base/trace_event/memory_dump_provider.h" 24 #include "base/trace_event/memory_dump_provider.h"
22 #include "build/build_config.h" 25 #include "build/build_config.h"
23 #include "gpu/gpu_export.h" 26 #include "gpu/gpu_export.h"
24 #include "gpu/ipc/common/gpu_stream_constants.h" 27 #include "gpu/ipc/common/gpu_stream_constants.h"
25 #include "gpu/ipc/service/gpu_command_buffer_stub.h" 28 #include "gpu/ipc/service/gpu_command_buffer_stub.h"
29 #include "gpu/ipc/service/gpu_command_stream.h"
26 #include "gpu/ipc/service/gpu_memory_manager.h" 30 #include "gpu/ipc/service/gpu_memory_manager.h"
27 #include "ipc/ipc_sync_channel.h" 31 #include "ipc/ipc_sync_channel.h"
28 #include "ipc/message_router.h" 32 #include "ipc/message_router.h"
29 #include "ui/gfx/geometry/size.h" 33 #include "ui/gfx/geometry/size.h"
30 #include "ui/gfx/native_widget_types.h" 34 #include "ui/gfx/native_widget_types.h"
31 #include "ui/gl/gl_share_group.h" 35 #include "ui/gl/gl_share_group.h"
32 #include "ui/gl/gpu_preference.h" 36 #include "ui/gl/gpu_preference.h"
33 37
34 struct GPUCreateCommandBufferConfig; 38 struct GPUCreateCommandBufferConfig;
35 39
36 namespace base { 40 namespace base {
37 class WaitableEvent; 41 class WaitableEvent;
38 } 42 }
39 43
40 namespace IPC { 44 namespace IPC {
41 class MessageFilter; 45 class MessageFilter;
42 } 46 }
43 47
44 namespace gpu { 48 namespace gpu {
45 49
46 class PreemptionFlag; 50 class PreemptionFlag;
47 class SyncPointOrderData; 51 class SyncPointOrderData;
48 class SyncPointManager; 52 class SyncPointManager;
49 class GpuChannelManager; 53 class GpuChannelManager;
50 class GpuChannelMessageFilter; 54 class GpuChannelMessageFilter;
51 class GpuChannelMessageQueue; 55 class GpuChannelMessageQueue;
52 class GpuWatchdogThread; 56 class GpuWatchdogThread;
57 class GpuScheduler;
53 58
54 // Encapsulates an IPC channel between the GPU process and one renderer 59 // Encapsulates an IPC channel between the GPU process and one renderer
55 // process. On the renderer side there's a corresponding GpuChannelHost. 60 // process. On the renderer side there's a corresponding GpuChannelHost.
56 class GPU_EXPORT GpuChannel 61 class GPU_EXPORT GpuChannel : public IPC::Listener,
57 : public IPC::Listener, 62 public IPC::Sender,
58 public IPC::Sender { 63 public base::SupportsWeakPtr<GpuChannel> {
59 public: 64 public:
60 // Takes ownership of the renderer process handle. 65 // Takes ownership of the renderer process handle.
61 GpuChannel(GpuChannelManager* gpu_channel_manager, 66 GpuChannel(GpuChannelManager* gpu_channel_manager,
62 SyncPointManager* sync_point_manager, 67 SyncPointManager* sync_point_manager,
63 GpuWatchdogThread* watchdog, 68 GpuWatchdogThread* watchdog,
64 gl::GLShareGroup* share_group, 69 GpuScheduler* scheduler,
65 gles2::MailboxManager* mailbox_manager, 70 scoped_refptr<gl::GLShareGroup> share_group,
66 PreemptionFlag* preempting_flag, 71 scoped_refptr<gles2::MailboxManager> mailbox_manager,
67 PreemptionFlag* preempted_flag, 72 scoped_refptr<PreemptionFlag> preempting_flag,
68 base::SingleThreadTaskRunner* task_runner, 73 scoped_refptr<PreemptionFlag> preemption_flag,
69 base::SingleThreadTaskRunner* io_task_runner, 74 scoped_refptr<base::SingleThreadTaskRunner> task_runner,
75 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
70 int32_t client_id, 76 int32_t client_id,
71 uint64_t client_tracing_id, 77 uint64_t client_tracing_id,
72 bool allow_view_command_buffers, 78 bool allow_view_command_buffers,
73 bool allow_real_time_streams); 79 bool allow_real_time_streams);
74 ~GpuChannel() override; 80 ~GpuChannel() override;
75 81
76 // Initializes the IPC channel. Caller takes ownership of the client FD in 82 // Initializes the IPC channel. Caller takes ownership of the client FD in
77 // the returned handle and is responsible for closing it. 83 // the returned handle and is responsible for closing it.
78 virtual IPC::ChannelHandle Init(base::WaitableEvent* shutdown_event); 84 virtual IPC::ChannelHandle Init(base::WaitableEvent* shutdown_event);
79 85
80 void SetUnhandledMessageListener(IPC::Listener* listener); 86 void SetUnhandledMessageListener(IPC::Listener* listener);
81 87
82 // Get the GpuChannelManager that owns this channel. 88 // Get the GpuChannelManager that owns this channel.
83 GpuChannelManager* gpu_channel_manager() const { 89 GpuChannelManager* gpu_channel_manager() const {
84 return gpu_channel_manager_; 90 return gpu_channel_manager_;
85 } 91 }
86 92
87 SyncPointManager* sync_point_manager() const { return sync_point_manager_; } 93 SyncPointManager* sync_point_manager() const { return sync_point_manager_; }
88 94
89 GpuWatchdogThread* watchdog() const { return watchdog_; } 95 GpuWatchdogThread* watchdog() const { return watchdog_; }
90 96
91 const scoped_refptr<gles2::MailboxManager>& mailbox_manager() const { 97 const scoped_refptr<gles2::MailboxManager>& mailbox_manager() const {
92 return mailbox_manager_; 98 return mailbox_manager_;
93 } 99 }
94 100
95 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner() const { 101 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner() const {
96 return task_runner_; 102 return task_runner_;
97 } 103 }
98 104
99 const scoped_refptr<PreemptionFlag>& preempted_flag() const {
100 return preempted_flag_;
101 }
102
103 virtual base::ProcessId GetClientPID() const; 105 virtual base::ProcessId GetClientPID() const;
104 106
105 int client_id() const { return client_id_; } 107 int client_id() const { return client_id_; }
106 108
107 uint64_t client_tracing_id() const { return client_tracing_id_; } 109 uint64_t client_tracing_id() const { return client_tracing_id_; }
108 110
109 base::WeakPtr<GpuChannel> AsWeakPtr();
110
111 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner() const { 111 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner() const {
112 return io_task_runner_; 112 return io_task_runner_;
113 } 113 }
114 114
115 base::Callback<bool(void)> GetPreemptionCallback() const;
116
115 // IPC::Listener implementation: 117 // IPC::Listener implementation:
116 bool OnMessageReceived(const IPC::Message& msg) override; 118 bool OnMessageReceived(const IPC::Message& msg) override;
117 void OnChannelConnected(int32_t peer_pid) override; 119 void OnChannelConnected(int32_t peer_pid) override;
118 void OnChannelError() override; 120 void OnChannelError() override;
119 121
120 // IPC::Sender implementation: 122 // IPC::Sender implementation:
121 bool Send(IPC::Message* msg) override; 123 bool Send(IPC::Message* msg) override;
122 124
123 void OnStreamRescheduled(int32_t stream_id, bool scheduled); 125 void ScheduleCommandBuffer(GpuCommandBufferStub* stub);
126 void DescheduleCommandBuffer(GpuCommandBufferStub* stub);
124 127
125 gl::GLShareGroup* share_group() const { return share_group_.get(); } 128 gl::GLShareGroup* share_group() const { return share_group_.get(); }
126 129
127 GpuCommandBufferStub* LookupCommandBuffer(int32_t route_id); 130 GpuCommandBufferStub* LookupCommandBuffer(int32_t route_id);
128 131
129 void LoseAllContexts(); 132 void LoseAllContexts();
130 void MarkAllContextsLost(); 133 void MarkAllContextsLost();
131 134
132 // Called to add a listener for a particular message routing ID. 135 // Called to add a listener for a particular message routing ID.
133 // Returns true if succeeded. 136 // Returns true if succeeded.
157 (...skipping 18 matching lines...)
152 155
153 GpuChannelMessageFilter* filter() const { return filter_.get(); } 156 GpuChannelMessageFilter* filter() const { return filter_.get(); }
154 157
155 // Returns the global order number for the last processed IPC message. 158 // Returns the global order number for the last processed IPC message.
156 uint32_t GetProcessedOrderNum() const; 159 uint32_t GetProcessedOrderNum() const;
157 160
158 // Returns the global order number for the last unprocessed IPC message. 161 // Returns the global order number for the last unprocessed IPC message.
159 uint32_t GetUnprocessedOrderNum() const; 162 uint32_t GetUnprocessedOrderNum() const;
160 163
161 // Returns the shared sync point global order data for the stream. 164 // Returns the shared sync point global order data for the stream.
162 scoped_refptr<SyncPointOrderData> GetSyncPointOrderData( 165 scoped_refptr<SyncPointOrderData> GetSyncPointOrderData(int32_t stream_id);
163 int32_t stream_id);
164 166
165 void PostHandleOutOfOrderMessage(const IPC::Message& message); 167 void PostHandleOutOfOrderMessage(const IPC::Message& msg);
166 void PostHandleMessage(const scoped_refptr<GpuChannelMessageQueue>& queue); 168
169 void PostHandleMessageOnStream(scoped_refptr<GpuChannelMessageQueue> stream);
170
171 void HandleMessageOnStream(scoped_refptr<GpuChannelMessageQueue> stream);
167 172
168 // Synchronously handle the message to make testing convenient. 173 // Synchronously handle the message to make testing convenient.
169 void HandleMessageForTesting(const IPC::Message& msg); 174 void HandleMessageForTesting(const IPC::Message& msg);
170 175
171 #if defined(OS_ANDROID) 176 #if defined(OS_ANDROID)
172 const GpuCommandBufferStub* GetOneStub() const; 177 const GpuCommandBufferStub* GetOneStub() const;
173 #endif 178 #endif
174 179
175 protected: 180 protected:
176 // The message filter on the io thread. 181 // The message filter on the io thread.
177 scoped_refptr<GpuChannelMessageFilter> filter_; 182 scoped_refptr<GpuChannelMessageFilter> filter_;
178 183
179 // Map of routing id to command buffer stub. 184 // Map of routing id to command buffer stub.
180 base::ScopedPtrHashMap<int32_t, std::unique_ptr<GpuCommandBufferStub>> stubs_; 185 std::unordered_map<int32_t, std::unique_ptr<GpuCommandBufferStub>> stubs_;
186
187 std::unordered_map<int32_t, scoped_refptr<GpuChannelMessageQueue>> streams_;
181 188
182 private: 189 private:
183 friend class TestGpuChannel; 190 friend class TestGpuChannel;
184 191
185 bool OnControlMessageReceived(const IPC::Message& msg); 192 bool OnControlMessageReceived(const IPC::Message& msg);
186 193
187 void HandleMessage(const scoped_refptr<GpuChannelMessageQueue>& queue); 194 void HandleMessageHelper(const IPC::Message& msg);
188 195
189 // Some messages such as WaitForGetOffsetInRange and WaitForTokenInRange are 196 // Some messages such as WaitForGetOffsetInRange and WaitForTokenInRange are
190 // processed as soon as possible because the client is blocked until they 197 // processed as soon as possible because the client is blocked until they
191 // are completed. 198 // are completed.
192 void HandleOutOfOrderMessage(const IPC::Message& msg); 199 void HandleOutOfOrderMessage(const IPC::Message& msg);
193 200
194 void HandleMessageHelper(const IPC::Message& msg);
195
196 scoped_refptr<GpuChannelMessageQueue> CreateStream( 201 scoped_refptr<GpuChannelMessageQueue> CreateStream(
197 int32_t stream_id, 202 int32_t stream_id,
198 GpuStreamPriority stream_priority); 203 GpuStreamPriority stream_priority);
199 204
200 scoped_refptr<GpuChannelMessageQueue> LookupStream(int32_t stream_id); 205 scoped_refptr<GpuChannelMessageQueue> LookupStream(int32_t stream_id);
201 206
202 void DestroyStreamIfNecessary( 207 scoped_refptr<GpuChannelMessageQueue> LookupStreamByCommandBufferId(
203 const scoped_refptr<GpuChannelMessageQueue>& queue); 208 CommandBufferId command_buffer_id);
204
205 void AddRouteToStream(int32_t route_id, int32_t stream_id);
206 void RemoveRouteFromStream(int32_t route_id);
207 209
208 // Message handlers for control messages. 210 // Message handlers for control messages.
209 void OnCreateCommandBuffer(const GPUCreateCommandBufferConfig& init_params, 211 void OnCreateCommandBuffer(const GPUCreateCommandBufferConfig& init_params,
210 int32_t route_id, 212 int32_t route_id,
211 base::SharedMemoryHandle shared_state_shm, 213 base::SharedMemoryHandle shared_state_shm,
212 bool* result, 214 bool* result,
213 gpu::Capabilities* capabilities); 215 gpu::Capabilities* capabilities);
214 void OnDestroyCommandBuffer(int32_t route_id); 216 void OnDestroyCommandBuffer(int32_t route_id);
215 void OnGetDriverBugWorkArounds( 217 void OnGetDriverBugWorkArounds(
216 std::vector<std::string>* gpu_driver_bug_workarounds); 218 std::vector<std::string>* gpu_driver_bug_workarounds);
217 219
218 std::unique_ptr<GpuCommandBufferStub> CreateCommandBuffer( 220 GpuCommandBufferStub* CreateCommandBuffer(
219 const GPUCreateCommandBufferConfig& init_params, 221 const GPUCreateCommandBufferConfig& init_params,
220 int32_t route_id, 222 int32_t route_id,
221 std::unique_ptr<base::SharedMemory> shared_state_shm); 223 std::unique_ptr<base::SharedMemory> shared_state_shm);
222 224
223 // The lifetime of objects of this class is managed by a GpuChannelManager. 225 // The lifetime of objects of this class is managed by a GpuChannelManager.
224 // The GpuChannelManager destroys all the GpuChannels it owns when it is 226 // The GpuChannelManager destroys all the GpuChannels it owns when it is
225 // destroyed, so a raw pointer is safe. 227 // destroyed, so a raw pointer is safe.
226 GpuChannelManager* const gpu_channel_manager_; 228 GpuChannelManager* const gpu_channel_manager_;
227 229
228 // Sync point manager. Outlives the channel and is guaranteed to outlive the 230 // Sync point manager. Outlives the channel and is guaranteed to outlive the
229 // message loop. 231 // message loop.
230 SyncPointManager* const sync_point_manager_; 232 SyncPointManager* const sync_point_manager_;
231 233
234 GpuScheduler* scheduler_;
235
232 std::unique_ptr<IPC::SyncChannel> channel_; 236 std::unique_ptr<IPC::SyncChannel> channel_;
233 237
234 IPC::Listener* unhandled_message_listener_; 238 IPC::Listener* unhandled_message_listener_;
235 239
236 // Used to implement message routing functionality to CommandBuffer objects 240 // Used to implement message routing functionality to CommandBuffer objects
237 IPC::MessageRouter router_; 241 IPC::MessageRouter router_;
238 242
239 // Whether the processing of IPCs on this channel is stalled and we should 243 // This flag, if provided, should be set when processing of IPC messages on
240 // preempt other GpuChannels. 244 // this channel is stalled and we should preempt other channels.
241 scoped_refptr<PreemptionFlag> preempting_flag_; 245 scoped_refptr<PreemptionFlag> preempting_flag_;
242 246
243 // If non-NULL, all stubs on this channel should stop processing GL 247 // This flag, if provided, tells command buffers to stop processing commands
244 // commands (via their CommandExecutor) when preempted_flag_->IsSet() 248 // and yield when set.
245 scoped_refptr<PreemptionFlag> preempted_flag_; 249 scoped_refptr<PreemptionFlag> preemption_flag_;
246 250
247 // The id of the client who is on the other side of the channel. 251 // The id of the client who is on the other side of the channel.
248 const int32_t client_id_; 252 const int32_t client_id_;
249 253
250 // The tracing ID used for memory allocations associated with this client. 254 // The tracing ID used for memory allocations associated with this client.
251 const uint64_t client_tracing_id_; 255 const uint64_t client_tracing_id_;
252 256
253 // The task runners for the main thread and the io thread. 257 // The task runners for the main thread and the io thread.
254 scoped_refptr<base::SingleThreadTaskRunner> task_runner_; 258 scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
255 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_; 259 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
256 260
257 // The share group that all contexts associated with a particular renderer 261 // The share group that all contexts associated with a particular renderer
258 // process use. 262 // process use.
259 scoped_refptr<gl::GLShareGroup> share_group_; 263 scoped_refptr<gl::GLShareGroup> share_group_;
260 264
261 scoped_refptr<gles2::MailboxManager> mailbox_manager_; 265 scoped_refptr<gles2::MailboxManager> mailbox_manager_;
262 266
263 GpuWatchdogThread* const watchdog_; 267 GpuWatchdogThread* const watchdog_;
264 268
265 // Map of stream id to appropriate message queue.
266 base::hash_map<int32_t, scoped_refptr<GpuChannelMessageQueue>> streams_;
267
268 // Multimap of stream id to route ids.
269 base::hash_map<int32_t, int> streams_to_num_routes_;
270
271 // Map of route id to stream id;
272 base::hash_map<int32_t, int32_t> routes_to_streams_;
273
274 // Can view command buffers be created on this channel. 269 // Can view command buffers be created on this channel.
275 const bool allow_view_command_buffers_; 270 const bool allow_view_command_buffers_;
276 271
277 // Can real time streams be created on this channel. 272 // Can real time streams be created on this channel.
278 const bool allow_real_time_streams_; 273 const bool allow_real_time_streams_;
279 274
280 base::ProcessId peer_pid_; 275 base::ProcessId peer_pid_;
281 276
282 // Member variables should appear before the WeakPtrFactory, to ensure
283 // that any WeakPtrs to GpuChannel are invalidated before its member
284 // variables' destructors are executed, rendering them invalid.
285 base::WeakPtrFactory<GpuChannel> weak_factory_;
286
287 DISALLOW_COPY_AND_ASSIGN(GpuChannel); 277 DISALLOW_COPY_AND_ASSIGN(GpuChannel);
288 }; 278 };
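
A minimal sketch of what the route-id based dispatch above amounts to, using simplified stand-in types (none of these names are from the patch): OnMessageReceived() looks up the stub registered for the message's routing id and forwards the message, falling back to the unhandled-message listener when no route exists.

// Illustrative only; Stub and Message stand in for GpuCommandBufferStub and IPC::Message.
#include <cstdint>
#include <map>
#include <memory>

struct Message { int32_t routing_id = 0; /* payload elided */ };

struct Stub {
  void OnMessageReceived(const Message&) { /* run the deferred GL commands */ }
};

class ChannelSketch {
 public:
  void AddRoute(int32_t route_id, std::unique_ptr<Stub> stub) {
    stubs_[route_id] = std::move(stub);
  }
  // Mirrors LookupCommandBuffer(): null when the route id is unknown.
  Stub* Lookup(int32_t route_id) {
    auto it = stubs_.find(route_id);
    return it == stubs_.end() ? nullptr : it->second.get();
  }
  // Roughly the routed-message path of OnMessageReceived().
  bool Dispatch(const Message& msg) {
    if (Stub* stub = Lookup(msg.routing_id)) {
      stub->OnMessageReceived(msg);
      return true;
    }
    return false;  // would fall through to the unhandled-message listener
  }
 private:
  std::map<int32_t, std::unique_ptr<Stub>> stubs_;  // route id -> stub
};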
289 279
290 // This filter does three things: 280 // This filter does three things:
291 // - it counts and timestamps each message forwarded to the channel 281 // - it counts and timestamps each message forwarded to the channel
292 // so that we can preempt other channels if a message takes too long to 282 // so that we can preempt other channels if a message takes too long to
293 // process. To guarantee fairness, we must wait a minimum amount of time 283 // process. To guarantee fairness, we must wait a minimum amount of time
294 // before preempting and we limit the amount of time that we can preempt in 284 // before preempting and we limit the amount of time that we can preempt in
295 // one shot (see constants above). 285 // one shot (see constants above).
296 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO 286 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
297 // thread, generating the sync point ID and responding immediately, and then 287 // thread, generating the sync point ID and responding immediately, and then
298 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message 288 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
299 // into the channel's queue. 289 // into the channel's queue.
300 // - it generates mailbox names for clients of the GPU process on the IO thread. 290 // - it generates mailbox names for clients of the GPU process on the IO thread.
301 class GPU_EXPORT GpuChannelMessageFilter : public IPC::MessageFilter { 291 class GPU_EXPORT GpuChannelMessageFilter : public IPC::MessageFilter {
302 public: 292 public:
303 GpuChannelMessageFilter(); 293 static scoped_refptr<GpuChannelMessageFilter> Create(GpuChannel* gpu_channel);
294 void Disable();
304 295
305 // IPC::MessageFilter implementation. 296 // IPC::MessageFilter implementation.
306 void OnFilterAdded(IPC::Channel* channel) override; 297 void OnFilterAdded(IPC::Channel* channel) override;
307 void OnFilterRemoved() override; 298 void OnFilterRemoved() override;
308 void OnChannelConnected(int32_t peer_pid) override; 299 void OnChannelConnected(int32_t peer_pid) override;
309 void OnChannelError() override; 300 void OnChannelError() override;
310 void OnChannelClosing() override; 301 void OnChannelClosing() override;
311 bool OnMessageReceived(const IPC::Message& message) override; 302 bool OnMessageReceived(const IPC::Message& message) override;
312 303
304 void AddRoute(int32_t route_id, scoped_refptr<GpuChannelMessageQueue> stream);
305 scoped_refptr<GpuChannelMessageQueue> RemoveRoute(int32_t route_id);
306
313 void AddChannelFilter(scoped_refptr<IPC::MessageFilter> filter); 307 void AddChannelFilter(scoped_refptr<IPC::MessageFilter> filter);
314 void RemoveChannelFilter(scoped_refptr<IPC::MessageFilter> filter); 308 void RemoveChannelFilter(scoped_refptr<IPC::MessageFilter> filter);
315 309
316 void AddRoute(int32_t route_id,
317 const scoped_refptr<GpuChannelMessageQueue>& queue);
318 void RemoveRoute(int32_t route_id);
319
320 bool Send(IPC::Message* message); 310 bool Send(IPC::Message* message);
321 311
322 protected: 312 protected:
313 GpuChannelMessageFilter(GpuChannel* gpu_channel);
323 ~GpuChannelMessageFilter() override; 314 ~GpuChannelMessageFilter() override;
324 315
325 private: 316 private:
326 scoped_refptr<GpuChannelMessageQueue> LookupStreamByRoute(int32_t route_id); 317 scoped_refptr<GpuChannelMessageQueue> LookupRoute(int32_t route_id);
327 318
328 bool MessageErrorHandler(const IPC::Message& message, const char* error_msg); 319 bool MessageErrorHandler(const IPC::Message& message, const char* error_msg);
329 320
330 // Map of route id to message queue. 321 bool enabled_ = true;
331 base::hash_map<int32_t, scoped_refptr<GpuChannelMessageQueue>> routes_; 322 GpuChannel* gpu_channel_ = nullptr;
332 base::Lock routes_lock_; // Protects |routes_|. 323 std::unordered_map<int32_t, scoped_refptr<GpuChannelMessageQueue>> routes_;
324 std::vector<scoped_refptr<IPC::MessageFilter>> channel_filters_;
325 base::Lock lock_; // Protects the above variables.
333 326
334 IPC::Channel* channel_; 327 IPC::Channel* channel_ = nullptr;
335 base::ProcessId peer_pid_; 328 base::ProcessId peer_pid_ = base::kNullProcessId;
336 std::vector<scoped_refptr<IPC::MessageFilter>> channel_filters_;
337 329
338 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageFilter); 330 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageFilter);
339 }; 331 };
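
The fairness rule in the first bullet above (wait a minimum amount of time before preempting, and cap how long a single preemption lasts) can be pictured with the following sketch. The constant names and the 16 ms values are assumptions for illustration, not values taken from this patch.

// Illustrative sketch of the preemption timing idea; run on the IO thread.
#include <chrono>
#include <deque>

using Clock = std::chrono::steady_clock;

constexpr auto kPreemptWait = std::chrono::milliseconds(16);  // assumed value
constexpr auto kMaxPreempt  = std::chrono::milliseconds(16);  // assumed value

struct PendingMessage { Clock::time_point time_received; };

struct PreemptionFlagSketch { bool set = false; };

// Called whenever the timer fires or a new message is queued.
void UpdatePreemption(const std::deque<PendingMessage>& queue,
                      Clock::time_point preempt_started,
                      PreemptionFlagSketch* preempting) {
  if (queue.empty()) {  // nothing stalled: stop preempting other channels
    preempting->set = false;
    return;
  }
  const auto now = Clock::now();
  const bool stalled = now - queue.front().time_received > kPreemptWait;
  const bool over_budget =
      preempting->set && now - preempt_started > kMaxPreempt;
  // Preempt only after the minimum wait, and never for longer than the cap.
  preempting->set = stalled && !over_budget;
}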
340 332
341 struct GpuChannelMessage { 333 struct GpuChannelMessage {
342 IPC::Message message; 334 IPC::Message message;
343 uint32_t order_number; 335 uint32_t order_number;
344 base::TimeTicks time_received; 336 base::TimeTicks time_received;
345 337
346 GpuChannelMessage(const IPC::Message& msg, 338 GpuChannelMessage(const IPC::Message& msg,
347 uint32_t order_num, 339 uint32_t order_num,
348 base::TimeTicks ts) 340 base::TimeTicks ts)
349 : message(msg), order_number(order_num), time_received(ts) {} 341 : message(msg), order_number(order_num), time_received(ts) {}
350 342
351 private: 343 private:
352 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessage); 344 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessage);
353 }; 345 };
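
Each queued message carries a global order number and an arrival timestamp. A sketch of how PushBackMessage() might tag messages as they arrive on the IO thread; the plain counter here is a stand-in for the SyncPointOrderData bookkeeping, not the patch's actual code.

#include <cstdint>
#include <deque>
#include <memory>

struct QueuedMessage {
  uint32_t order_number;  // global ordering across the whole channel
  // IPC::Message payload and time_received elided
};

class QueueSketch {
 public:
  explicit QueueSketch(uint32_t* global_order_counter)
      : global_order_counter_(global_order_counter) {}

  // Tag the message with the next global order number before queueing it, so
  // sync point waits can later be checked against processing order.
  void PushBack() {
    auto msg = std::make_unique<QueuedMessage>();
    msg->order_number = ++(*global_order_counter_);
    messages_.push_back(std::move(msg));
  }

  uint32_t GetUnprocessedOrderNum() const {
    return messages_.empty() ? 0 : messages_.back()->order_number;
  }

 private:
  uint32_t* global_order_counter_;  // shared by all streams on the channel
  std::deque<std::unique_ptr<QueuedMessage>> messages_;
};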
354 346
355 class GpuChannelMessageQueue 347 class GpuChannelMessageQueue
356 : public base::RefCountedThreadSafe<GpuChannelMessageQueue> { 348 : public GpuCommandStream,
349 public base::RefCountedThreadSafe<GpuChannelMessageQueue> {
357 public: 350 public:
358 static scoped_refptr<GpuChannelMessageQueue> Create( 351 static scoped_refptr<GpuChannelMessageQueue> Create(
359 int32_t stream_id, 352 int32_t stream_id,
360 GpuStreamPriority stream_priority, 353 GpuStreamPriority stream_priority,
361 GpuChannel* channel, 354 GpuChannel* channel,
362 const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner, 355 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
363 const scoped_refptr<PreemptionFlag>& preempting_flag, 356 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
364 const scoped_refptr<PreemptionFlag>& preempted_flag, 357 GpuScheduler* scheduler,
358 scoped_refptr<PreemptionFlag> preempting_flag,
359 scoped_refptr<PreemptionFlag> preemption_flag,
365 SyncPointManager* sync_point_manager); 360 SyncPointManager* sync_point_manager);
366 361
367 void Disable(); 362 void Destroy();
368 void DisableIO(); 363 void DestroyIO();
364
365 void OnRouteAdded();
366 void OnRouteRemoved();
367 size_t NumRoutes() const;
368
369 // GpuCommandStream implementation.
370 void Run() override;
369 371
370 int32_t stream_id() const { return stream_id_; } 372 int32_t stream_id() const { return stream_id_; }
371 GpuStreamPriority stream_priority() const { return stream_priority_; } 373 GpuStreamPriority stream_priority() const { return priority_; }
372 374
373 bool IsScheduled() const; 375 bool IsScheduled() const;
374 void OnRescheduled(bool scheduled); 376 void OnRescheduled(bool scheduled);
375 377
376 bool HasQueuedMessages() const; 378 void Schedule();
377 379 void Deschedule();
378 base::TimeTicks GetNextMessageTimeTick() const;
379 380
380 scoped_refptr<SyncPointOrderData> GetSyncPointOrderData(); 381 scoped_refptr<SyncPointOrderData> GetSyncPointOrderData();
381 382
382 // Returns the global order number for the last unprocessed IPC message. 383 // Returns the global order number for the last unprocessed IPC message.
383 uint32_t GetUnprocessedOrderNum() const; 384 uint32_t GetUnprocessedOrderNum() const;
384 385
385 // Returns the global order number for the last processed IPC message. 386 // Returns the global order number for the last processed IPC message.
386 uint32_t GetProcessedOrderNum() const; 387 uint32_t GetProcessedOrderNum() const;
387 388
389 bool HasMessages() const;
390
391 void PushBackMessage(const IPC::Message& message);
392
388 // Should be called before a message begins to be processed. Returns false if 393 // Should be called before a message begins to be processed. Returns false if
389 // there are no messages to process. 394 // there are no messages to process.
390 const GpuChannelMessage* BeginMessageProcessing(); 395 const GpuChannelMessage* BeginMessageProcessing();
391 // Should be called if a message began processing but did not finish. 396 // Should be called if a message began processing but did not finish.
392 void PauseMessageProcessing(); 397 void PauseMessageProcessing();
393 // Should be called if a message is completely processed. Returns true if 398 // Should be called if a message is completely processed. Returns true if
394 // there are more messages to process. 399 // there are more messages to process.
395 void FinishMessageProcessing(); 400 void FinishMessageProcessing();
396 401
397 bool PushBackMessage(const IPC::Message& message);
398
399 private: 402 private:
400 enum PreemptionState { 403 enum PreemptionState {
401 // Either there's no other channel to preempt, there are no messages 404 // Either there's no other channel to preempt, there are no messages
402 // pending processing, or we just finished preempting and have to wait 405 // pending processing, or we just finished preempting and have to wait
403 // before preempting again. 406 // before preempting again.
404 IDLE, 407 IDLE,
405 // We are waiting kPreemptWaitTimeMs before checking if we should preempt. 408 // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
406 WAITING, 409 WAITING,
407 // We can preempt whenever any IPC processing takes more than 410 // We can preempt whenever any IPC processing takes more than
408 // kPreemptWaitTimeMs. 411 // kPreemptWaitTimeMs.
409 CHECKING, 412 CHECKING,
410 // We are currently preempting (i.e. no stub is descheduled). 413 // We are currently preempting (i.e. no stub is descheduled).
411 PREEMPTING, 414 PREEMPTING,
412 // We would like to preempt, but some stub is descheduled. 415 // We would like to preempt, but some stub is descheduled.
413 WOULD_PREEMPT_DESCHEDULED, 416 WOULD_PREEMPT_DESCHEDULED,
414 }; 417 };
415 418
416 friend class base::RefCountedThreadSafe<GpuChannelMessageQueue>; 419 friend class base::RefCountedThreadSafe<GpuChannelMessageQueue>;
417 420
418 GpuChannelMessageQueue( 421 GpuChannelMessageQueue(
419 int32_t stream_id, 422 int32_t stream_id,
420 GpuStreamPriority stream_priority, 423 GpuStreamPriority stream_priority,
421 GpuChannel* channel, 424 GpuChannel* channel,
422 const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner, 425 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
423 const scoped_refptr<PreemptionFlag>& preempting_flag, 426 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
424 const scoped_refptr<PreemptionFlag>& preempted_flag, 427 GpuScheduler* scheduler,
428 scoped_refptr<PreemptionFlag> preempting_flag,
429 scoped_refptr<PreemptionFlag> preemption_flag,
425 SyncPointManager* sync_point_manager); 430 SyncPointManager* sync_point_manager);
426 ~GpuChannelMessageQueue(); 431 ~GpuChannelMessageQueue() override;
427 432
433 void PostUpdatePreemptionState();
428 void UpdatePreemptionState(); 434 void UpdatePreemptionState();
429 void UpdatePreemptionStateHelper(); 435 void UpdatePreemptionStateHelper();
430 436
431 void UpdateStateIdle(); 437 void UpdateStateIdle();
432 void UpdateStateWaiting(); 438 void UpdateStateWaiting();
433 void UpdateStateChecking(); 439 void UpdateStateChecking();
434 void UpdateStatePreempting(); 440 void UpdateStatePreempting();
435 void UpdateStateWouldPreemptDescheduled(); 441 void UpdateStateWouldPreemptDescheduled();
436 442
437 void TransitionToIdle(); 443 void TransitionToIdle();
438 void TransitionToWaiting(); 444 void TransitionToWaiting();
439 void TransitionToChecking(); 445 void TransitionToChecking();
440 void TransitionToPreempting(); 446 void TransitionToPreempting();
441 void TransitionToWouldPreemptDescheduled(); 447 void TransitionToWouldPreemptDescheduled();
442 448
443 bool ShouldTransitionToIdle() const; 449 bool ShouldTransitionToIdle() const;
444 450
445 const int32_t stream_id_; 451 const int32_t stream_id_;
446 const GpuStreamPriority stream_priority_; 452 const GpuStreamPriority priority_;
453 size_t num_routes_;
447 454
448 // These can be accessed from both IO and main threads and are protected by 455 // These can be accessed from both IO and main threads and are protected by
449 // |channel_lock_|. 456 // |channel_lock_|.
450 bool enabled_;
451 bool scheduled_; 457 bool scheduled_;
452 GpuChannel* const channel_; 458 GpuChannel* channel_;
453 std::deque<std::unique_ptr<GpuChannelMessage>> channel_messages_; 459 std::deque<std::unique_ptr<GpuChannelMessage>> channel_messages_;
454 mutable base::Lock channel_lock_; 460 mutable base::Lock channel_lock_;
455 461
456 // The following are accessed on the IO thread only. 462 // The following are accessed on the IO thread only.
457 // No lock is necessary for preemption state because it's only accessed on the 463 // No lock is necessary for preemption state because it's only accessed on the
458 // IO thread. 464 // IO thread.
459 PreemptionState preemption_state_; 465 PreemptionState preemption_state_;
460 // Maximum amount of time that we can spend in PREEMPTING. 466 // Maximum amount of time that we can spend in PREEMPTING.
461 // It is reset when we transition to IDLE. 467 // It is reset when we transition to IDLE.
462 base::TimeDelta max_preemption_time_; 468 base::TimeDelta max_preemption_time_;
463 // This timer is used and runs tasks on the IO thread. 469 // This timer is used to run tasks on the IO thread.
464 std::unique_ptr<base::OneShotTimer> timer_; 470 std::unique_ptr<base::OneShotTimer> timer_;
465 base::ThreadChecker io_thread_checker_; 471 base::ThreadChecker io_thread_checker_;
466 472
467 // Keeps track of sync point related state such as message order numbers. 473 // Keeps track of sync point related state such as message order numbers.
468 scoped_refptr<SyncPointOrderData> sync_point_order_data_; 474 scoped_refptr<SyncPointOrderData> sync_point_order_data_;
469 475
476 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
470 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_; 477 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
478 GpuScheduler* const scheduler_;
471 scoped_refptr<PreemptionFlag> preempting_flag_; 479 scoped_refptr<PreemptionFlag> preempting_flag_;
472 scoped_refptr<PreemptionFlag> preempted_flag_; 480 scoped_refptr<PreemptionFlag> preemption_flag_;
473 SyncPointManager* const sync_point_manager_; 481 SyncPointManager* const sync_point_manager_;
474 482
475 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue); 483 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue);
476 }; 484 };
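
The PreemptionState enum and the TransitionTo*/UpdateState* methods above describe a small state machine. The sketch below paraphrases the transitions from the enum comments; it is not logic lifted from gpu_channel.cc, and the predicates are simplified.

enum class PreemptionState { IDLE, WAITING, CHECKING, PREEMPTING,
                             WOULD_PREEMPT_DESCHEDULED };

PreemptionState Next(PreemptionState state,
                     bool has_messages,      // messages pending on the queue
                     bool wait_elapsed,      // kPreemptWaitTimeMs has passed
                     bool stub_descheduled,  // some stub cannot make progress
                     bool budget_exhausted)  // max_preemption_time_ used up
{
  switch (state) {
    case PreemptionState::IDLE:
      return has_messages ? PreemptionState::WAITING : state;
    case PreemptionState::WAITING:
      return wait_elapsed ? PreemptionState::CHECKING : state;
    case PreemptionState::CHECKING:
      if (!has_messages) return PreemptionState::IDLE;
      if (wait_elapsed)
        return stub_descheduled ? PreemptionState::WOULD_PREEMPT_DESCHEDULED
                                : PreemptionState::PREEMPTING;
      return state;
    case PreemptionState::PREEMPTING:
      if (!has_messages || budget_exhausted) return PreemptionState::IDLE;
      return stub_descheduled ? PreemptionState::WOULD_PREEMPT_DESCHEDULED
                              : state;
    case PreemptionState::WOULD_PREEMPT_DESCHEDULED:
      if (!has_messages) return PreemptionState::IDLE;
      return stub_descheduled ? state : PreemptionState::PREEMPTING;
  }
  return state;
}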
477 485
478 } // namespace gpu 486 } // namespace gpu
479 487
480 #endif // GPU_IPC_SERVICE_GPU_CHANNEL_H_ 488 #endif // GPU_IPC_SERVICE_GPU_CHANNEL_H_