OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef CONTENT_COMMON_GPU_GPU_CHANNEL_H_ | 5 #ifndef CONTENT_COMMON_GPU_GPU_CHANNEL_H_ |
6 #define CONTENT_COMMON_GPU_GPU_CHANNEL_H_ | 6 #define CONTENT_COMMON_GPU_GPU_CHANNEL_H_ |
7 | 7 |
8 #include <string> | 8 #include <string> |
9 | 9 |
10 #include "base/containers/hash_tables.h" | 10 #include "base/containers/hash_tables.h" |
(...skipping 50 matching lines...)
61 class CONTENT_EXPORT GpuChannel | 61 class CONTENT_EXPORT GpuChannel |
62 : public IPC::Listener, | 62 : public IPC::Listener, |
63 public IPC::Sender, | 63 public IPC::Sender, |
64 public gpu::gles2::SubscriptionRefSet::Observer { | 64 public gpu::gles2::SubscriptionRefSet::Observer { |
65 public: | 65 public: |
66 // Takes ownership of the renderer process handle. | 66 // Takes ownership of the renderer process handle. |
67 GpuChannel(GpuChannelManager* gpu_channel_manager, | 67 GpuChannel(GpuChannelManager* gpu_channel_manager, |
68 GpuWatchdog* watchdog, | 68 GpuWatchdog* watchdog, |
69 gfx::GLShareGroup* share_group, | 69 gfx::GLShareGroup* share_group, |
70 gpu::gles2::MailboxManager* mailbox_manager, | 70 gpu::gles2::MailboxManager* mailbox_manager, |
| 71 gpu::PreemptionFlag* preempting_flag, |
71 base::SingleThreadTaskRunner* task_runner, | 72 base::SingleThreadTaskRunner* task_runner, |
72 base::SingleThreadTaskRunner* io_task_runner, | 73 base::SingleThreadTaskRunner* io_task_runner, |
73 int client_id, | 74 int client_id, |
74 uint64_t client_tracing_id, | 75 uint64_t client_tracing_id, |
75 bool software, | |
76 bool allow_future_sync_points, | 76 bool allow_future_sync_points, |
77 bool allow_real_time_streams); | 77 bool allow_real_time_streams); |
78 ~GpuChannel() override; | 78 ~GpuChannel() override; |
79 | 79 |
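Note on the constructor change above: the preemption flag is now supplied at construction (new parameter |preempting_flag|) rather than being pushed into the message filter later, and the |software| flag is dropped. A minimal call-site sketch, assuming the caller is GpuChannelManager and that the local/member names below exist there (they are illustrative, not part of this diff):

    scoped_ptr<GpuChannel> channel(new GpuChannel(
        this, watchdog_, share_group, mailbox_manager, preempting_flag,
        task_runner_.get(), io_task_runner_.get(), client_id,
        client_tracing_id, allow_future_sync_points,
        allow_real_time_streams));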
80 // Initializes the IPC channel. Caller takes ownership of the client FD in | 80 // Initializes the IPC channel. Caller takes ownership of the client FD in |
81 // the returned handle and is responsible for closing it. | 81 // the returned handle and is responsible for closing it. |
82 virtual IPC::ChannelHandle Init(base::WaitableEvent* shutdown_event); | 82 virtual IPC::ChannelHandle Init(base::WaitableEvent* shutdown_event); |
83 | 83 |
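A hedged usage sketch of the FD ownership contract described above, assuming the POSIX IPC::ChannelHandle layout (a name plus a base::FileDescriptor socket); the real call site lives in GpuChannelManager and is not part of this diff:

    IPC::ChannelHandle handle = channel->Init(shutdown_event);
    #if defined(OS_POSIX)
    // The client FD in the returned handle belongs to the caller, who must
    // close it once it has been handed off to the renderer.
    if (handle.socket.fd != -1)
      close(handle.socket.fd);
    #endif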
84 // Get the GpuChannelManager that owns this channel. | 84 // Get the GpuChannelManager that owns this channel. |
85 GpuChannelManager* gpu_channel_manager() const { | 85 GpuChannelManager* gpu_channel_manager() const { |
(...skipping 23 matching lines...)
109 void OnAddSubscription(unsigned int target) override; | 109 void OnAddSubscription(unsigned int target) override; |
110 void OnRemoveSubscription(unsigned int target) override; | 110 void OnRemoveSubscription(unsigned int target) override; |
111 | 111 |
112 // This is called when a command buffer transitions between scheduled and | 112 // This is called when a command buffer transitions between scheduled and |
113 // descheduled states. When any stub is descheduled, we stop preempting | 113 // descheduled states. When any stub is descheduled, we stop preempting |
114 // other channels. | 114 // other channels. |
115 void OnStubSchedulingChanged(GpuCommandBufferStub* stub, bool scheduled); | 115 void OnStubSchedulingChanged(GpuCommandBufferStub* stub, bool scheduled); |
116 | 116 |
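The comment above describes per-channel bookkeeping: a count of descheduled stubs drives whether this channel keeps preempting others. A simplified sketch of what such an implementation could look like (only num_stubs_descheduled_ appears in this header; filter_ and io_task_runner_ are assumed members, and the real body in gpu_channel.cc may differ):

    void GpuChannel::OnStubSchedulingChanged(GpuCommandBufferStub* stub,
                                             bool scheduled) {
      bool was_descheduled = num_stubs_descheduled_ > 0;
      if (scheduled)
        num_stubs_descheduled_--;
      else
        num_stubs_descheduled_++;
      bool is_descheduled = num_stubs_descheduled_ > 0;
      if (is_descheduled != was_descheduled && filter_.get()) {
        // Tell the IO-thread filter to pause or resume preempting
        // other channels.
        io_task_runner_->PostTask(
            FROM_HERE,
            base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                       filter_, is_descheduled));
      }
    }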
117 CreateCommandBufferResult CreateViewCommandBuffer( | 117 CreateCommandBufferResult CreateViewCommandBuffer( |
118 const gfx::GLSurfaceHandle& window, | 118 const gfx::GLSurfaceHandle& window, |
119 int32 surface_id, | |
120 const GPUCreateCommandBufferConfig& init_params, | 119 const GPUCreateCommandBufferConfig& init_params, |
121 int32 route_id); | 120 int32 route_id); |
122 | 121 |
123 gfx::GLShareGroup* share_group() const { return share_group_.get(); } | 122 gfx::GLShareGroup* share_group() const { return share_group_.get(); } |
124 | 123 |
125 GpuCommandBufferStub* LookupCommandBuffer(int32 route_id); | 124 GpuCommandBufferStub* LookupCommandBuffer(int32 route_id); |
126 | 125 |
127 void LoseAllContexts(); | 126 void LoseAllContexts(); |
128 void MarkAllContextsLost(); | 127 void MarkAllContextsLost(); |
129 | 128 |
130 // Called to add a listener for a particular message routing ID. | 129 // Called to add a listener for a particular message routing ID. |
131 // Returns true if succeeded. | 130 // Returns true if succeeded. |
132 bool AddRoute(int32 route_id, IPC::Listener* listener); | 131 bool AddRoute(int32 route_id, IPC::Listener* listener); |
133 | 132 |
134 // Called to remove a listener for a particular message routing ID. | 133 // Called to remove a listener for a particular message routing ID. |
135 void RemoveRoute(int32 route_id); | 134 void RemoveRoute(int32 route_id); |
136 | 135 |
137 gpu::PreemptionFlag* GetPreemptionFlag(); | 136 void SetPreemptingFlag(gpu::PreemptionFlag* flag); |
138 | 137 |
139 // If |preemption_flag->IsSet()|, any stub on this channel | 138 // If |preemption_flag->IsSet()|, any stub on this channel |
140 // should stop issuing GL commands. Setting this to NULL stops deferral. | 139 // should stop issuing GL commands. Setting this to NULL stops deferral. |
141 void SetPreemptByFlag( | 140 void SetPreemptByFlag( |
142 scoped_refptr<gpu::PreemptionFlag> preemption_flag); | 141 scoped_refptr<gpu::PreemptionFlag> preemption_flag); |
143 | 142 |
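To make the contract above concrete, a hedged consumer-side sketch (the member name preempt_by_flag_ and the placement inside the stub's scheduling path are assumptions, not part of this diff):

    // A stub defers further GL work while the flag installed via
    // SetPreemptByFlag() is raised; passing NULL disables this check.
    if (preempt_by_flag_.get() && preempt_by_flag_->IsSet())
      return;  // Re-schedule the remaining commands later.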
144 void CacheShader(const std::string& key, const std::string& shader); | 143 void CacheShader(const std::string& key, const std::string& shader); |
145 | 144 |
146 void AddFilter(IPC::MessageFilter* filter); | 145 void AddFilter(IPC::MessageFilter* filter); |
147 void RemoveFilter(IPC::MessageFilter* filter); | 146 void RemoveFilter(IPC::MessageFilter* filter); |
(...skipping 121 matching lines...)
269 scoped_refptr<gpu::gles2::MailboxManager> mailbox_manager_; | 268 scoped_refptr<gpu::gles2::MailboxManager> mailbox_manager_; |
270 | 269 |
271 scoped_refptr<gpu::gles2::SubscriptionRefSet> subscription_ref_set_; | 270 scoped_refptr<gpu::gles2::SubscriptionRefSet> subscription_ref_set_; |
272 | 271 |
273 scoped_refptr<gpu::ValueStateMap> pending_valuebuffer_state_; | 272 scoped_refptr<gpu::ValueStateMap> pending_valuebuffer_state_; |
274 | 273 |
275 scoped_ptr<GpuJpegDecodeAccelerator> jpeg_decoder_; | 274 scoped_ptr<GpuJpegDecodeAccelerator> jpeg_decoder_; |
276 | 275 |
277 gpu::gles2::DisallowedFeatures disallowed_features_; | 276 gpu::gles2::DisallowedFeatures disallowed_features_; |
278 GpuWatchdog* watchdog_; | 277 GpuWatchdog* watchdog_; |
279 bool software_; | |
280 | 278 |
281 size_t num_stubs_descheduled_; | 279 size_t num_stubs_descheduled_; |
282 | 280 |
283 // Map of stream id to stream state. | 281 // Map of stream id to stream state. |
284 base::hash_map<int32, StreamState> streams_; | 282 base::hash_map<int32, StreamState> streams_; |
285 | 283 |
286 bool allow_future_sync_points_; | 284 bool allow_future_sync_points_; |
287 bool allow_real_time_streams_; | 285 bool allow_real_time_streams_; |
288 | 286 |
289 // Member variables should appear before the WeakPtrFactory, to ensure | 287 // Member variables should appear before the WeakPtrFactory, to ensure |
(...skipping 14 matching lines...)
304 // thread, generating the sync point ID and responding immediately, and then | 302 // thread, generating the sync point ID and responding immediately, and then |
305 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message | 303 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message |
306 // into the channel's queue. | 304 // into the channel's queue. |
307 // - it generates mailbox names for clients of the GPU process on the IO thread. | 305 // - it generates mailbox names for clients of the GPU process on the IO thread. |
308 class GpuChannelMessageFilter : public IPC::MessageFilter { | 306 class GpuChannelMessageFilter : public IPC::MessageFilter { |
309 public: | 307 public: |
310 GpuChannelMessageFilter(const base::WeakPtr<GpuChannel>& gpu_channel, | 308 GpuChannelMessageFilter(const base::WeakPtr<GpuChannel>& gpu_channel, |
311 GpuChannelMessageQueue* message_queue, | 309 GpuChannelMessageQueue* message_queue, |
312 gpu::SyncPointManager* sync_point_manager, | 310 gpu::SyncPointManager* sync_point_manager, |
313 base::SingleThreadTaskRunner* task_runner, | 311 base::SingleThreadTaskRunner* task_runner, |
| 312 gpu::PreemptionFlag* preempting_flag, |
314 bool future_sync_points); | 313 bool future_sync_points); |
315 | 314 |
316 // IPC::MessageFilter implementation. | 315 // IPC::MessageFilter implementation. |
317 void OnFilterAdded(IPC::Sender* sender) override; | 316 void OnFilterAdded(IPC::Sender* sender) override; |
318 void OnFilterRemoved() override; | 317 void OnFilterRemoved() override; |
319 void OnChannelConnected(int32 peer_pid) override; | 318 void OnChannelConnected(int32 peer_pid) override; |
320 void OnChannelError() override; | 319 void OnChannelError() override; |
321 void OnChannelClosing() override; | 320 void OnChannelClosing() override; |
322 bool OnMessageReceived(const IPC::Message& message) override; | 321 bool OnMessageReceived(const IPC::Message& message) override; |
323 | 322 |
324 void AddChannelFilter(scoped_refptr<IPC::MessageFilter> filter); | 323 void AddChannelFilter(scoped_refptr<IPC::MessageFilter> filter); |
325 void RemoveChannelFilter(scoped_refptr<IPC::MessageFilter> filter); | 324 void RemoveChannelFilter(scoped_refptr<IPC::MessageFilter> filter); |
326 | 325 |
327 void OnMessageProcessed(); | 326 void OnMessageProcessed(); |
328 | 327 |
329 void SetPreemptingFlagAndSchedulingState(gpu::PreemptionFlag* preempting_flag, | |
330 bool a_stub_is_descheduled); | |
331 | |
332 void UpdateStubSchedulingState(bool a_stub_is_descheduled); | 328 void UpdateStubSchedulingState(bool a_stub_is_descheduled); |
333 | 329 |
334 bool Send(IPC::Message* message); | 330 bool Send(IPC::Message* message); |
335 | 331 |
336 protected: | 332 protected: |
337 ~GpuChannelMessageFilter() override; | 333 ~GpuChannelMessageFilter() override; |
338 | 334 |
339 private: | 335 private: |
340 enum PreemptionState { | 336 enum PreemptionState { |
341 // Either there's no other channel to preempt, there are no messages | 337 // Either there's no other channel to preempt, there are no messages |
(...skipping 127 matching lines...)
469 | 465 |
470 base::WeakPtr<GpuChannel> gpu_channel_; | 466 base::WeakPtr<GpuChannel> gpu_channel_; |
471 scoped_refptr<base::SingleThreadTaskRunner> task_runner_; | 467 scoped_refptr<base::SingleThreadTaskRunner> task_runner_; |
472 | 468 |
473 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue); | 469 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue); |
474 }; | 470 }; |
475 | 471 |
476 } // namespace content | 472 } // namespace content |
477 | 473 |
478 #endif // CONTENT_COMMON_GPU_GPU_CHANNEL_H_ | 474 #endif // CONTENT_COMMON_GPU_GPU_CHANNEL_H_ |