| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef GPU_IPC_CLIENT_COMMAND_BUFFER_PROXY_IMPL_H_ | 5 #ifndef GPU_IPC_CLIENT_COMMAND_BUFFER_PROXY_IMPL_H_ |
| 6 #define GPU_IPC_CLIENT_COMMAND_BUFFER_PROXY_IMPL_H_ | 6 #define GPU_IPC_CLIENT_COMMAND_BUFFER_PROXY_IMPL_H_ |
| 7 | 7 |
| 8 #include <stddef.h> | 8 #include <stddef.h> |
| 9 #include <stdint.h> | 9 #include <stdint.h> |
| 10 | 10 |
| (...skipping 78 matching lines...) |
| 89 const GURL& active_url, | 89 const GURL& active_url, |
| 90 scoped_refptr<base::SingleThreadTaskRunner> task_runner); | 90 scoped_refptr<base::SingleThreadTaskRunner> task_runner); |
| 91 ~CommandBufferProxyImpl() override; | 91 ~CommandBufferProxyImpl() override; |
| 92 | 92 |
| 93 // IPC::Listener implementation: | 93 // IPC::Listener implementation: |
| 94 bool OnMessageReceived(const IPC::Message& message) override; | 94 bool OnMessageReceived(const IPC::Message& message) override; |
| 95 void OnChannelError() override; | 95 void OnChannelError() override; |
| 96 | 96 |
| 97 // CommandBuffer implementation: | 97 // CommandBuffer implementation: |
| 98 State GetLastState() override; | 98 State GetLastState() override; |
| 99 int32_t GetLastToken() override; | |
| 100 void Flush(int32_t put_offset) override; | 99 void Flush(int32_t put_offset) override; |
| 101 void OrderingBarrier(int32_t put_offset) override; | 100 void OrderingBarrier(int32_t put_offset) override; |
| 102 void WaitForTokenInRange(int32_t start, int32_t end) override; | 101 State WaitForTokenInRange(int32_t start, int32_t end) override; |
| 103 void WaitForGetOffsetInRange(int32_t start, int32_t end) override; | 102 State WaitForGetOffsetInRange(int32_t start, int32_t end) override; |
| 104 void SetGetBuffer(int32_t shm_id) override; | 103 void SetGetBuffer(int32_t shm_id) override; |
| 105 scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size, | 104 scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size, |
| 106 int32_t* id) override; | 105 int32_t* id) override; |
| 107 void DestroyTransferBuffer(int32_t id) override; | 106 void DestroyTransferBuffer(int32_t id) override; |
| 108 | 107 |
| 109 // gpu::GpuControl implementation: | 108 // gpu::GpuControl implementation: |
| 110 void SetGpuControlClient(GpuControlClient* client) override; | 109 void SetGpuControlClient(GpuControlClient* client) override; |
| 111 gpu::Capabilities GetCapabilities() override; | 110 gpu::Capabilities GetCapabilities() override; |
| 112 int32_t CreateImage(ClientBuffer buffer, | 111 int32_t CreateImage(ClientBuffer buffer, |
| 113 size_t width, | 112 size_t width, |
| 114 size_t height, | 113 size_t height, |
| 115 unsigned internal_format) override; | 114 unsigned internal_format) override; |
| 116 void DestroyImage(int32_t id) override; | 115 void DestroyImage(int32_t id) override; |
| 117 int32_t CreateGpuMemoryBufferImage(size_t width, | 116 int32_t CreateGpuMemoryBufferImage(size_t width, |
| 118 size_t height, | 117 size_t height, |
| 119 unsigned internal_format, | 118 unsigned internal_format, |
| 120 unsigned usage) override; | 119 unsigned usage) override; |
| 121 void SignalQuery(uint32_t query, const base::Closure& callback) override; | 120 void SignalQuery(uint32_t query, const base::Closure& callback) override; |
| 122 void SetLock(base::Lock* lock) override; | 121 void SetLock(base::Lock* lock) override; |
| 123 void EnsureWorkVisible() override; | 122 void EnsureWorkVisible() override; |
| 124 gpu::CommandBufferNamespace GetNamespaceID() const override; | 123 gpu::CommandBufferNamespace GetNamespaceID() const override; |
| 125 gpu::CommandBufferId GetCommandBufferID() const override; | 124 gpu::CommandBufferId GetCommandBufferID() const override; |
| 126 int32_t GetExtraCommandBufferData() const override; | 125 int32_t GetExtraCommandBufferData() const override; |
| 127 uint64_t GenerateFenceSyncRelease() override; | 126 uint64_t GenerateFenceSyncRelease() override; |
| 128 bool IsFenceSyncRelease(uint64_t release) override; | 127 bool IsFenceSyncRelease(uint64_t release) override; |
| 129 bool IsFenceSyncFlushed(uint64_t release) override; | 128 bool IsFenceSyncFlushed(uint64_t release) override; |
| 130 bool IsFenceSyncFlushReceived(uint64_t release) override; | 129 bool IsFenceSyncFlushReceived(uint64_t release) override; |
| 130 bool IsFenceSyncReleased(uint64_t release) override; |
| 131 void SignalSyncToken(const gpu::SyncToken& sync_token, | 131 void SignalSyncToken(const gpu::SyncToken& sync_token, |
| 132 const base::Closure& callback) override; | 132 const base::Closure& callback) override; |
| 133 bool CanWaitUnverifiedSyncToken(const gpu::SyncToken* sync_token) override; | 133 bool CanWaitUnverifiedSyncToken(const gpu::SyncToken* sync_token) override; |
| 134 | 134 |
| 135 void TakeFrontBuffer(const gpu::Mailbox& mailbox); | 135 void TakeFrontBuffer(const gpu::Mailbox& mailbox); |
| 136 void ReturnFrontBuffer(const gpu::Mailbox& mailbox, | 136 void ReturnFrontBuffer(const gpu::Mailbox& mailbox, |
| 137 const gpu::SyncToken& sync_token, | 137 const gpu::SyncToken& sync_token, |
| 138 bool is_lost); | 138 bool is_lost); |
| 139 | 139 |
| 140 void AddDeletionObserver(DeletionObserver* observer); | 140 void AddDeletionObserver(DeletionObserver* observer); |
| 141 void RemoveDeletionObserver(DeletionObserver* observer); | 141 void RemoveDeletionObserver(DeletionObserver* observer); |
| 142 | 142 |
| 143 bool EnsureBackbuffer(); | 143 bool EnsureBackbuffer(); |
| 144 | 144 |
| 145 void SetOnConsoleMessageCallback(const GpuConsoleMessageCallback& callback); | 145 void SetOnConsoleMessageCallback(const GpuConsoleMessageCallback& callback); |
| 146 | 146 |
| 147 void SetLatencyInfo(const std::vector<ui::LatencyInfo>& latency_info); | 147 void SetLatencyInfo(const std::vector<ui::LatencyInfo>& latency_info); |
| 148 using SwapBuffersCompletionCallback = base::Callback<void( | 148 using SwapBuffersCompletionCallback = base::Callback<void( |
| 149 const std::vector<ui::LatencyInfo>& latency_info, | 149 const std::vector<ui::LatencyInfo>& latency_info, |
| 150 gfx::SwapResult result, | 150 gfx::SwapResult result, |
| 151 const gpu::GpuProcessHostedCALayerTreeParamsMac* params_mac)>; | 151 const gpu::GpuProcessHostedCALayerTreeParamsMac* params_mac)>; |
| 152 void SetSwapBuffersCompletionCallback( | 152 void SetSwapBuffersCompletionCallback( |
| 153 const SwapBuffersCompletionCallback& callback); | 153 const SwapBuffersCompletionCallback& callback); |
| 154 | 154 |
| 155 using UpdateVSyncParametersCallback = | 155 using UpdateVSyncParametersCallback = |
| 156 base::Callback<void(base::TimeTicks timebase, base::TimeDelta interval)>; | 156 base::Callback<void(base::TimeTicks timebase, base::TimeDelta interval)>; |
| 157 void SetUpdateVSyncParametersCallback( | 157 void SetUpdateVSyncParametersCallback( |
| 158 const UpdateVSyncParametersCallback& callback); | 158 const UpdateVSyncParametersCallback& callback); |
| 159 | 159 |
| 160 // TODO(apatrick): this is a temporary optimization while skia is calling | |
| 161 // ContentGLContext::MakeCurrent prior to every GL call. It saves returning 6 | |
| 162 // ints redundantly when only the error is needed for the | |
| 163 // CommandBufferProxyImpl implementation. | |
| 164 gpu::error::Error GetLastError() override; | |
| 165 | |
| 166 int32_t route_id() const { return route_id_; } | 160 int32_t route_id() const { return route_id_; } |
| 167 | 161 |
| 168 const scoped_refptr<GpuChannelHost>& channel() const { return channel_; } | 162 const scoped_refptr<GpuChannelHost>& channel() const { return channel_; } |
| 169 | 163 |
| 170 base::SharedMemoryHandle GetSharedStateHandle() const { | 164 base::SharedMemoryHandle GetSharedStateHandle() const { |
| 171 return shared_state_shm_->handle(); | 165 return shared_state_shm_->handle(); |
| 172 } | 166 } |
| 173 uint32_t CreateStreamTexture(uint32_t texture_id); | 167 uint32_t CreateStreamTexture(uint32_t texture_id); |
| 174 | 168 |
| 175 private: | 169 private: |
| (...skipping 28 matching lines...) |
| 204 void OnUpdateVSyncParameters(base::TimeTicks timebase, | 198 void OnUpdateVSyncParameters(base::TimeTicks timebase, |
| 205 base::TimeDelta interval); | 199 base::TimeDelta interval); |
| 206 | 200 |
| 207 // Updates the highest verified release fence sync. | 201 // Updates the highest verified release fence sync. |
| 208 void UpdateVerifiedReleases(uint32_t verified_flush); | 202 void UpdateVerifiedReleases(uint32_t verified_flush); |
| 209 void CleanupFlushedReleases(uint32_t highest_verified_flush_id); | 203 void CleanupFlushedReleases(uint32_t highest_verified_flush_id); |
| 210 | 204 |
| 211 // Tries to read an updated copy of the state from shared memory, and calls | 205 // Tries to read an updated copy of the state from shared memory, and calls |
| 212 // OnGpuStateError() if the new state has an error. | 206 // OnGpuStateError() if the new state has an error. |
| 213 void TryUpdateState(); | 207 void TryUpdateState(); |
| 208 // Like the above, but calls the error handler and disconnects the channel |
| 209 // by posting a task. |
| 210 void TryUpdateStateThreadSafe(); |
| 214 // Like the above but does not call the error event handler if the new state | 211 // Like the above but does not call the error event handler if the new state |
| 215 // has an error. | 212 // has an error. |
| 216 void TryUpdateStateDontReportError(); | 213 void TryUpdateStateDontReportError(); |
| 217 // Sets the state, and calls OnGpuStateError() if the new state has an error. | 214 // Sets the state, and calls OnGpuStateError() if the new state has an error. |
| 218 void SetStateFromSyncReply(const gpu::CommandBuffer::State& state); | 215 void SetStateFromSyncReply(const gpu::CommandBuffer::State& state); |
| 219 | 216 |
| 220 // Loses the context after we receive an invalid reply from the GPU | 217 // Loses the context after we receive an invalid reply from the GPU |
| 221 // process. | 218 // process. |
| 222 void OnGpuSyncReplyError(); | 219 void OnGpuSyncReplyError(); |
| 223 | 220 |
| 224 // Loses the context when receiving a message from the GPU process. | 221 // Loses the context when receiving a message from the GPU process. |
| 225 void OnGpuAsyncMessageError(gpu::error::ContextLostReason reason, | 222 void OnGpuAsyncMessageError(gpu::error::ContextLostReason reason, |
| 226 gpu::error::Error error); | 223 gpu::error::Error error); |
| 227 | 224 |
| 228 // Loses the context after we receive an error state from the GPU process. | 225 // Loses the context after we receive an error state from the GPU process. |
| 229 void OnGpuStateError(); | 226 void OnGpuStateError(); |
| 230 | 227 |
| 231 // Sets an error on the last_state_ and loses the context due to client-side | 228 // Sets an error on the last_state_ and loses the context due to client-side |
| 232 // errors. | 229 // errors. |
| 233 void OnClientError(gpu::error::Error error); | 230 void OnClientError(gpu::error::Error error); |
| 234 | 231 |
| 235 // Helper methods, don't call these directly. | 232 // Helper methods, don't call these directly. |
| 236 void DisconnectChannelInFreshCallStack(); | 233 void DisconnectChannelInFreshCallStack(); |
| 237 void LockAndDisconnectChannel(); | 234 void LockAndDisconnectChannel(); |
| 238 void DisconnectChannel(); | 235 void DisconnectChannel(); |
| 239 | 236 |
| 240 // The shared memory area used to update state. | 237 // The shared memory area used to update state. |
| 241 gpu::CommandBufferSharedState* shared_state() const; | 238 gpu::CommandBufferSharedState* shared_state() const; |
| 242 | 239 |
| 240 // The shared memory area used to update state. |
| 241 std::unique_ptr<base::SharedMemory> shared_state_shm_; |
| 242 |
| 243 // The last cached state received from the service. |
| 244 State last_state_; |
| 245 |
| 246 // Lock to access shared state e.g. sync token release count across multiple |
| 247 // threads. This allows tracking command buffer progress from another thread. |
| 248 base::Lock last_state_lock_; |
| 249 |
| 243 // There should be a lock_ if this is going to be used across multiple | 250 // There should be a lock_ if this is going to be used across multiple |
| 244 // threads, or we guarantee it is used by a single thread by using a thread | 251 // threads, or we guarantee it is used by a single thread by using a thread |
| 245 // checker if no lock_ is set. | 252 // checker if no lock_ is set. |
| 246 base::Lock* lock_; | 253 base::Lock* lock_; |
| 247 base::ThreadChecker lockless_thread_checker_; | 254 base::ThreadChecker lockless_thread_checker_; |
| 248 | 255 |
| 249 // Client that wants to listen for important events on the GpuControl. | 256 // Client that wants to listen for important events on the GpuControl. |
| 250 gpu::GpuControlClient* gpu_control_client_; | 257 gpu::GpuControlClient* gpu_control_client_; |
| 251 | 258 |
| 252 // Unowned list of DeletionObservers. | 259 // Unowned list of DeletionObservers. |
| 253 base::ObserverList<DeletionObserver> deletion_observers_; | 260 base::ObserverList<DeletionObserver> deletion_observers_; |
| 254 | 261 |
| 255 // The last cached state received from the service. | |
| 256 State last_state_; | |
| 257 | |
| 258 // The shared memory area used to update state. | |
| 259 std::unique_ptr<base::SharedMemory> shared_state_shm_; | |
| 260 | |
| 261 scoped_refptr<GpuChannelHost> channel_; | 262 scoped_refptr<GpuChannelHost> channel_; |
| 262 const gpu::CommandBufferId command_buffer_id_; | 263 const gpu::CommandBufferId command_buffer_id_; |
| 263 const int32_t route_id_; | 264 const int32_t route_id_; |
| 264 const int32_t stream_id_; | 265 const int32_t stream_id_; |
| 265 uint32_t flush_count_; | 266 uint32_t flush_count_; |
| 266 int32_t last_put_offset_; | 267 int32_t last_put_offset_; |
| 267 int32_t last_barrier_put_offset_; | 268 int32_t last_barrier_put_offset_; |
| 268 | 269 |
| 269 // Next generated fence sync. | 270 // Next generated fence sync. |
| 270 uint64_t next_fence_sync_release_; | 271 uint64_t next_fence_sync_release_; |
| (...skipping 22 matching lines...) |
| 293 | 294 |
| 294 base::WeakPtr<CommandBufferProxyImpl> weak_this_; | 295 base::WeakPtr<CommandBufferProxyImpl> weak_this_; |
| 295 scoped_refptr<base::SequencedTaskRunner> callback_thread_; | 296 scoped_refptr<base::SequencedTaskRunner> callback_thread_; |
| 296 | 297 |
| 297 DISALLOW_COPY_AND_ASSIGN(CommandBufferProxyImpl); | 298 DISALLOW_COPY_AND_ASSIGN(CommandBufferProxyImpl); |
| 298 }; | 299 }; |
| 299 | 300 |
| 300 } // namespace gpu | 301 } // namespace gpu |
| 301 | 302 |
| 302 #endif // GPU_IPC_CLIENT_COMMAND_BUFFER_PROXY_IMPL_H_ | 303 #endif // GPU_IPC_CLIENT_COMMAND_BUFFER_PROXY_IMPL_H_ |
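For readers skimming the diff: the new last_state_lock_ and TryUpdateStateThreadSafe() exist so that last_state_ can be refreshed and read from more than one thread, as the new comments on lines 246-248 describe. Below is a minimal, self-contained sketch of that pattern only. It is not the Chromium implementation: it uses std::mutex in place of base::Lock, the State struct is a trivial stand-in for gpu::CommandBuffer::State, and RefreshFromSharedMemoryLocked() is a hypothetical placeholder for reading gpu::CommandBufferSharedState.

#include <cstdint>
#include <mutex>

// Trivial stand-in for gpu::CommandBuffer::State.
struct State {
  int32_t token = -1;
  uint64_t release_count = 0;
};

class CachedStateSketch {
 public:
  // Callable from any thread: refreshes the cached state under the lock and
  // returns a consistent snapshot, mirroring how GetLastState() can be
  // guarded by last_state_lock_ in the new header.
  State GetLastState() {
    std::lock_guard<std::mutex> hold(lock_);
    RefreshFromSharedMemoryLocked();
    return last_state_;
  }

 private:
  // Hypothetical placeholder; the real proxy would read the shared-memory
  // state and report errors (inline via OnGpuStateError(), or via a posted
  // task in the TryUpdateStateThreadSafe() variant).
  void RefreshFromSharedMemoryLocked() {}

  std::mutex lock_;   // plays the role of last_state_lock_
  State last_state_;  // plays the role of last_state_
};

Related to the same snapshot-based reading, the new version has WaitForTokenInRange() and WaitForGetOffsetInRange() return a State, and drops GetLastToken() and GetLastError(), presumably so callers take the token and error from the returned (or GetLastState()) snapshot rather than from separate accessors.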