| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "gpu/ipc/client/command_buffer_proxy_impl.h" | 5 #include "gpu/ipc/client/command_buffer_proxy_impl.h" |
| 6 | 6 |
| 7 #include <utility> | 7 #include <utility> |
| 8 #include <vector> | 8 #include <vector> |
| 9 | 9 |
| 10 #include "base/callback.h" | 10 #include "base/callback.h" |
| (...skipping 88 matching lines...) |
| 99 return command_buffer; | 99 return command_buffer; |
| 100 } | 100 } |
| 101 | 101 |
| 102 CommandBufferProxyImpl::~CommandBufferProxyImpl() { | 102 CommandBufferProxyImpl::~CommandBufferProxyImpl() { |
| 103 for (auto& observer : deletion_observers_) | 103 for (auto& observer : deletion_observers_) |
| 104 observer.OnWillDeleteImpl(); | 104 observer.OnWillDeleteImpl(); |
| 105 DisconnectChannel(); | 105 DisconnectChannel(); |
| 106 } | 106 } |
| 107 | 107 |
| 108 bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) { | 108 bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) { |
| 109 std::unique_ptr<base::AutoLock> lock; | 109 base::Optional<base::AutoLock> lock; |
| 110 if (lock_) | 110 if (lock_) |
| 111 lock.reset(new base::AutoLock(*lock_)); | 111 lock.emplace(*lock_); |
| 112 bool handled = true; | 112 bool handled = true; |
| 113 IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message) | 113 IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message) |
| 114 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed); | 114 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed); |
| 115 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage); | 115 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage); |
| 116 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalAck, OnSignalAck); | 116 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalAck, OnSignalAck); |
| 117 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SwapBuffersCompleted, | 117 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SwapBuffersCompleted, |
| 118 OnSwapBuffersCompleted); | 118 OnSwapBuffersCompleted); |
| 119 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_UpdateVSyncParameters, | 119 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_UpdateVSyncParameters, |
| 120 OnUpdateVSyncParameters); | 120 OnUpdateVSyncParameters); |
| 121 IPC_MESSAGE_UNHANDLED(handled = false) | 121 IPC_MESSAGE_UNHANDLED(handled = false) |
| 122 IPC_END_MESSAGE_MAP() | 122 IPC_END_MESSAGE_MAP() |
| 123 | 123 |
| 124 if (!handled) { | 124 if (!handled) { |
| 125 LOG(ERROR) << "Gpu process sent invalid message."; | 125 LOG(ERROR) << "Gpu process sent invalid message."; |
| 126 base::AutoLock last_state_lock(last_state_lock_); |
| 126 OnGpuAsyncMessageError(gpu::error::kInvalidGpuMessage, | 127 OnGpuAsyncMessageError(gpu::error::kInvalidGpuMessage, |
| 127 gpu::error::kLostContext); | 128 gpu::error::kLostContext); |
| 128 } | 129 } |
| 129 return handled; | 130 return handled; |
| 130 } | 131 } |
| 131 | 132 |
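Note on the locking change in OnMessageReceived() above: swapping std::unique_ptr<base::AutoLock> for base::Optional<base::AutoLock> keeps the same "lock only if a lock was provided" behavior while dropping the per-message heap allocation. A minimal standalone sketch of the pattern, with std::optional and std::mutex standing in for the Chromium types (illustrative only, not part of this CL):

    #include <mutex>
    #include <optional>

    void OnMessage(std::mutex* shared_mutex) {
      // Engage the guard only when a mutex was supplied, mirroring
      // `if (lock_) lock.emplace(*lock_);` above. No allocation happens:
      // the guard is constructed in place inside the optional.
      std::optional<std::lock_guard<std::mutex>> maybe_lock;
      if (shared_mutex)
        maybe_lock.emplace(*shared_mutex);
      // ... dispatch the message; unlocks automatically at scope exit ...
    }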
| 132 void CommandBufferProxyImpl::OnChannelError() { | 133 void CommandBufferProxyImpl::OnChannelError() { |
| 133 std::unique_ptr<base::AutoLock> lock; | 134 base::Optional<base::AutoLock> lock; |
| 134 if (lock_) | 135 if (lock_) |
| 135 lock.reset(new base::AutoLock(*lock_)); | 136 lock.emplace(*lock_); |
| 137 base::AutoLock last_state_lock(last_state_lock_); |
| 136 | 138 |
| 137 gpu::error::ContextLostReason context_lost_reason = | 139 gpu::error::ContextLostReason context_lost_reason = |
| 138 gpu::error::kGpuChannelLost; | 140 gpu::error::kGpuChannelLost; |
| 139 if (shared_state_shm_ && shared_state_shm_->memory()) { | 141 if (shared_state_shm_ && shared_state_shm_->memory()) { |
| 140 // The GPU process might have intentionally been crashed | 142 // The GPU process might have intentionally been crashed |
| 141 // (exit_on_context_lost), so try to find out the original reason. | 143 // (exit_on_context_lost), so try to find out the original reason. |
| 142 TryUpdateStateDontReportError(); | 144 TryUpdateStateDontReportError(); |
| 143 if (last_state_.error == gpu::error::kLostContext) | 145 if (last_state_.error == gpu::error::kLostContext) |
| 144 context_lost_reason = last_state_.context_lost_reason; | 146 context_lost_reason = last_state_.context_lost_reason; |
| 145 } | 147 } |
| 146 OnGpuAsyncMessageError(context_lost_reason, gpu::error::kLostContext); | 148 OnGpuAsyncMessageError(context_lost_reason, gpu::error::kLostContext); |
| 147 } | 149 } |
| 148 | 150 |
| 149 void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason, | 151 void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason, |
| 150 gpu::error::Error error) { | 152 gpu::error::Error error) { |
| 153 base::AutoLock lock(last_state_lock_); |
| 151 OnGpuAsyncMessageError(reason, error); | 154 OnGpuAsyncMessageError(reason, error); |
| 152 } | 155 } |
| 153 | 156 |
| 154 void CommandBufferProxyImpl::OnConsoleMessage( | 157 void CommandBufferProxyImpl::OnConsoleMessage( |
| 155 const GPUCommandBufferConsoleMessage& message) { | 158 const GPUCommandBufferConsoleMessage& message) { |
| 156 if (gpu_control_client_) | 159 if (gpu_control_client_) |
| 157 gpu_control_client_->OnGpuControlErrorMessage(message.message.c_str(), | 160 gpu_control_client_->OnGpuControlErrorMessage(message.message.c_str(), |
| 158 message.id); | 161 message.id); |
| 159 } | 162 } |
| 160 | 163 |
| 161 void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) { | 164 void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) { |
| 162 std::unique_ptr<base::AutoLock> lock; | 165 std::unique_ptr<base::AutoLock> lock; |
| 163 if (lock_) | 166 if (lock_) |
| 164 lock.reset(new base::AutoLock(*lock_)); | 167 lock.reset(new base::AutoLock(*lock_)); |
| 165 deletion_observers_.AddObserver(observer); | 168 deletion_observers_.AddObserver(observer); |
| 166 } | 169 } |
| 167 | 170 |
| 168 void CommandBufferProxyImpl::RemoveDeletionObserver( | 171 void CommandBufferProxyImpl::RemoveDeletionObserver( |
| 169 DeletionObserver* observer) { | 172 DeletionObserver* observer) { |
| 170 std::unique_ptr<base::AutoLock> lock; | 173 std::unique_ptr<base::AutoLock> lock; |
| 171 if (lock_) | 174 if (lock_) |
| 172 lock.reset(new base::AutoLock(*lock_)); | 175 lock.reset(new base::AutoLock(*lock_)); |
| 173 deletion_observers_.RemoveObserver(observer); | 176 deletion_observers_.RemoveObserver(observer); |
| 174 } | 177 } |
| 175 | 178 |
| 176 void CommandBufferProxyImpl::OnSignalAck(uint32_t id) { | 179 void CommandBufferProxyImpl::OnSignalAck(uint32_t id) { |
| 177 SignalTaskMap::iterator it = signal_tasks_.find(id); | 180 SignalTaskMap::iterator it = signal_tasks_.find(id); |
| 178 if (it == signal_tasks_.end()) { | 181 if (it == signal_tasks_.end()) { |
| 179 LOG(ERROR) << "Gpu process sent invalid SignalAck."; | 182 LOG(ERROR) << "Gpu process sent invalid SignalAck."; |
| 183 base::AutoLock lock(last_state_lock_); |
| 180 OnGpuAsyncMessageError(gpu::error::kInvalidGpuMessage, | 184 OnGpuAsyncMessageError(gpu::error::kInvalidGpuMessage, |
| 181 gpu::error::kLostContext); | 185 gpu::error::kLostContext); |
| 182 return; | 186 return; |
| 183 } | 187 } |
| 184 base::Closure callback = it->second; | 188 base::Closure callback = it->second; |
| 185 signal_tasks_.erase(it); | 189 signal_tasks_.erase(it); |
| 186 callback.Run(); | 190 callback.Run(); |
| 187 } | 191 } |
| 188 | 192 |
| 189 bool CommandBufferProxyImpl::Initialize( | 193 bool CommandBufferProxyImpl::Initialize( |
| (...skipping 13 matching lines...) |
| 203 shared_state()->Initialize(); | 207 shared_state()->Initialize(); |
| 204 | 208 |
| 205 // This handle is owned by the GPU process and must be passed to it or it | 209 // This handle is owned by the GPU process and must be passed to it or it |
| 206 // will leak. In other words, do not early out on error between here and the | 210 // will leak. In other words, do not early out on error between here and the |
| 207 // sending of the CreateCommandBuffer IPC below. | 211 // sending of the CreateCommandBuffer IPC below. |
| 208 base::SharedMemoryHandle handle = | 212 base::SharedMemoryHandle handle = |
| 209 channel->ShareToGpuProcess(shared_state_shm_->handle()); | 213 channel->ShareToGpuProcess(shared_state_shm_->handle()); |
| 210 if (!base::SharedMemory::IsHandleValid(handle)) | 214 if (!base::SharedMemory::IsHandleValid(handle)) |
| 211 return false; | 215 return false; |
| 212 | 216 |
| 213 | |
| 214 // TODO(vadimt): Remove ScopedTracker below once crbug.com/125248 is fixed. | 217 // TODO(vadimt): Remove ScopedTracker below once crbug.com/125248 is fixed. |
| 215 tracked_objects::ScopedTracker tracking_profile( | 218 tracked_objects::ScopedTracker tracking_profile( |
| 216 FROM_HERE_WITH_EXPLICIT_FUNCTION( | 219 FROM_HERE_WITH_EXPLICIT_FUNCTION( |
| 217 "125248 CommandBufferProxyImpl::Initialize")); | 220 "125248 CommandBufferProxyImpl::Initialize")); |
| 218 | 221 |
| 219 // Route must be added before sending the message, otherwise messages sent | 222 // Route must be added before sending the message, otherwise messages sent |
| 220 // from the GPU process could race against adding ourselves to the filter. | 223 // from the GPU process could race against adding ourselves to the filter. |
| 221 channel->AddRouteWithTaskRunner(route_id_, AsWeakPtr(), task_runner); | 224 channel->AddRouteWithTaskRunner(route_id_, AsWeakPtr(), task_runner); |
| 222 | 225 |
| 223 // We're blocking the UI thread, which is generally undesirable. | 226 // We're blocking the UI thread, which is generally undesirable. |
| 224 // In this case we need to wait for this before we can show any UI /anyway/, | 227 // In this case we need to wait for this before we can show any UI /anyway/, |
| 225 // so it won't cause additional jank. | 228 // so it won't cause additional jank. |
| 226 // TODO(piman): Make this asynchronous (http://crbug.com/125248). | 229 // TODO(piman): Make this asynchronous (http://crbug.com/125248). |
| 227 bool result = false; | 230 bool result = false; |
| 228 bool sent = channel->Send(new GpuChannelMsg_CreateCommandBuffer( | 231 bool sent = channel->Send(new GpuChannelMsg_CreateCommandBuffer( |
| 229 config, route_id_, handle, &result, &capabilities_)); | 232 config, route_id_, handle, &result, &capabilities_)); |
| 230 if (!sent || !result) { | 233 if (!sent || !result) { |
| 231 DLOG(ERROR) << "Failed to send GpuChannelMsg_CreateCommandBuffer."; | 234 DLOG(ERROR) << "Failed to send GpuChannelMsg_CreateCommandBuffer."; |
| 232 channel->RemoveRoute(route_id_); | 235 channel->RemoveRoute(route_id_); |
| 233 return false; | 236 return false; |
| 234 } | 237 } |
| 235 | 238 |
| 236 channel_ = std::move(channel); | 239 channel_ = std::move(channel); |
| 237 callback_thread_ = std::move(task_runner); | 240 callback_thread_ = std::move(task_runner); |
| 238 | 241 |
| 239 return true; | 242 return true; |
| 240 } | 243 } |
| 241 | 244 |
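The comments in Initialize() carry the two real constraints here: the duplicated shared-memory handle must reach the CreateCommandBuffer IPC or it leaks, and the route must exist before that IPC is sent or early GPU-side messages race the filter. A standalone sketch of the first rule, with a plain POSIX fd standing in for the handle (illustrative only; SendHandle is a hypothetical transfer function, not Chromium API):

    #include <unistd.h>

    // Hypothetical stand-in for the IPC hand-off; takes ownership of fd.
    bool SendHandle(int fd) { close(fd); return true; }

    bool InitializeLike(int original_fd) {
      int dup_fd = dup(original_fd);  // like ShareToGpuProcess(): a new handle
      if (dup_fd < 0)
        return false;                 // nothing duplicated yet; safe to bail
      // Between here and SendHandle(), no early returns: bailing out now
      // would leak dup_fd, the hazard the comment above warns about.
      if (!SendHandle(dup_fd)) {
        close(dup_fd);                // hand-off failed locally; reclaim it
        return false;
      }
      return true;
    }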
| 242 gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() { | 245 CommandBuffer::State CommandBufferProxyImpl::GetLastState() { |
| 246 base::AutoLock lock(last_state_lock_); |
| 247 TryUpdateState(); |
| 243 return last_state_; | 248 return last_state_; |
| 244 } | 249 } |
| 245 | 250 |
| 246 int32_t CommandBufferProxyImpl::GetLastToken() { | |
| 247 TryUpdateState(); | |
| 248 return last_state_.token; | |
| 249 } | |
| 250 | |
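With GetLastToken() removed, GetLastState() becomes the single read path: it takes last_state_lock_, refreshes last_state_ from shared memory via TryUpdateState(), and returns the snapshot. A caller that used GetLastToken() would presumably now read the token from that snapshot, along these lines (illustrative sketch, not code from this CL):

    // Assumes #include "gpu/ipc/client/command_buffer_proxy_impl.h".
    // The State snapshot is refreshed under last_state_lock_, so reading
    // .token from it is at least as fresh as the old GetLastToken() call.
    int32_t LastToken(gpu::CommandBufferProxyImpl* proxy) {
      return proxy->GetLastState().token;
    }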
| 251 void CommandBufferProxyImpl::Flush(int32_t put_offset) { | 251 void CommandBufferProxyImpl::Flush(int32_t put_offset) { |
| 252 CheckLock(); | 252 CheckLock(); |
| 253 base::AutoLock lock(last_state_lock_); |
| 253 if (last_state_.error != gpu::error::kNoError) | 254 if (last_state_.error != gpu::error::kNoError) |
| 254 return; | 255 return; |
| 255 | 256 |
| 256 TRACE_EVENT1("gpu", "CommandBufferProxyImpl::Flush", "put_offset", | 257 TRACE_EVENT1("gpu", "CommandBufferProxyImpl::Flush", "put_offset", |
| 257 put_offset); | 258 put_offset); |
| 258 | 259 |
| 259 bool put_offset_changed = last_put_offset_ != put_offset; | 260 bool put_offset_changed = last_put_offset_ != put_offset; |
| 260 last_put_offset_ = put_offset; | 261 last_put_offset_ = put_offset; |
| 261 last_barrier_put_offset_ = put_offset; | 262 last_barrier_put_offset_ = put_offset; |
| 262 | 263 |
| (...skipping 13 matching lines...) |
| 276 } | 277 } |
| 277 CleanupFlushedReleases(highest_verified_flush_id); | 278 CleanupFlushedReleases(highest_verified_flush_id); |
| 278 } | 279 } |
| 279 | 280 |
| 280 if (put_offset_changed) | 281 if (put_offset_changed) |
| 281 latency_info_.clear(); | 282 latency_info_.clear(); |
| 282 } | 283 } |
| 283 | 284 |
| 284 void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) { | 285 void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) { |
| 285 CheckLock(); | 286 CheckLock(); |
| 287 base::AutoLock lock(last_state_lock_); |
| 286 if (last_state_.error != gpu::error::kNoError) | 288 if (last_state_.error != gpu::error::kNoError) |
| 287 return; | 289 return; |
| 288 | 290 |
| 289 TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset", | 291 TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset", |
| 290 put_offset); | 292 put_offset); |
| 291 | 293 |
| 292 bool put_offset_changed = last_barrier_put_offset_ != put_offset; | 294 bool put_offset_changed = last_barrier_put_offset_ != put_offset; |
| 293 last_barrier_put_offset_ = put_offset; | 295 last_barrier_put_offset_ = put_offset; |
| 294 | 296 |
| 295 if (channel_) { | 297 if (channel_) { |
| (...skipping 30 matching lines...) |
| 326 CheckLock(); | 328 CheckLock(); |
| 327 swap_buffers_completion_callback_ = callback; | 329 swap_buffers_completion_callback_ = callback; |
| 328 } | 330 } |
| 329 | 331 |
| 330 void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback( | 332 void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback( |
| 331 const UpdateVSyncParametersCallback& callback) { | 333 const UpdateVSyncParametersCallback& callback) { |
| 332 CheckLock(); | 334 CheckLock(); |
| 333 update_vsync_parameters_completion_callback_ = callback; | 335 update_vsync_parameters_completion_callback_ = callback; |
| 334 } | 336 } |
| 335 | 337 |
| 336 void CommandBufferProxyImpl::WaitForTokenInRange(int32_t start, int32_t end) { | 338 gpu::CommandBuffer::State CommandBufferProxyImpl::WaitForTokenInRange( |
| 339 int32_t start, |
| 340 int32_t end) { |
| 337 CheckLock(); | 341 CheckLock(); |
| 342 base::AutoLock lock(last_state_lock_); |
| 338 TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForToken", "start", start, | 343 TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForToken", "start", start, |
| 339 "end", end); | 344 "end", end); |
| 345 // Error needs to be checked in case the state was updated on another thread. |
| 346 // We need to make sure that the reentrant context loss callback is called so |
| 347 // that the share group is also lost before we return any error up the stack. |
| 348 if (last_state_.error != gpu::error::kNoError) { |
| 349 if (gpu_control_client_) |
| 350 gpu_control_client_->OnGpuControlLostContextMaybeReentrant(); |
| 351 return last_state_; |
| 352 } |
| 340 TryUpdateState(); | 353 TryUpdateState(); |
| 341 if (!InRange(start, end, last_state_.token) && | 354 if (!InRange(start, end, last_state_.token) && |
| 342 last_state_.error == gpu::error::kNoError) { | 355 last_state_.error == gpu::error::kNoError) { |
| 343 gpu::CommandBuffer::State state; | 356 gpu::CommandBuffer::State state; |
| 344 if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(route_id_, start, end, | 357 if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(route_id_, start, end, |
| 345 &state))) | 358 &state))) { |
| 346 SetStateFromSyncReply(state); | 359 SetStateFromSyncReply(state); |
| 360 } |
| 347 } | 361 } |
| 348 if (!InRange(start, end, last_state_.token) && | 362 if (!InRange(start, end, last_state_.token) && |
| 349 last_state_.error == gpu::error::kNoError) { | 363 last_state_.error == gpu::error::kNoError) { |
| 350 LOG(ERROR) << "GPU state invalid after WaitForTokenInRange."; | 364 LOG(ERROR) << "GPU state invalid after WaitForTokenInRange."; |
| 351 OnGpuSyncReplyError(); | 365 OnGpuSyncReplyError(); |
| 352 } | 366 } |
| 367 return last_state_; |
| 353 } | 368 } |
| 354 | 369 |
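The early-error block added to WaitForTokenInRange() matters because last_state_ can now be poisoned by another thread between the caller's last check and this wait; returning the State lets callers observe that without a second GetLastState() round trip. A sketch of the expected caller shape (illustrative; `proxy` is assumed to be a CommandBufferProxyImpl* and HandleContextLoss is hypothetical):

    gpu::CommandBuffer::State state = proxy->WaitForTokenInRange(start, end);
    if (state.error != gpu::error::kNoError) {
      // Context lost. The proxy has already run the reentrant loss callback,
      // so the share group was marked lost before this error surfaced.
      HandleContextLoss(state.context_lost_reason);  // hypothetical helper
    }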
| 355 void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32_t start, | 370 gpu::CommandBuffer::State CommandBufferProxyImpl::WaitForGetOffsetInRange( |
| 356 int32_t end) { | 371 int32_t start, |
| 372 int32_t end) { |
| 357 CheckLock(); | 373 CheckLock(); |
| 374 base::AutoLock lock(last_state_lock_); |
| 358 TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForGetOffset", "start", | 375 TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForGetOffset", "start", |
| 359 start, "end", end); | 376 start, "end", end); |
| 377 // Error needs to be checked in case the state was updated on another thread. |
| 378 // We need to make sure that the reentrant context loss callback is called so |
| 379 // that the share group is also lost before we return any error up the stack. |
| 380 if (last_state_.error != gpu::error::kNoError) { |
| 381 if (gpu_control_client_) |
| 382 gpu_control_client_->OnGpuControlLostContextMaybeReentrant(); |
| 383 return last_state_; |
| 384 } |
| 360 TryUpdateState(); | 385 TryUpdateState(); |
| 361 if (!InRange(start, end, last_state_.get_offset) && | 386 if (!InRange(start, end, last_state_.get_offset) && |
| 362 last_state_.error == gpu::error::kNoError) { | 387 last_state_.error == gpu::error::kNoError) { |
| 363 gpu::CommandBuffer::State state; | 388 gpu::CommandBuffer::State state; |
| 364 if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(route_id_, start, | 389 if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(route_id_, start, |
| 365 end, &state))) | 390 end, &state))) |
| 366 SetStateFromSyncReply(state); | 391 SetStateFromSyncReply(state); |
| 367 } | 392 } |
| 368 if (!InRange(start, end, last_state_.get_offset) && | 393 if (!InRange(start, end, last_state_.get_offset) && |
| 369 last_state_.error == gpu::error::kNoError) { | 394 last_state_.error == gpu::error::kNoError) { |
| 370 LOG(ERROR) << "GPU state invalid after WaitForGetOffsetInRange."; | 395 LOG(ERROR) << "GPU state invalid after WaitForGetOffsetInRange."; |
| 371 OnGpuSyncReplyError(); | 396 OnGpuSyncReplyError(); |
| 372 } | 397 } |
| 398 return last_state_; |
| 373 } | 399 } |
| 374 | 400 |
| 375 void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) { | 401 void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) { |
| 376 CheckLock(); | 402 CheckLock(); |
| 403 base::AutoLock lock(last_state_lock_); |
| 377 if (last_state_.error != gpu::error::kNoError) | 404 if (last_state_.error != gpu::error::kNoError) |
| 378 return; | 405 return; |
| 379 | 406 |
| 380 Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id)); | 407 Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id)); |
| 381 last_put_offset_ = -1; | 408 last_put_offset_ = -1; |
| 382 } | 409 } |
| 383 | 410 |
| 384 scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer( | 411 scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer( |
| 385 size_t size, | 412 size_t size, |
| 386 int32_t* id) { | 413 int32_t* id) { |
| 387 CheckLock(); | 414 CheckLock(); |
| 415 base::AutoLock lock(last_state_lock_); |
| 388 *id = -1; | 416 *id = -1; |
| 389 | 417 |
| 390 if (last_state_.error != gpu::error::kNoError) | 418 if (last_state_.error != gpu::error::kNoError) |
| 391 return NULL; | 419 return NULL; |
| 392 | 420 |
| 393 int32_t new_id = channel_->ReserveTransferBufferId(); | 421 int32_t new_id = channel_->ReserveTransferBufferId(); |
| 394 | 422 |
| 395 std::unique_ptr<base::SharedMemory> shared_memory( | 423 std::unique_ptr<base::SharedMemory> shared_memory( |
| 396 channel_->factory()->AllocateSharedMemory(size)); | 424 channel_->factory()->AllocateSharedMemory(size)); |
| 397 if (!shared_memory) { | 425 if (!shared_memory) { |
| (...skipping 23 matching lines...) |
| 421 Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_, new_id, handle, | 449 Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_, new_id, handle, |
| 422 size)); | 450 size)); |
| 423 *id = new_id; | 451 *id = new_id; |
| 424 scoped_refptr<gpu::Buffer> buffer( | 452 scoped_refptr<gpu::Buffer> buffer( |
| 425 gpu::MakeBufferFromSharedMemory(std::move(shared_memory), size)); | 453 gpu::MakeBufferFromSharedMemory(std::move(shared_memory), size)); |
| 426 return buffer; | 454 return buffer; |
| 427 } | 455 } |
| 428 | 456 |
| 429 void CommandBufferProxyImpl::DestroyTransferBuffer(int32_t id) { | 457 void CommandBufferProxyImpl::DestroyTransferBuffer(int32_t id) { |
| 430 CheckLock(); | 458 CheckLock(); |
| 459 base::AutoLock lock(last_state_lock_); |
| 431 if (last_state_.error != gpu::error::kNoError) | 460 if (last_state_.error != gpu::error::kNoError) |
| 432 return; | 461 return; |
| 433 | 462 |
| 434 Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id)); | 463 Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id)); |
| 435 } | 464 } |
| 436 | 465 |
| 437 void CommandBufferProxyImpl::SetGpuControlClient(GpuControlClient* client) { | 466 void CommandBufferProxyImpl::SetGpuControlClient(GpuControlClient* client) { |
| 438 CheckLock(); | 467 CheckLock(); |
| 439 gpu_control_client_ = client; | 468 gpu_control_client_ = client; |
| 440 } | 469 } |
| 441 | 470 |
| 442 gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() { | 471 gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() { |
| 443 return capabilities_; | 472 return capabilities_; |
| 444 } | 473 } |
| 445 | 474 |
| 446 int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer, | 475 int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer, |
| 447 size_t width, | 476 size_t width, |
| 448 size_t height, | 477 size_t height, |
| 449 unsigned internal_format) { | 478 unsigned internal_format) { |
| 450 CheckLock(); | 479 CheckLock(); |
| 480 base::AutoLock lock(last_state_lock_); |
| 451 if (last_state_.error != gpu::error::kNoError) | 481 if (last_state_.error != gpu::error::kNoError) |
| 452 return -1; | 482 return -1; |
| 453 | 483 |
| 454 int32_t new_id = channel_->ReserveImageId(); | 484 int32_t new_id = channel_->ReserveImageId(); |
| 455 | 485 |
| 456 gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager = | 486 gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager = |
| 457 channel_->gpu_memory_buffer_manager(); | 487 channel_->gpu_memory_buffer_manager(); |
| 458 gfx::GpuMemoryBuffer* gpu_memory_buffer = | 488 gfx::GpuMemoryBuffer* gpu_memory_buffer = |
| 459 reinterpret_cast<gfx::GpuMemoryBuffer*>(buffer); | 489 reinterpret_cast<gfx::GpuMemoryBuffer*>(buffer); |
| 460 DCHECK(gpu_memory_buffer); | 490 DCHECK(gpu_memory_buffer); |
| (...skipping 40 matching lines...) |
| 501 | 531 |
| 502 gpu_memory_buffer_manager->SetDestructionSyncToken(gpu_memory_buffer, | 532 gpu_memory_buffer_manager->SetDestructionSyncToken(gpu_memory_buffer, |
| 503 sync_token); | 533 sync_token); |
| 504 } | 534 } |
| 505 | 535 |
| 506 return new_id; | 536 return new_id; |
| 507 } | 537 } |
| 508 | 538 |
| 509 void CommandBufferProxyImpl::DestroyImage(int32_t id) { | 539 void CommandBufferProxyImpl::DestroyImage(int32_t id) { |
| 510 CheckLock(); | 540 CheckLock(); |
| 541 base::AutoLock lock(last_state_lock_); |
| 511 if (last_state_.error != gpu::error::kNoError) | 542 if (last_state_.error != gpu::error::kNoError) |
| 512 return; | 543 return; |
| 513 | 544 |
| 514 Send(new GpuCommandBufferMsg_DestroyImage(route_id_, id)); | 545 Send(new GpuCommandBufferMsg_DestroyImage(route_id_, id)); |
| 515 } | 546 } |
| 516 | 547 |
| 517 int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage( | 548 int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage( |
| 518 size_t width, | 549 size_t width, |
| 519 size_t height, | 550 size_t height, |
| 520 unsigned internal_format, | 551 unsigned internal_format, |
| 521 unsigned usage) { | 552 unsigned usage) { |
| 522 CheckLock(); | 553 CheckLock(); |
| 523 std::unique_ptr<gfx::GpuMemoryBuffer> buffer( | 554 std::unique_ptr<gfx::GpuMemoryBuffer> buffer( |
| 524 channel_->gpu_memory_buffer_manager()->CreateGpuMemoryBuffer( | 555 channel_->gpu_memory_buffer_manager()->CreateGpuMemoryBuffer( |
| 525 gfx::Size(width, height), | 556 gfx::Size(width, height), |
| 526 gpu::DefaultBufferFormatForImageFormat(internal_format), | 557 gpu::DefaultBufferFormatForImageFormat(internal_format), |
| 527 gfx::BufferUsage::SCANOUT, gpu::kNullSurfaceHandle)); | 558 gfx::BufferUsage::SCANOUT, gpu::kNullSurfaceHandle)); |
| 528 if (!buffer) | 559 if (!buffer) |
| 529 return -1; | 560 return -1; |
| 530 | 561 |
| 531 int32_t result = | 562 int32_t result = |
| 532 CreateImage(buffer->AsClientBuffer(), width, height, internal_format); | 563 CreateImage(buffer->AsClientBuffer(), width, height, internal_format); |
| 533 return result; | 564 return result; |
| 534 } | 565 } |
| 535 | 566 |
| 536 uint32_t CommandBufferProxyImpl::CreateStreamTexture(uint32_t texture_id) { | 567 uint32_t CommandBufferProxyImpl::CreateStreamTexture(uint32_t texture_id) { |
| 537 CheckLock(); | 568 CheckLock(); |
| 569 base::AutoLock lock(last_state_lock_); |
| 538 if (last_state_.error != gpu::error::kNoError) | 570 if (last_state_.error != gpu::error::kNoError) |
| 539 return 0; | 571 return 0; |
| 540 | 572 |
| 541 int32_t stream_id = channel_->GenerateRouteID(); | 573 int32_t stream_id = channel_->GenerateRouteID(); |
| 542 bool succeeded = false; | 574 bool succeeded = false; |
| 543 Send(new GpuCommandBufferMsg_CreateStreamTexture(route_id_, texture_id, | 575 Send(new GpuCommandBufferMsg_CreateStreamTexture(route_id_, texture_id, |
| 544 stream_id, &succeeded)); | 576 stream_id, &succeeded)); |
| 545 if (!succeeded) { | 577 if (!succeeded) { |
| 546 DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure"; | 578 DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure"; |
| 547 return 0; | 579 return 0; |
| (...skipping 32 matching lines...) |
| 580 return release != 0 && release < next_fence_sync_release_; | 612 return release != 0 && release < next_fence_sync_release_; |
| 581 } | 613 } |
| 582 | 614 |
| 583 bool CommandBufferProxyImpl::IsFenceSyncFlushed(uint64_t release) { | 615 bool CommandBufferProxyImpl::IsFenceSyncFlushed(uint64_t release) { |
| 584 CheckLock(); | 616 CheckLock(); |
| 585 return release != 0 && release <= flushed_fence_sync_release_; | 617 return release != 0 && release <= flushed_fence_sync_release_; |
| 586 } | 618 } |
| 587 | 619 |
| 588 bool CommandBufferProxyImpl::IsFenceSyncFlushReceived(uint64_t release) { | 620 bool CommandBufferProxyImpl::IsFenceSyncFlushReceived(uint64_t release) { |
| 589 CheckLock(); | 621 CheckLock(); |
| 622 base::AutoLock lock(last_state_lock_); |
| 590 if (last_state_.error != gpu::error::kNoError) | 623 if (last_state_.error != gpu::error::kNoError) |
| 591 return false; | 624 return false; |
| 592 | 625 |
| 593 if (release <= verified_fence_sync_release_) | 626 if (release <= verified_fence_sync_release_) |
| 594 return true; | 627 return true; |
| 595 | 628 |
| 596 // Check if we have actually flushed the fence sync release. | 629 // Check if we have actually flushed the fence sync release. |
| 597 if (release <= flushed_fence_sync_release_) { | 630 if (release <= flushed_fence_sync_release_) { |
| 598 DCHECK(!flushed_release_flush_id_.empty()); | 631 DCHECK(!flushed_release_flush_id_.empty()); |
| 599 // Check if it has already been validated by another context. | 632 // Check if it has already been validated by another context. |
| 600 UpdateVerifiedReleases(channel_->GetHighestValidatedFlushID(stream_id_)); | 633 UpdateVerifiedReleases(channel_->GetHighestValidatedFlushID(stream_id_)); |
| 601 if (release <= verified_fence_sync_release_) | 634 if (release <= verified_fence_sync_release_) |
| 602 return true; | 635 return true; |
| 603 | 636 |
| 604 // Has not been validated, validate it now. | 637 // Has not been validated, validate it now. |
| 605 UpdateVerifiedReleases( | 638 UpdateVerifiedReleases( |
| 606 channel_->ValidateFlushIDReachedServer(stream_id_, false)); | 639 channel_->ValidateFlushIDReachedServer(stream_id_, false)); |
| 607 return release <= verified_fence_sync_release_; | 640 return release <= verified_fence_sync_release_; |
| 608 } | 641 } |
| 609 | 642 |
| 610 return false; | 643 return false; |
| 611 } | 644 } |
| 612 | 645 |
| 646 // This can be called from any thread without holding |lock_|. Use a thread-safe |
| 647 // non-error throwing variant of TryUpdateState for this. |
| 648 bool CommandBufferProxyImpl::IsFenceSyncReleased(uint64_t release) { |
| 649 base::AutoLock lock(last_state_lock_); |
| 650 TryUpdateStateThreadSafe(); |
| 651 return release <= last_state_.release_count; |
| 652 } |
| 653 |
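Unlike the surrounding methods, the new IsFenceSyncReleased() deliberately skips CheckLock(): as its comment says, any thread may call it, so it relies only on last_state_lock_ and on TryUpdateStateThreadSafe(), which defers error reporting rather than invoking callbacks inline. A standalone analogue of a cross-thread-readable release counter, with std::mutex standing in for base::Lock (illustrative only):

    #include <cstdint>
    #include <mutex>

    class ReleaseTracker {
     public:
      void SetReleaseCount(uint64_t count) {
        std::lock_guard<std::mutex> lock(mutex_);
        release_count_ = count;
      }
      // Safe from any thread: touches nothing but mutex_-guarded state and
      // never calls back into client code while holding the lock.
      bool IsReleased(uint64_t release) {
        std::lock_guard<std::mutex> lock(mutex_);
        return release <= release_count_;
      }
     private:
      std::mutex mutex_;
      uint64_t release_count_ = 0;
    };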
| 613 void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token, | 654 void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token, |
| 614 const base::Closure& callback) { | 655 const base::Closure& callback) { |
| 615 CheckLock(); | 656 CheckLock(); |
| 657 base::AutoLock lock(last_state_lock_); |
| 616 if (last_state_.error != gpu::error::kNoError) | 658 if (last_state_.error != gpu::error::kNoError) |
| 617 return; | 659 return; |
| 618 | 660 |
| 619 uint32_t signal_id = next_signal_id_++; | 661 uint32_t signal_id = next_signal_id_++; |
| 620 Send(new GpuCommandBufferMsg_SignalSyncToken(route_id_, sync_token, | 662 Send(new GpuCommandBufferMsg_SignalSyncToken(route_id_, sync_token, |
| 621 signal_id)); | 663 signal_id)); |
| 622 signal_tasks_.insert(std::make_pair(signal_id, callback)); | 664 signal_tasks_.insert(std::make_pair(signal_id, callback)); |
| 623 } | 665 } |
| 624 | 666 |
| 625 bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken( | 667 bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken( |
| (...skipping 14 matching lines...) |
| 640 | 682 |
| 641 if (release_stream_id != stream_id_) | 683 if (release_stream_id != stream_id_) |
| 642 channel_->FlushPendingStream(release_stream_id); | 684 channel_->FlushPendingStream(release_stream_id); |
| 643 | 685 |
| 644 return true; | 686 return true; |
| 645 } | 687 } |
| 646 | 688 |
| 647 void CommandBufferProxyImpl::SignalQuery(uint32_t query, | 689 void CommandBufferProxyImpl::SignalQuery(uint32_t query, |
| 648 const base::Closure& callback) { | 690 const base::Closure& callback) { |
| 649 CheckLock(); | 691 CheckLock(); |
| 692 base::AutoLock lock(last_state_lock_); |
| 650 if (last_state_.error != gpu::error::kNoError) | 693 if (last_state_.error != gpu::error::kNoError) |
| 651 return; | 694 return; |
| 652 | 695 |
| 653 // Signal identifiers are hidden, so nobody outside of this class will see | 696 // Signal identifiers are hidden, so nobody outside of this class will see |
| 654 // them. (And thus, they cannot save them.) The IDs themselves only last | 697 // them. (And thus, they cannot save them.) The IDs themselves only last |
| 655 // until the callback is invoked, which will happen as soon as the GPU | 698 // until the callback is invoked, which will happen as soon as the GPU |
| 656 // catches up with the command buffer. | 699 // catches up with the command buffer. |
| 657 // A malicious caller trying to create a collision by making next_signal_id wrap | 700 // A malicious caller trying to create a collision by making next_signal_id wrap |
| 658 // would have to make calls at an astounding rate (300B/s) and even if they | 701 // would have to make calls at an astounding rate (300B/s) and even if they |
| 659 // could do that, all they would do is to prevent some callbacks from getting | 702 // could do that, all they would do is to prevent some callbacks from getting |
| 660 // called, leading to stalled threads and/or memory leaks. | 703 // called, leading to stalled threads and/or memory leaks. |
| 661 uint32_t signal_id = next_signal_id_++; | 704 uint32_t signal_id = next_signal_id_++; |
| 662 Send(new GpuCommandBufferMsg_SignalQuery(route_id_, query, signal_id)); | 705 Send(new GpuCommandBufferMsg_SignalQuery(route_id_, query, signal_id)); |
| 663 signal_tasks_.insert(std::make_pair(signal_id, callback)); | 706 signal_tasks_.insert(std::make_pair(signal_id, callback)); |
| 664 } | 707 } |
| 665 | 708 |
| 666 void CommandBufferProxyImpl::TakeFrontBuffer(const gpu::Mailbox& mailbox) { | 709 void CommandBufferProxyImpl::TakeFrontBuffer(const gpu::Mailbox& mailbox) { |
| 667 CheckLock(); | 710 CheckLock(); |
| 711 base::AutoLock lock(last_state_lock_); |
| 668 if (last_state_.error != gpu::error::kNoError) | 712 if (last_state_.error != gpu::error::kNoError) |
| 669 return; | 713 return; |
| 670 | 714 |
| 671 Send(new GpuCommandBufferMsg_TakeFrontBuffer(route_id_, mailbox)); | 715 Send(new GpuCommandBufferMsg_TakeFrontBuffer(route_id_, mailbox)); |
| 672 } | 716 } |
| 673 | 717 |
| 674 void CommandBufferProxyImpl::ReturnFrontBuffer(const gpu::Mailbox& mailbox, | 718 void CommandBufferProxyImpl::ReturnFrontBuffer(const gpu::Mailbox& mailbox, |
| 675 const gpu::SyncToken& sync_token, | 719 const gpu::SyncToken& sync_token, |
| 676 bool is_lost) { | 720 bool is_lost) { |
| 677 CheckLock(); | 721 CheckLock(); |
| 722 base::AutoLock lock(last_state_lock_); |
| 678 if (last_state_.error != gpu::error::kNoError) | 723 if (last_state_.error != gpu::error::kNoError) |
| 679 return; | 724 return; |
| 680 | 725 |
| 681 Send(new GpuCommandBufferMsg_WaitSyncToken(route_id_, sync_token)); | 726 Send(new GpuCommandBufferMsg_WaitSyncToken(route_id_, sync_token)); |
| 682 Send(new GpuCommandBufferMsg_ReturnFrontBuffer(route_id_, mailbox, is_lost)); | 727 Send(new GpuCommandBufferMsg_ReturnFrontBuffer(route_id_, mailbox, is_lost)); |
| 683 } | 728 } |
| 684 | 729 |
| 685 gpu::error::Error CommandBufferProxyImpl::GetLastError() { | 730 bool CommandBufferProxyImpl::Send(IPC::Message* msg) { |
| 686 return last_state_.error; | 731 DCHECK(channel_); |
| 687 } | 732 last_state_lock_.AssertAcquired(); |
| 733 DCHECK_EQ(gpu::error::kNoError, last_state_.error); |
| 688 | 734 |
| 689 bool CommandBufferProxyImpl::Send(IPC::Message* msg) { | 735 last_state_lock_.Release(); |
| 690 // Caller should not intentionally send a message if the context is lost. | |
| 691 DCHECK(last_state_.error == gpu::error::kNoError); | |
| 692 DCHECK(channel_); | |
| 693 | 736 |
| 694 if (!msg->is_sync()) { | 737 // Call is_sync() before sending message. |
| 695 bool result = channel_->Send(msg); | 738 bool is_sync = msg->is_sync(); |
| 696 // Send() should always return true for async messages. | 739 bool result = channel_->Send(msg); |
| 697 DCHECK(result); | 740 // Send() should always return true for async messages. |
| 698 return true; | 741 DCHECK(is_sync || result); |
| 742 |
| 743 last_state_lock_.Acquire(); |
| 744 |
| 745 if (last_state_.error != gpu::error::kNoError) { |
| 746 // Error needs to be checked in case the state was updated on another thread |
| 747 // while we were waiting on Send. We need to make sure that the reentrant |
| 748 // context loss callback is called so that the share group is also lost |
| 749 // before we return any error up the stack. |
| 750 if (gpu_control_client_) |
| 751 gpu_control_client_->OnGpuControlLostContextMaybeReentrant(); |
| 752 return false; |
| 699 } | 753 } |
| 700 | 754 |
| 701 if (channel_->Send(msg)) | 755 if (!result) { |
| 702 return true; | 756 // Flag the command buffer as lost. Defer deleting the channel until |
| 757 // OnChannelError is called after returning to the message loop in case it |
| 758 // is referenced elsewhere. |
| 759 DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context."; |
| 760 OnClientError(gpu::error::kLostContext); |
| 761 return false; |
| 762 } |
| 703 | 763 |
| 704 // Flag the command buffer as lost. Defer deleting the channel until | 764 return true; |
| 705 // OnChannelError is called after returning to the message loop in case | |
| 706 // it is referenced elsewhere. | |
| 707 DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context."; | |
| 708 OnClientError(gpu::error::kLostContext); | |
| 709 return false; | |
| 710 } | 765 } |
| 711 | 766 |
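The rewritten Send() is the heart of this CL's locking scheme: last_state_lock_ is dropped across channel_->Send() so a synchronous IPC cannot hold up threads that only need to read state, then reacquired so the error recheck and the caller both see a consistent view. A standalone sketch of that unlock-around-blocking-call shape, with std::mutex standing in for base::Lock (all names illustrative):

    #include <mutex>

    std::mutex state_mutex;  // stands in for last_state_lock_
    int error_state = 0;     // stands in for last_state_.error

    // Stub standing in for a possibly long-blocking synchronous IPC.
    bool BlockingIpcSend() { return true; }

    // Called with state_mutex already held, like Send() above.
    bool SendDroppingLock() {
      state_mutex.unlock();             // last_state_lock_.Release();
      bool result = BlockingIpcSend();  // others may set error_state now
      state_mutex.lock();               // last_state_lock_.Acquire();
      if (error_state != 0)
        return false;  // state changed while unlocked; caller must bail
      return result;
    }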
| 712 void CommandBufferProxyImpl::SetStateFromSyncReply( | 767 void CommandBufferProxyImpl::SetStateFromSyncReply( |
| 713 const gpu::CommandBuffer::State& state) { | 768 const gpu::CommandBuffer::State& state) { |
| 714 DCHECK(last_state_.error == gpu::error::kNoError); | 769 CheckLock(); |
| 770 last_state_lock_.AssertAcquired(); |
| 715 // Handle wraparound. It works as long as we don't have more than 2B state | 771 // Handle wraparound. It works as long as we don't have more than 2B state |
| 716 // updates in flight across which reordering occurs. | 772 // updates in flight across which reordering occurs. |
| 717 if (state.generation - last_state_.generation < 0x80000000U) | 773 if (state.generation - last_state_.generation < 0x80000000U) |
| 718 last_state_ = state; | 774 last_state_ = state; |
| 719 if (last_state_.error != gpu::error::kNoError) | 775 if (last_state_.error != gpu::error::kNoError) |
| 720 OnGpuStateError(); | 776 OnGpuStateError(); |
| 721 } | 777 } |
| 722 | 778 |
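A worked instance of the generation test in SetStateFromSyncReply(): unsigned subtraction keeps "newer" well defined across the 2^32 boundary as long as fewer than 2^31 updates are in flight, which is what the "2B" in the comment refers to.

    #include <cstdint>

    bool IsNewer(uint32_t incoming, uint32_t last) {
      return incoming - last < 0x80000000U;  // same test as above
    }
    // IsNewer(0x00000010u, 0xFFFFFFF0u) == true: the difference wraps to
    // 0x20 (< 0x80000000), so a reply from just after the counter wrapped
    // still supersedes a generation from just before it.
    // IsNewer(0xFFFFFFF0u, 0x00000010u) == false: the difference wraps to
    // 0xFFFFFFE0, so a stale pre-wrap reply is correctly rejected.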
| 723 void CommandBufferProxyImpl::TryUpdateState() { | 779 void CommandBufferProxyImpl::TryUpdateState() { |
| 780 CheckLock(); |
| 781 last_state_lock_.AssertAcquired(); |
| 724 if (last_state_.error == gpu::error::kNoError) { | 782 if (last_state_.error == gpu::error::kNoError) { |
| 725 shared_state()->Read(&last_state_); | 783 shared_state()->Read(&last_state_); |
| 726 if (last_state_.error != gpu::error::kNoError) | 784 if (last_state_.error != gpu::error::kNoError) |
| 727 OnGpuStateError(); | 785 OnGpuStateError(); |
| 728 } | 786 } |
| 729 } | 787 } |
| 730 | 788 |
| 789 void CommandBufferProxyImpl::TryUpdateStateThreadSafe() { |
| 790 last_state_lock_.AssertAcquired(); |
| 791 if (last_state_.error == gpu::error::kNoError) { |
| 792 shared_state()->Read(&last_state_); |
| 793 if (last_state_.error != gpu::error::kNoError) { |
| 794 callback_thread_->PostTask( |
| 795 FROM_HERE, |
| 796 base::Bind(&CommandBufferProxyImpl::LockAndDisconnectChannel, |
| 797 weak_this_)); |
| 798 } |
| 799 } |
| 800 } |
| 801 |
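TryUpdateStateThreadSafe() is the cross-thread counterpart of TryUpdateState(): it may run on any thread, so rather than reporting errors inline it posts LockAndDisconnectChannel back to callback_thread_. A standalone analogue of "detect anywhere, report on the owning thread" (TaskRunner here is a stand-in for base::SingleThreadTaskRunner; illustrative only):

    #include <functional>
    #include <utility>

    struct TaskRunner {  // stand-in interface, not Chromium's
      virtual void PostTask(std::function<void()> task) = 0;
      virtual ~TaskRunner() = default;
    };

    void CheckStateFromAnyThread(int error, TaskRunner* owner,
                                 std::function<void()> disconnect) {
      if (error != 0) {
        // Don't run the loss callback here: this may be an arbitrary
        // thread. Bounce the report to the owning thread instead.
        owner->PostTask(std::move(disconnect));
      }
    }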
| 731 void CommandBufferProxyImpl::TryUpdateStateDontReportError() { | 802 void CommandBufferProxyImpl::TryUpdateStateDontReportError() { |
| 803 last_state_lock_.AssertAcquired(); |
| 732 if (last_state_.error == gpu::error::kNoError) | 804 if (last_state_.error == gpu::error::kNoError) |
| 733 shared_state()->Read(&last_state_); | 805 shared_state()->Read(&last_state_); |
| 734 } | 806 } |
| 735 | 807 |
| 736 void CommandBufferProxyImpl::UpdateVerifiedReleases(uint32_t verified_flush) { | 808 void CommandBufferProxyImpl::UpdateVerifiedReleases(uint32_t verified_flush) { |
| 737 while (!flushed_release_flush_id_.empty()) { | 809 while (!flushed_release_flush_id_.empty()) { |
| 738 const std::pair<uint64_t, uint32_t>& front_item = | 810 const std::pair<uint64_t, uint32_t>& front_item = |
| 739 flushed_release_flush_id_.front(); | 811 flushed_release_flush_id_.front(); |
| 740 if (front_item.second > verified_flush) | 812 if (front_item.second > verified_flush) |
| 741 break; | 813 break; |
| (...skipping 50 matching lines...) |
| 792 } | 864 } |
| 793 } | 865 } |
| 794 | 866 |
| 795 void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase, | 867 void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase, |
| 796 base::TimeDelta interval) { | 868 base::TimeDelta interval) { |
| 797 if (!update_vsync_parameters_completion_callback_.is_null()) | 869 if (!update_vsync_parameters_completion_callback_.is_null()) |
| 798 update_vsync_parameters_completion_callback_.Run(timebase, interval); | 870 update_vsync_parameters_completion_callback_.Run(timebase, interval); |
| 799 } | 871 } |
| 800 | 872 |
| 801 void CommandBufferProxyImpl::OnGpuSyncReplyError() { | 873 void CommandBufferProxyImpl::OnGpuSyncReplyError() { |
| 874 CheckLock(); |
| 875 last_state_lock_.AssertAcquired(); |
| 802 last_state_.error = gpu::error::kLostContext; | 876 last_state_.error = gpu::error::kLostContext; |
| 803 last_state_.context_lost_reason = gpu::error::kInvalidGpuMessage; | 877 last_state_.context_lost_reason = gpu::error::kInvalidGpuMessage; |
| 804 // This method may be inside a callstack from the GpuControlClient (we got a | 878 // This method may be inside a callstack from the GpuControlClient (we got a |
| 805 // bad reply to something we are sending to the GPU process). So avoid | 879 // bad reply to something we are sending to the GPU process). So avoid |
| 806 // re-entering the GpuControlClient here. | 880 // re-entering the GpuControlClient here. |
| 807 DisconnectChannelInFreshCallStack(); | 881 DisconnectChannelInFreshCallStack(); |
| 808 } | 882 } |
| 809 | 883 |
| 810 void CommandBufferProxyImpl::OnGpuAsyncMessageError( | 884 void CommandBufferProxyImpl::OnGpuAsyncMessageError( |
| 811 gpu::error::ContextLostReason reason, | 885 gpu::error::ContextLostReason reason, |
| 812 gpu::error::Error error) { | 886 gpu::error::Error error) { |
| 813 CheckLock(); | 887 CheckLock(); |
| 888 last_state_lock_.AssertAcquired(); |
| 814 last_state_.error = error; | 889 last_state_.error = error; |
| 815 last_state_.context_lost_reason = reason; | 890 last_state_.context_lost_reason = reason; |
| 816 // This method only occurs when receiving IPC messages, so we know it's not in | 891 // This method only occurs when receiving IPC messages, so we know it's not in |
| 817 // a callstack from the GpuControlClient. | 892 // a callstack from the GpuControlClient. Unlock the state lock to prevent |
| 893 // a deadlock when calling the context loss callback. |
| 894 base::AutoUnlock unlock(last_state_lock_); |
| 818 DisconnectChannel(); | 895 DisconnectChannel(); |
| 819 } | 896 } |
| 820 | 897 |
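base::AutoUnlock in OnGpuAsyncMessageError() is the inverse guard of base::AutoLock: it releases a held lock for the rest of the scope and reacquires it on destruction, which is what makes calling DisconnectChannel() (which may re-enter code that takes last_state_lock_) safe here. A standalone analogue built on the standard library (illustrative only):

    #include <mutex>

    class AutoUnlock {
     public:
      explicit AutoUnlock(std::mutex& m) : m_(m) { m_.unlock(); }
      ~AutoUnlock() { m_.lock(); }  // reacquire when the scope ends
     private:
      std::mutex& m_;
    };

    // Usage: the caller holds `m`; the callee may itself lock `m`, so drop it.
    void LoseContext(std::mutex& m /* held */, void (*disconnect)()) {
      AutoUnlock unlock(m);  // m released here
      disconnect();          // safe even if disconnect() locks m again
    }                        // m reacquired before returning to the caller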
| 821 void CommandBufferProxyImpl::OnGpuStateError() { | 898 void CommandBufferProxyImpl::OnGpuStateError() { |
| 822 DCHECK(last_state_.error != gpu::error::kNoError); | 899 CheckLock(); |
| 900 last_state_lock_.AssertAcquired(); |
| 901 DCHECK_NE(gpu::error::kNoError, last_state_.error); |
| 823 // This method may be inside a callstack from the GpuControlClient (we | 902 // This method may be inside a callstack from the GpuControlClient (we |
| 824 // encountered an error while trying to perform some action). So avoid | 903 // encountered an error while trying to perform some action). So avoid |
| 825 // re-entering the GpuControlClient here. | 904 // re-entering the GpuControlClient here. |
| 826 DisconnectChannelInFreshCallStack(); | 905 DisconnectChannelInFreshCallStack(); |
| 827 } | 906 } |
| 828 | 907 |
| 829 void CommandBufferProxyImpl::OnClientError(gpu::error::Error error) { | 908 void CommandBufferProxyImpl::OnClientError(gpu::error::Error error) { |
| 830 CheckLock(); | 909 CheckLock(); |
| 910 last_state_lock_.AssertAcquired(); |
| 831 last_state_.error = error; | 911 last_state_.error = error; |
| 832 last_state_.context_lost_reason = gpu::error::kUnknown; | 912 last_state_.context_lost_reason = gpu::error::kUnknown; |
| 833 // This method may be inside a callstack from the GpuControlClient (we | 913 // This method may be inside a callstack from the GpuControlClient (we |
| 834 // encountered an error while trying to perform some action). So avoid | 914 // encountered an error while trying to perform some action). So avoid |
| 835 // re-entering the GpuControlClient here. | 915 // re-entering the GpuControlClient here. |
| 836 DisconnectChannelInFreshCallStack(); | 916 DisconnectChannelInFreshCallStack(); |
| 837 } | 917 } |
| 838 | 918 |
| 839 void CommandBufferProxyImpl::DisconnectChannelInFreshCallStack() { | 919 void CommandBufferProxyImpl::DisconnectChannelInFreshCallStack() { |
| 840 CheckLock(); | 920 CheckLock(); |
| 921 last_state_lock_.AssertAcquired(); |
| 841 // Inform the GpuControlClient of the lost state immediately, though this may | 922 // Inform the GpuControlClient of the lost state immediately, though this may |
| 842 // be a re-entrant call to the client so we use the MaybeReentrant variant. | 923 // be a re-entrant call to the client so we use the MaybeReentrant variant. |
| 843 if (gpu_control_client_) | 924 if (gpu_control_client_) |
| 844 gpu_control_client_->OnGpuControlLostContextMaybeReentrant(); | 925 gpu_control_client_->OnGpuControlLostContextMaybeReentrant(); |
| 845 // Create a fresh call stack to keep the |channel_| alive while we unwind the | 926 // Create a fresh call stack to keep the |channel_| alive while we unwind the |
| 846 // stack in case things will use it, and give the GpuChannelClient a chance to | 927 // stack in case things will use it, and give the GpuChannelClient a chance to |
| 847 // act fully on the lost context. | 928 // act fully on the lost context. |
| 848 callback_thread_->PostTask( | 929 callback_thread_->PostTask( |
| 849 FROM_HERE, base::Bind(&CommandBufferProxyImpl::LockAndDisconnectChannel, | 930 FROM_HERE, base::Bind(&CommandBufferProxyImpl::LockAndDisconnectChannel, |
| 850 weak_this_)); | 931 weak_this_)); |
| (...skipping 14 matching lines...) |
| 865 return; | 946 return; |
| 866 channel_->FlushPendingStream(stream_id_); | 947 channel_->FlushPendingStream(stream_id_); |
| 867 channel_->Send(new GpuChannelMsg_DestroyCommandBuffer(route_id_)); | 948 channel_->Send(new GpuChannelMsg_DestroyCommandBuffer(route_id_)); |
| 868 channel_->RemoveRoute(route_id_); | 949 channel_->RemoveRoute(route_id_); |
| 869 channel_ = nullptr; | 950 channel_ = nullptr; |
| 870 if (gpu_control_client_) | 951 if (gpu_control_client_) |
| 871 gpu_control_client_->OnGpuControlLostContext(); | 952 gpu_control_client_->OnGpuControlLostContext(); |
| 872 } | 953 } |
| 873 | 954 |
| 874 } // namespace gpu | 955 } // namespace gpu |