Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "gpu/ipc/client/command_buffer_proxy_impl.h" | 5 #include "gpu/ipc/client/command_buffer_proxy_impl.h" |
| 6 | 6 |
| 7 #include <utility> | 7 #include <utility> |
| 8 #include <vector> | 8 #include <vector> |
| 9 | 9 |
| 10 #include "base/callback.h" | 10 #include "base/callback.h" |
| (...skipping 112 matching lines...) | |
| 123 | 123 |
| 124 if (!handled) { | 124 if (!handled) { |
| 125 LOG(ERROR) << "Gpu process sent invalid message."; | 125 LOG(ERROR) << "Gpu process sent invalid message."; |
| 126 OnGpuAsyncMessageError(gpu::error::kInvalidGpuMessage, | 126 OnGpuAsyncMessageError(gpu::error::kInvalidGpuMessage, |
| 127 gpu::error::kLostContext); | 127 gpu::error::kLostContext); |
| 128 } | 128 } |
| 129 return handled; | 129 return handled; |
| 130 } | 130 } |
| 131 | 131 |
| 132 void CommandBufferProxyImpl::OnChannelError() { | 132 void CommandBufferProxyImpl::OnChannelError() { |
| 133 std::unique_ptr<base::AutoLock> lock; | 133 base::Optional<base::AutoLock> lock; |
| 134 if (lock_) | 134 if (lock_) |
| 135 lock.reset(new base::AutoLock(*lock_)); | 135 lock.emplace(*lock_); |
| 136 base::AutoLock state_lock(state_lock_); | |
| 136 | 137 |
| 137 gpu::error::ContextLostReason context_lost_reason = | 138 gpu::error::ContextLostReason context_lost_reason = |
| 138 gpu::error::kGpuChannelLost; | 139 gpu::error::kGpuChannelLost; |
| 139 if (shared_state_shm_ && shared_state_shm_->memory()) { | 140 if (shared_state_shm_ && shared_state_shm_->memory()) { |
| 140 // The GPU process might have intentionally been crashed | 141 // The GPU process might have intentionally been crashed |
| 141 // (exit_on_context_lost), so try to find out the original reason. | 142 // (exit_on_context_lost), so try to find out the original reason. |
| 142 TryUpdateStateDontReportError(); | 143 TryUpdateStateDontReportError(); |
| 143 if (last_state_.error == gpu::error::kLostContext) | 144 if (last_state_.error == gpu::error::kLostContext) |
| 144 context_lost_reason = last_state_.context_lost_reason; | 145 context_lost_reason = last_state_.context_lost_reason; |
| 145 } | 146 } |
| (...skipping 57 matching lines...) | |
| 203 shared_state()->Initialize(); | 204 shared_state()->Initialize(); |
| 204 | 205 |
| 205 // This handle is owned by the GPU process and must be passed to it or it | 206 // This handle is owned by the GPU process and must be passed to it or it |
| 206 // will leak. In other words, do not early out on error between here and the | 207 // will leak. In other words, do not early out on error between here and the |
| 207 // sending of the CreateCommandBuffer IPC below. | 208 // sending of the CreateCommandBuffer IPC below. |
| 208 base::SharedMemoryHandle handle = | 209 base::SharedMemoryHandle handle = |
| 209 channel->ShareToGpuProcess(shared_state_shm_->handle()); | 210 channel->ShareToGpuProcess(shared_state_shm_->handle()); |
| 210 if (!base::SharedMemory::IsHandleValid(handle)) | 211 if (!base::SharedMemory::IsHandleValid(handle)) |
| 211 return false; | 212 return false; |
| 212 | 213 |
| 213 | |
| 214 // TODO(vadimt): Remove ScopedTracker below once crbug.com/125248 is fixed. | 214 // TODO(vadimt): Remove ScopedTracker below once crbug.com/125248 is fixed. |
| 215 tracked_objects::ScopedTracker tracking_profile( | 215 tracked_objects::ScopedTracker tracking_profile( |
| 216 FROM_HERE_WITH_EXPLICIT_FUNCTION( | 216 FROM_HERE_WITH_EXPLICIT_FUNCTION( |
| 217 "125248 CommandBufferProxyImpl::Initialize")); | 217 "125248 CommandBufferProxyImpl::Initialize")); |
| 218 | 218 |
| 219 // Route must be added before sending the message, otherwise messages sent | 219 // Route must be added before sending the message, otherwise messages sent |
| 220 // from the GPU process could race against adding ourselves to the filter. | 220 // from the GPU process could race against adding ourselves to the filter. |
| 221 channel->AddRouteWithTaskRunner(route_id_, AsWeakPtr(), task_runner); | 221 channel->AddRouteWithTaskRunner(route_id_, AsWeakPtr(), task_runner); |
| 222 | 222 |
| 223 // We're blocking the UI thread, which is generally undesirable. | 223 // We're blocking the UI thread, which is generally undesirable. |
| 224 // In this case we need to wait for this before we can show any UI /anyway/, | 224 // In this case we need to wait for this before we can show any UI /anyway/, |
| 225 // so it won't cause additional jank. | 225 // so it won't cause additional jank. |
| 226 // TODO(piman): Make this asynchronous (http://crbug.com/125248). | 226 // TODO(piman): Make this asynchronous (http://crbug.com/125248). |
| 227 bool result = false; | 227 bool result = false; |
| 228 bool sent = channel->Send(new GpuChannelMsg_CreateCommandBuffer( | 228 bool sent = channel->Send(new GpuChannelMsg_CreateCommandBuffer( |
| 229 config, route_id_, handle, &result, &capabilities_)); | 229 config, route_id_, handle, &result, &capabilities_)); |
| 230 if (!sent || !result) { | 230 if (!sent || !result) { |
| 231 DLOG(ERROR) << "Failed to send GpuChannelMsg_CreateCommandBuffer."; | 231 DLOG(ERROR) << "Failed to send GpuChannelMsg_CreateCommandBuffer."; |
| 232 channel->RemoveRoute(route_id_); | 232 channel->RemoveRoute(route_id_); |
| 233 return false; | 233 return false; |
| 234 } | 234 } |
| 235 | 235 |
| 236 channel_ = std::move(channel); | 236 channel_ = std::move(channel); |
| 237 callback_thread_ = std::move(task_runner); | 237 callback_thread_ = std::move(task_runner); |
| 238 | 238 |
| 239 return true; | 239 return true; |
| 240 } | 240 } |
| 241 | 241 |
| 242 gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() { | 242 gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() { |
| 243 base::AutoLock lock(state_lock_); | |
| 244 TryUpdateState(); | |
| 243 return last_state_; | 245 return last_state_; |
| 244 } | 246 } |
| 245 | 247 |
| 246 int32_t CommandBufferProxyImpl::GetLastToken() { | 248 int32_t CommandBufferProxyImpl::GetLastToken() { |
|
piman
2016/12/02 19:21:48
nit: maybe return GetLastState().token ?
Maybe we
sunnyps
2016/12/07 03:31:22
Got rid of both GetLastToken and GetLastError. Thi
| |
| 249 base::AutoLock lock(state_lock_); | |
| 247 TryUpdateState(); | 250 TryUpdateState(); |
| 248 return last_state_.token; | 251 return last_state_.token; |
| 249 } | 252 } |
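Piman's nit above suggests expressing the narrow accessors in terms of GetLastState() rather than duplicating the lock-and-refresh sequence; the reply notes that GetLastToken and GetLastError were ultimately removed instead. A minimal sketch of that suggestion, using the signatures shown in this diff (not the code that landed):

```cpp
// Delegating sketch only: GetLastState() already takes state_lock_ and calls
// TryUpdateState(), so the narrower accessors can simply forward to it.
int32_t CommandBufferProxyImpl::GetLastToken() {
  return GetLastState().token;
}

gpu::error::Error CommandBufferProxyImpl::GetLastError() {
  return GetLastState().error;
}
```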
| 250 | 253 |
| 251 void CommandBufferProxyImpl::Flush(int32_t put_offset) { | 254 void CommandBufferProxyImpl::Flush(int32_t put_offset) { |
| 252 CheckLock(); | 255 CheckLock(); |
| 256 base::AutoLock lock(state_lock_); | |
| 253 if (last_state_.error != gpu::error::kNoError) | 257 if (last_state_.error != gpu::error::kNoError) |
| 254 return; | 258 return; |
| 255 | 259 |
| 256 TRACE_EVENT1("gpu", "CommandBufferProxyImpl::Flush", "put_offset", | 260 TRACE_EVENT1("gpu", "CommandBufferProxyImpl::Flush", "put_offset", |
| 257 put_offset); | 261 put_offset); |
| 258 | 262 |
| 259 bool put_offset_changed = last_put_offset_ != put_offset; | 263 bool put_offset_changed = last_put_offset_ != put_offset; |
| 260 last_put_offset_ = put_offset; | 264 last_put_offset_ = put_offset; |
| 261 last_barrier_put_offset_ = put_offset; | 265 last_barrier_put_offset_ = put_offset; |
| 262 | 266 |
| (...skipping 13 matching lines...) | |
| 276 } | 280 } |
| 277 CleanupFlushedReleases(highest_verified_flush_id); | 281 CleanupFlushedReleases(highest_verified_flush_id); |
| 278 } | 282 } |
| 279 | 283 |
| 280 if (put_offset_changed) | 284 if (put_offset_changed) |
| 281 latency_info_.clear(); | 285 latency_info_.clear(); |
| 282 } | 286 } |
| 283 | 287 |
| 284 void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) { | 288 void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) { |
| 285 CheckLock(); | 289 CheckLock(); |
| 290 base::AutoLock lock(state_lock_); | |
| 286 if (last_state_.error != gpu::error::kNoError) | 291 if (last_state_.error != gpu::error::kNoError) |
| 287 return; | 292 return; |
| 288 | 293 |
| 289 TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset", | 294 TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset", |
| 290 put_offset); | 295 put_offset); |
| 291 | 296 |
| 292 bool put_offset_changed = last_barrier_put_offset_ != put_offset; | 297 bool put_offset_changed = last_barrier_put_offset_ != put_offset; |
| 293 last_barrier_put_offset_ = put_offset; | 298 last_barrier_put_offset_ = put_offset; |
| 294 | 299 |
| 295 if (channel_) { | 300 if (channel_) { |
| (...skipping 30 matching lines...) | |
| 326 CheckLock(); | 331 CheckLock(); |
| 327 swap_buffers_completion_callback_ = callback; | 332 swap_buffers_completion_callback_ = callback; |
| 328 } | 333 } |
| 329 | 334 |
| 330 void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback( | 335 void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback( |
| 331 const UpdateVSyncParametersCallback& callback) { | 336 const UpdateVSyncParametersCallback& callback) { |
| 332 CheckLock(); | 337 CheckLock(); |
| 333 update_vsync_parameters_completion_callback_ = callback; | 338 update_vsync_parameters_completion_callback_ = callback; |
| 334 } | 339 } |
| 335 | 340 |
| 336 void CommandBufferProxyImpl::WaitForTokenInRange(int32_t start, int32_t end) { | 341 gpu::CommandBuffer::State CommandBufferProxyImpl::WaitForTokenInRange( |
| 342 int32_t start, | |
| 343 int32_t end) { | |
| 337 CheckLock(); | 344 CheckLock(); |
| 345 base::AutoLock lock(state_lock_); | |
| 338 TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForToken", "start", start, | 346 TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForToken", "start", start, |
| 339 "end", end); | 347 "end", end); |
| 348 // Error needs to be checked in case the state was updated on another thread. | |
| 349 // We need to make sure that the reentrant context loss callback is called so | |
| 350 // that the share group is also lost before this method returns. | |
| 351 if (last_state_.error != gpu::error::kNoError) { | |
| 352 OnGpuStateError(); | |
|
piman
2016/12/02 19:21:48
I don't think OnGpuStateError is right, because th
sunnyps
2016/12/07 03:31:22
DisconnectChannel (the posted task) early outs if
| |
| 353 return last_state_; | |
| 354 } | |
| 340 TryUpdateState(); | 355 TryUpdateState(); |
| 341 if (!InRange(start, end, last_state_.token) && | 356 if (!InRange(start, end, last_state_.token) && |
| 342 last_state_.error == gpu::error::kNoError) { | 357 last_state_.error == gpu::error::kNoError) { |
| 343 gpu::CommandBuffer::State state; | 358 gpu::CommandBuffer::State state; |
| 344 if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(route_id_, start, end, | 359 if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(route_id_, start, end, |
| 345 &state))) | 360 &state))) { |
| 346 SetStateFromSyncReply(state); | 361 SetStateFromSyncReply(state); |
| 362 } | |
| 347 } | 363 } |
| 348 if (!InRange(start, end, last_state_.token) && | 364 if (!InRange(start, end, last_state_.token) && |
| 349 last_state_.error == gpu::error::kNoError) { | 365 last_state_.error == gpu::error::kNoError) { |
| 350 LOG(ERROR) << "GPU state invalid after WaitForTokenInRange."; | 366 LOG(ERROR) << "GPU state invalid after WaitForTokenInRange."; |
| 351 OnGpuSyncReplyError(); | 367 OnGpuSyncReplyError(); |
| 352 } | 368 } |
| 369 return last_state_; | |
| 353 } | 370 } |
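The signature change above (returning the refreshed State instead of void) lets callers consume the wait result directly. A hypothetical caller-side sketch, with the surrounding caller code made up purely for illustration:

```cpp
// Hypothetical usage of the new signature: the wait returns the refreshed
// state, so the caller no longer needs a separate GetLastState() round trip.
gpu::CommandBuffer::State state =
    command_buffer_->WaitForTokenInRange(start, end);
if (state.error != gpu::error::kNoError) {
  // Context was lost while waiting; per the comment above, the reentrant
  // context-loss callback has already run, so the share group is lost too.
  return false;
}
```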
| 354 | 371 |
| 355 void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32_t start, | 372 gpu::CommandBuffer::State CommandBufferProxyImpl::WaitForGetOffsetInRange( |
| 356 int32_t end) { | 373 int32_t start, |
| 374 int32_t end) { | |
| 357 CheckLock(); | 375 CheckLock(); |
| 376 base::AutoLock lock(state_lock_); | |
| 358 TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForGetOffset", "start", | 377 TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForGetOffset", "start", |
| 359 start, "end", end); | 378 start, "end", end); |
| 379 // Error needs to be checked in case the state was updated on another thread. | |
| 380 // We need to make sure that the reentrant context loss callback is called so | |
| 381 // that the share group is also lost before this method returns. | |
| 382 if (last_state_.error != gpu::error::kNoError) { | |
| 383 OnGpuStateError(); | |
|
piman
2016/12/02 19:21:48
Ditto
sunnyps
2016/12/07 03:31:22
Done.
| |
| 384 return last_state_; | |
| 385 } | |
| 360 TryUpdateState(); | 386 TryUpdateState(); |
| 361 if (!InRange(start, end, last_state_.get_offset) && | 387 if (!InRange(start, end, last_state_.get_offset) && |
| 362 last_state_.error == gpu::error::kNoError) { | 388 last_state_.error == gpu::error::kNoError) { |
| 363 gpu::CommandBuffer::State state; | 389 gpu::CommandBuffer::State state; |
| 364 if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(route_id_, start, | 390 if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(route_id_, start, |
| 365 end, &state))) | 391 end, &state))) |
| 366 SetStateFromSyncReply(state); | 392 SetStateFromSyncReply(state); |
| 367 } | 393 } |
| 368 if (!InRange(start, end, last_state_.get_offset) && | 394 if (!InRange(start, end, last_state_.get_offset) && |
| 369 last_state_.error == gpu::error::kNoError) { | 395 last_state_.error == gpu::error::kNoError) { |
| 370 LOG(ERROR) << "GPU state invalid after WaitForGetOffsetInRange."; | 396 LOG(ERROR) << "GPU state invalid after WaitForGetOffsetInRange."; |
| 371 OnGpuSyncReplyError(); | 397 OnGpuSyncReplyError(); |
| 372 } | 398 } |
| 399 return last_state_; | |
| 373 } | 400 } |
| 374 | 401 |
| 375 void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) { | 402 void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) { |
| 376 CheckLock(); | 403 CheckLock(); |
| 404 base::AutoLock lock(state_lock_); | |
| 377 if (last_state_.error != gpu::error::kNoError) | 405 if (last_state_.error != gpu::error::kNoError) |
| 378 return; | 406 return; |
| 379 | 407 |
| 380 Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id)); | 408 Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id)); |
| 381 last_put_offset_ = -1; | 409 last_put_offset_ = -1; |
| 382 } | 410 } |
| 383 | 411 |
| 384 scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer( | 412 scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer( |
| 385 size_t size, | 413 size_t size, |
| 386 int32_t* id) { | 414 int32_t* id) { |
| 387 CheckLock(); | 415 CheckLock(); |
| 416 base::AutoLock lock(state_lock_); | |
| 388 *id = -1; | 417 *id = -1; |
| 389 | 418 |
| 390 if (last_state_.error != gpu::error::kNoError) | 419 if (last_state_.error != gpu::error::kNoError) |
| 391 return NULL; | 420 return NULL; |
| 392 | 421 |
| 393 int32_t new_id = channel_->ReserveTransferBufferId(); | 422 int32_t new_id = channel_->ReserveTransferBufferId(); |
| 394 | 423 |
| 395 std::unique_ptr<base::SharedMemory> shared_memory( | 424 std::unique_ptr<base::SharedMemory> shared_memory( |
| 396 channel_->factory()->AllocateSharedMemory(size)); | 425 channel_->factory()->AllocateSharedMemory(size)); |
| 397 if (!shared_memory) { | 426 if (!shared_memory) { |
| (...skipping 23 matching lines...) | |
| 421 Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_, new_id, handle, | 450 Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_, new_id, handle, |
| 422 size)); | 451 size)); |
| 423 *id = new_id; | 452 *id = new_id; |
| 424 scoped_refptr<gpu::Buffer> buffer( | 453 scoped_refptr<gpu::Buffer> buffer( |
| 425 gpu::MakeBufferFromSharedMemory(std::move(shared_memory), size)); | 454 gpu::MakeBufferFromSharedMemory(std::move(shared_memory), size)); |
| 426 return buffer; | 455 return buffer; |
| 427 } | 456 } |
| 428 | 457 |
| 429 void CommandBufferProxyImpl::DestroyTransferBuffer(int32_t id) { | 458 void CommandBufferProxyImpl::DestroyTransferBuffer(int32_t id) { |
| 430 CheckLock(); | 459 CheckLock(); |
| 460 base::AutoLock lock(state_lock_); | |
| 431 if (last_state_.error != gpu::error::kNoError) | 461 if (last_state_.error != gpu::error::kNoError) |
| 432 return; | 462 return; |
| 433 | 463 |
| 434 Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id)); | 464 Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id)); |
| 435 } | 465 } |
| 436 | 466 |
| 437 void CommandBufferProxyImpl::SetGpuControlClient(GpuControlClient* client) { | 467 void CommandBufferProxyImpl::SetGpuControlClient(GpuControlClient* client) { |
| 438 CheckLock(); | 468 CheckLock(); |
| 439 gpu_control_client_ = client; | 469 gpu_control_client_ = client; |
| 440 } | 470 } |
| 441 | 471 |
| 442 gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() { | 472 gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() { |
| 443 return capabilities_; | 473 return capabilities_; |
| 444 } | 474 } |
| 445 | 475 |
| 446 int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer, | 476 int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer, |
| 447 size_t width, | 477 size_t width, |
| 448 size_t height, | 478 size_t height, |
| 449 unsigned internal_format) { | 479 unsigned internal_format) { |
| 450 CheckLock(); | 480 CheckLock(); |
| 481 base::AutoLock lock(state_lock_); | |
| 451 if (last_state_.error != gpu::error::kNoError) | 482 if (last_state_.error != gpu::error::kNoError) |
| 452 return -1; | 483 return -1; |
| 453 | 484 |
| 454 int32_t new_id = channel_->ReserveImageId(); | 485 int32_t new_id = channel_->ReserveImageId(); |
| 455 | 486 |
| 456 gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager = | 487 gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager = |
| 457 channel_->gpu_memory_buffer_manager(); | 488 channel_->gpu_memory_buffer_manager(); |
| 458 gfx::GpuMemoryBuffer* gpu_memory_buffer = | 489 gfx::GpuMemoryBuffer* gpu_memory_buffer = |
| 459 reinterpret_cast<gfx::GpuMemoryBuffer*>(buffer); | 490 reinterpret_cast<gfx::GpuMemoryBuffer*>(buffer); |
| 460 DCHECK(gpu_memory_buffer); | 491 DCHECK(gpu_memory_buffer); |
| (...skipping 40 matching lines...) | |
| 501 | 532 |
| 502 gpu_memory_buffer_manager->SetDestructionSyncToken(gpu_memory_buffer, | 533 gpu_memory_buffer_manager->SetDestructionSyncToken(gpu_memory_buffer, |
| 503 sync_token); | 534 sync_token); |
| 504 } | 535 } |
| 505 | 536 |
| 506 return new_id; | 537 return new_id; |
| 507 } | 538 } |
| 508 | 539 |
| 509 void CommandBufferProxyImpl::DestroyImage(int32_t id) { | 540 void CommandBufferProxyImpl::DestroyImage(int32_t id) { |
| 510 CheckLock(); | 541 CheckLock(); |
| 542 base::AutoLock lock(state_lock_); | |
| 511 if (last_state_.error != gpu::error::kNoError) | 543 if (last_state_.error != gpu::error::kNoError) |
| 512 return; | 544 return; |
| 513 | 545 |
| 514 Send(new GpuCommandBufferMsg_DestroyImage(route_id_, id)); | 546 Send(new GpuCommandBufferMsg_DestroyImage(route_id_, id)); |
| 515 } | 547 } |
| 516 | 548 |
| 517 int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage( | 549 int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage( |
| 518 size_t width, | 550 size_t width, |
| 519 size_t height, | 551 size_t height, |
| 520 unsigned internal_format, | 552 unsigned internal_format, |
| 521 unsigned usage) { | 553 unsigned usage) { |
| 522 CheckLock(); | 554 CheckLock(); |
| 523 std::unique_ptr<gfx::GpuMemoryBuffer> buffer( | 555 std::unique_ptr<gfx::GpuMemoryBuffer> buffer( |
| 524 channel_->gpu_memory_buffer_manager()->CreateGpuMemoryBuffer( | 556 channel_->gpu_memory_buffer_manager()->CreateGpuMemoryBuffer( |
| 525 gfx::Size(width, height), | 557 gfx::Size(width, height), |
| 526 gpu::DefaultBufferFormatForImageFormat(internal_format), | 558 gpu::DefaultBufferFormatForImageFormat(internal_format), |
| 527 gfx::BufferUsage::SCANOUT, gpu::kNullSurfaceHandle)); | 559 gfx::BufferUsage::SCANOUT, gpu::kNullSurfaceHandle)); |
| 528 if (!buffer) | 560 if (!buffer) |
| 529 return -1; | 561 return -1; |
| 530 | 562 |
| 531 int32_t result = | 563 int32_t result = |
| 532 CreateImage(buffer->AsClientBuffer(), width, height, internal_format); | 564 CreateImage(buffer->AsClientBuffer(), width, height, internal_format); |
| 533 return result; | 565 return result; |
| 534 } | 566 } |
| 535 | 567 |
| 536 uint32_t CommandBufferProxyImpl::CreateStreamTexture(uint32_t texture_id) { | 568 uint32_t CommandBufferProxyImpl::CreateStreamTexture(uint32_t texture_id) { |
| 537 CheckLock(); | 569 CheckLock(); |
| 570 base::AutoLock lock(state_lock_); | |
| 538 if (last_state_.error != gpu::error::kNoError) | 571 if (last_state_.error != gpu::error::kNoError) |
| 539 return 0; | 572 return 0; |
| 540 | 573 |
| 541 int32_t stream_id = channel_->GenerateRouteID(); | 574 int32_t stream_id = channel_->GenerateRouteID(); |
| 542 bool succeeded = false; | 575 bool succeeded = false; |
| 543 Send(new GpuCommandBufferMsg_CreateStreamTexture(route_id_, texture_id, | 576 Send(new GpuCommandBufferMsg_CreateStreamTexture(route_id_, texture_id, |
| 544 stream_id, &succeeded)); | 577 stream_id, &succeeded)); |
| 545 if (!succeeded) { | 578 if (!succeeded) { |
| 546 DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure"; | 579 DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure"; |
| 547 return 0; | 580 return 0; |
| (...skipping 32 matching lines...) | |
| 580 return release != 0 && release < next_fence_sync_release_; | 613 return release != 0 && release < next_fence_sync_release_; |
| 581 } | 614 } |
| 582 | 615 |
| 583 bool CommandBufferProxyImpl::IsFenceSyncFlushed(uint64_t release) { | 616 bool CommandBufferProxyImpl::IsFenceSyncFlushed(uint64_t release) { |
| 584 CheckLock(); | 617 CheckLock(); |
| 585 return release != 0 && release <= flushed_fence_sync_release_; | 618 return release != 0 && release <= flushed_fence_sync_release_; |
| 586 } | 619 } |
| 587 | 620 |
| 588 bool CommandBufferProxyImpl::IsFenceSyncFlushReceived(uint64_t release) { | 621 bool CommandBufferProxyImpl::IsFenceSyncFlushReceived(uint64_t release) { |
| 589 CheckLock(); | 622 CheckLock(); |
| 623 base::AutoLock lock(state_lock_); | |
| 590 if (last_state_.error != gpu::error::kNoError) | 624 if (last_state_.error != gpu::error::kNoError) |
| 591 return false; | 625 return false; |
| 592 | 626 |
| 593 if (release <= verified_fence_sync_release_) | 627 if (release <= verified_fence_sync_release_) |
| 594 return true; | 628 return true; |
| 595 | 629 |
| 596 // Check if we have actually flushed the fence sync release. | 630 // Check if we have actually flushed the fence sync release. |
| 597 if (release <= flushed_fence_sync_release_) { | 631 if (release <= flushed_fence_sync_release_) { |
| 598 DCHECK(!flushed_release_flush_id_.empty()); | 632 DCHECK(!flushed_release_flush_id_.empty()); |
| 599 // Check if it has already been validated by another context. | 633 // Check if it has already been validated by another context. |
| 600 UpdateVerifiedReleases(channel_->GetHighestValidatedFlushID(stream_id_)); | 634 UpdateVerifiedReleases(channel_->GetHighestValidatedFlushID(stream_id_)); |
| 601 if (release <= verified_fence_sync_release_) | 635 if (release <= verified_fence_sync_release_) |
| 602 return true; | 636 return true; |
| 603 | 637 |
| 604 // Has not been validated, validate it now. | 638 // Has not been validated, validate it now. |
| 605 UpdateVerifiedReleases( | 639 UpdateVerifiedReleases( |
| 606 channel_->ValidateFlushIDReachedServer(stream_id_, false)); | 640 channel_->ValidateFlushIDReachedServer(stream_id_, false)); |
| 607 return release <= verified_fence_sync_release_; | 641 return release <= verified_fence_sync_release_; |
| 608 } | 642 } |
| 609 | 643 |
| 610 return false; | 644 return false; |
| 611 } | 645 } |
| 612 | 646 |
| 647 bool CommandBufferProxyImpl::IsFenceSyncReleased(uint64_t release) { | |
| 648 base::AutoLock lock(state_lock_); | |
| 649 TryUpdateStateThreadSafe(); | |
| 650 return release <= last_state_.release_count; | |
| 651 } | |
| 652 | |
| 613 void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token, | 653 void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token, |
| 614 const base::Closure& callback) { | 654 const base::Closure& callback) { |
| 615 CheckLock(); | 655 CheckLock(); |
| 656 base::AutoLock lock(state_lock_); | |
| 616 if (last_state_.error != gpu::error::kNoError) | 657 if (last_state_.error != gpu::error::kNoError) |
| 617 return; | 658 return; |
| 618 | 659 |
| 619 uint32_t signal_id = next_signal_id_++; | 660 uint32_t signal_id = next_signal_id_++; |
| 620 Send(new GpuCommandBufferMsg_SignalSyncToken(route_id_, sync_token, | 661 Send(new GpuCommandBufferMsg_SignalSyncToken(route_id_, sync_token, |
| 621 signal_id)); | 662 signal_id)); |
| 622 signal_tasks_.insert(std::make_pair(signal_id, callback)); | 663 signal_tasks_.insert(std::make_pair(signal_id, callback)); |
| 623 } | 664 } |
| 624 | 665 |
| 625 bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken( | 666 bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken( |
| (...skipping 14 matching lines...) | |
| 640 | 681 |
| 641 if (release_stream_id != stream_id_) | 682 if (release_stream_id != stream_id_) |
| 642 channel_->FlushPendingStream(release_stream_id); | 683 channel_->FlushPendingStream(release_stream_id); |
| 643 | 684 |
| 644 return true; | 685 return true; |
| 645 } | 686 } |
| 646 | 687 |
| 647 void CommandBufferProxyImpl::SignalQuery(uint32_t query, | 688 void CommandBufferProxyImpl::SignalQuery(uint32_t query, |
| 648 const base::Closure& callback) { | 689 const base::Closure& callback) { |
| 649 CheckLock(); | 690 CheckLock(); |
| 691 base::AutoLock lock(state_lock_); | |
| 650 if (last_state_.error != gpu::error::kNoError) | 692 if (last_state_.error != gpu::error::kNoError) |
| 651 return; | 693 return; |
| 652 | 694 |
| 653 // Signal identifiers are hidden, so nobody outside of this class will see | 695 // Signal identifiers are hidden, so nobody outside of this class will see |
| 654 // them. (And thus, they cannot save them.) The IDs themselves only last | 696 // them. (And thus, they cannot save them.) The IDs themselves only last |
| 655 // until the callback is invoked, which will happen as soon as the GPU | 697 // until the callback is invoked, which will happen as soon as the GPU |
| 656 // catches up with the command buffer. | 698 // catches up with the command buffer. |
| 657 // A malicious caller trying to create a collision by making next_signal_id | 699 // A malicious caller trying to create a collision by making next_signal_id |
| 658 // would have to make calls at an astounding rate (300B/s) and even if they | 700 // would have to make calls at an astounding rate (300B/s) and even if they |
| 659 // could do that, all they would do is to prevent some callbacks from getting | 701 // could do that, all they would do is to prevent some callbacks from getting |
| 660 // called, leading to stalled threads and/or memory leaks. | 702 // called, leading to stalled threads and/or memory leaks. |
| 661 uint32_t signal_id = next_signal_id_++; | 703 uint32_t signal_id = next_signal_id_++; |
| 662 Send(new GpuCommandBufferMsg_SignalQuery(route_id_, query, signal_id)); | 704 Send(new GpuCommandBufferMsg_SignalQuery(route_id_, query, signal_id)); |
| 663 signal_tasks_.insert(std::make_pair(signal_id, callback)); | 705 signal_tasks_.insert(std::make_pair(signal_id, callback)); |
| 664 } | 706 } |
| 665 | 707 |
| 666 void CommandBufferProxyImpl::TakeFrontBuffer(const gpu::Mailbox& mailbox) { | 708 void CommandBufferProxyImpl::TakeFrontBuffer(const gpu::Mailbox& mailbox) { |
| 667 CheckLock(); | 709 CheckLock(); |
| 710 base::AutoLock lock(state_lock_); | |
| 668 if (last_state_.error != gpu::error::kNoError) | 711 if (last_state_.error != gpu::error::kNoError) |
| 669 return; | 712 return; |
| 670 | 713 |
| 671 Send(new GpuCommandBufferMsg_TakeFrontBuffer(route_id_, mailbox)); | 714 Send(new GpuCommandBufferMsg_TakeFrontBuffer(route_id_, mailbox)); |
| 672 } | 715 } |
| 673 | 716 |
| 674 void CommandBufferProxyImpl::ReturnFrontBuffer(const gpu::Mailbox& mailbox, | 717 void CommandBufferProxyImpl::ReturnFrontBuffer(const gpu::Mailbox& mailbox, |
| 675 const gpu::SyncToken& sync_token, | 718 const gpu::SyncToken& sync_token, |
| 676 bool is_lost) { | 719 bool is_lost) { |
| 677 CheckLock(); | 720 CheckLock(); |
| 721 base::AutoLock lock(state_lock_); | |
| 678 if (last_state_.error != gpu::error::kNoError) | 722 if (last_state_.error != gpu::error::kNoError) |
| 679 return; | 723 return; |
| 680 | 724 |
| 681 Send(new GpuCommandBufferMsg_WaitSyncToken(route_id_, sync_token)); | 725 Send(new GpuCommandBufferMsg_WaitSyncToken(route_id_, sync_token)); |
| 682 Send(new GpuCommandBufferMsg_ReturnFrontBuffer(route_id_, mailbox, is_lost)); | 726 Send(new GpuCommandBufferMsg_ReturnFrontBuffer(route_id_, mailbox, is_lost)); |
| 683 } | 727 } |
| 684 | 728 |
| 685 gpu::error::Error CommandBufferProxyImpl::GetLastError() { | 729 gpu::error::Error CommandBufferProxyImpl::GetLastError() { |
|
piman
2016/12/02 19:21:48
nit: same as GetLastToken, I don't think we need t
| |
| 730 base::AutoLock lock(state_lock_); | |
| 731 TryUpdateState(); | |
| 686 return last_state_.error; | 732 return last_state_.error; |
| 687 } | 733 } |
| 688 | 734 |
| 689 bool CommandBufferProxyImpl::Send(IPC::Message* msg) { | 735 bool CommandBufferProxyImpl::Send(IPC::Message* msg) { |
| 690 // Caller should not intentionally send a message if the context is lost. | |
| 691 DCHECK(last_state_.error == gpu::error::kNoError); | |
| 692 DCHECK(channel_); | 736 DCHECK(channel_); |
| 737 state_lock_.AssertAcquired(); | |
| 738 DCHECK_EQ(gpu::error::kNoError, last_state_.error); | |
| 693 | 739 |
| 694 if (!msg->is_sync()) { | 740 state_lock_.Release(); |
| 695 bool result = channel_->Send(msg); | 741 |
| 696 // Send() should always return true for async messages. | 742 // Call is_sync() before sending message. |
| 697 DCHECK(result); | 743 bool is_sync = msg->is_sync(); |
| 698 return true; | 744 bool result = channel_->Send(msg); |
| 745 // Send() should always return true for async messages. | |
| 746 DCHECK(is_sync || result); | |
| 747 | |
| 748 state_lock_.Acquire(); | |
| 749 | |
| 750 if (last_state_.error != gpu::error::kNoError) { | |
| 751 // Context might have been lost on another thread while we were waiting for | |
| 752 // message send/reply. | |
| 753 OnGpuStateError(); | |
|
piman
2016/12/02 19:21:48
Ditto, I think you only want to call OnGpuControlL
sunnyps
2016/12/07 03:31:22
Done.
| |
| 754 return false; | |
| 699 } | 755 } |
| 700 | 756 |
| 701 if (channel_->Send(msg)) | 757 if (!result) { |
| 702 return true; | 758 // Flag the command buffer as lost. Defer deleting the channel until |
| 759 // OnChannelError is called after returning to the message loop in case it | |
| 760 // is referenced elsewhere. | |
| 761 DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context."; | |
| 762 OnClientError(gpu::error::kLostContext); | |
| 763 return false; | |
| 764 } | |
| 703 | 765 |
| 704 // Flag the command buffer as lost. Defer deleting the channel until | 766 return true; |
| 705 // OnChannelError is called after returning to the message loop in case | |
| 706 // it is referenced elsewhere. | |
| 707 DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context."; | |
| 708 OnClientError(gpu::error::kLostContext); | |
| 709 return false; | |
| 710 } | 767 } |
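The rewritten Send() above caches is_sync() before handing the message off (Send() takes ownership and may delete it), drops state_lock_ across the potentially blocking call, and re-checks the error after reacquiring, since another thread may have observed a context loss in the meantime. A self-contained sketch of that discipline, not the Chromium code itself:

```cpp
#include "base/logging.h"
#include "base/synchronization/lock.h"
#include "ipc/ipc_message.h"
#include "ipc/ipc_sender.h"

// Sketch: send an IPC without keeping the state lock held across the
// blocking call. The caller must re-check the shared error state after this
// returns before trusting the result.
bool SendDroppingStateLock(base::Lock* state_lock,
                           IPC::Sender* channel,
                           IPC::Message* msg) {
  state_lock->AssertAcquired();
  const bool is_sync = msg->is_sync();  // Read before Send() consumes |msg|.
  state_lock->Release();
  const bool result = channel->Send(msg);  // May block on a sync reply.
  DCHECK(is_sync || result);  // Async sends are expected to succeed.
  state_lock->Acquire();
  return result;
}
```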
| 711 | 768 |
| 712 void CommandBufferProxyImpl::SetStateFromSyncReply( | 769 void CommandBufferProxyImpl::SetStateFromSyncReply( |
| 713 const gpu::CommandBuffer::State& state) { | 770 const gpu::CommandBuffer::State& state) { |
| 714 DCHECK(last_state_.error == gpu::error::kNoError); | 771 CheckLock(); |
| 772 state_lock_.AssertAcquired(); | |
| 715 // Handle wraparound. It works as long as we don't have more than 2B state | 773 // Handle wraparound. It works as long as we don't have more than 2B state |
| 716 // updates in flight across which reordering occurs. | 774 // updates in flight across which reordering occurs. |
| 717 if (state.generation - last_state_.generation < 0x80000000U) | 775 if (state.generation - last_state_.generation < 0x80000000U) |
| 718 last_state_ = state; | 776 last_state_ = state; |
| 719 if (last_state_.error != gpu::error::kNoError) | 777 if (last_state_.error != gpu::error::kNoError) |
| 720 OnGpuStateError(); | 778 OnGpuStateError(); |
| 721 } | 779 } |
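The wraparound comment above relies on unsigned 32-bit arithmetic: a reply is treated as newer when its forward distance from the cached generation is below half the 32-bit range. A small standalone check of that comparison:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  uint32_t last_generation = 0xFFFFFFF0u;   // Cached state, about to wrap.
  uint32_t reply_generation = 0x00000010u;  // Newer state, after wraparound.
  // Forward distance is 0x20, well below 0x80000000, so the reply is applied.
  assert(reply_generation - last_generation < 0x80000000u);
  // A stale reply (older than the cached state) fails the same test.
  assert(!(last_generation - reply_generation < 0x80000000u));
  return 0;
}
```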
| 722 | 780 |
| 723 void CommandBufferProxyImpl::TryUpdateState() { | 781 gpu::CommandBuffer::State CommandBufferProxyImpl::TryUpdateState() { |
|
piman
2016/12/02 19:21:48
I don't think it's necessary for this (and other T
sunnyps
2016/12/07 03:31:22
Done.
| |
| 782 CheckLock(); | |
| 783 state_lock_.AssertAcquired(); | |
| 724 if (last_state_.error == gpu::error::kNoError) { | 784 if (last_state_.error == gpu::error::kNoError) { |
| 725 shared_state()->Read(&last_state_); | 785 shared_state()->Read(&last_state_); |
| 726 if (last_state_.error != gpu::error::kNoError) | 786 if (last_state_.error != gpu::error::kNoError) |
| 727 OnGpuStateError(); | 787 OnGpuStateError(); |
| 728 } | 788 } |
| 789 return last_state_; | |
| 729 } | 790 } |
| 730 | 791 |
| 731 void CommandBufferProxyImpl::TryUpdateStateDontReportError() { | 792 gpu::CommandBuffer::State CommandBufferProxyImpl::TryUpdateStateThreadSafe() { |
| 793 state_lock_.AssertAcquired(); | |
| 794 if (last_state_.error == gpu::error::kNoError) { | |
| 795 shared_state()->Read(&last_state_); | |
| 796 if (last_state_.error != gpu::error::kNoError) { | |
| 797 callback_thread_->PostTask( | |
| 798 FROM_HERE, | |
| 799 base::Bind(&CommandBufferProxyImpl::LockAndDisconnectChannel, | |
| 800 weak_this_)); | |
| 801 } | |
| 802 } | |
| 803 return last_state_; | |
| 804 } | |
| 805 | |
| 806 gpu::CommandBuffer::State | |
| 807 CommandBufferProxyImpl::TryUpdateStateDontReportError() { | |
| 808 state_lock_.AssertAcquired(); | |
| 732 if (last_state_.error == gpu::error::kNoError) | 809 if (last_state_.error == gpu::error::kNoError) |
| 733 shared_state()->Read(&last_state_); | 810 shared_state()->Read(&last_state_); |
| 811 return last_state_; | |
| 734 } | 812 } |
| 735 | 813 |
| 736 void CommandBufferProxyImpl::UpdateVerifiedReleases(uint32_t verified_flush) { | 814 void CommandBufferProxyImpl::UpdateVerifiedReleases(uint32_t verified_flush) { |
| 737 while (!flushed_release_flush_id_.empty()) { | 815 while (!flushed_release_flush_id_.empty()) { |
| 738 const std::pair<uint64_t, uint32_t>& front_item = | 816 const std::pair<uint64_t, uint32_t>& front_item = |
| 739 flushed_release_flush_id_.front(); | 817 flushed_release_flush_id_.front(); |
| 740 if (front_item.second > verified_flush) | 818 if (front_item.second > verified_flush) |
| 741 break; | 819 break; |
| 742 verified_fence_sync_release_ = front_item.first; | 820 verified_fence_sync_release_ = front_item.first; |
| 743 flushed_release_flush_id_.pop(); | 821 flushed_release_flush_id_.pop(); |
| (...skipping 48 matching lines...) | |
| 792 } | 870 } |
| 793 } | 871 } |
| 794 | 872 |
| 795 void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase, | 873 void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase, |
| 796 base::TimeDelta interval) { | 874 base::TimeDelta interval) { |
| 797 if (!update_vsync_parameters_completion_callback_.is_null()) | 875 if (!update_vsync_parameters_completion_callback_.is_null()) |
| 798 update_vsync_parameters_completion_callback_.Run(timebase, interval); | 876 update_vsync_parameters_completion_callback_.Run(timebase, interval); |
| 799 } | 877 } |
| 800 | 878 |
| 801 void CommandBufferProxyImpl::OnGpuSyncReplyError() { | 879 void CommandBufferProxyImpl::OnGpuSyncReplyError() { |
| 880 CheckLock(); | |
| 881 state_lock_.AssertAcquired(); | |
| 802 last_state_.error = gpu::error::kLostContext; | 882 last_state_.error = gpu::error::kLostContext; |
| 803 last_state_.context_lost_reason = gpu::error::kInvalidGpuMessage; | 883 last_state_.context_lost_reason = gpu::error::kInvalidGpuMessage; |
| 804 // This method may be inside a callstack from the GpuControlClient (we got a | 884 // This method may be inside a callstack from the GpuControlClient (we got a |
| 805 // bad reply to something we are sending to the GPU process). So avoid | 885 // bad reply to something we are sending to the GPU process). So avoid |
| 806 // re-entering the GpuControlClient here. | 886 // re-entering the GpuControlClient here. |
| 807 DisconnectChannelInFreshCallStack(); | 887 DisconnectChannelInFreshCallStack(); |
| 808 } | 888 } |
| 809 | 889 |
| 810 void CommandBufferProxyImpl::OnGpuAsyncMessageError( | 890 void CommandBufferProxyImpl::OnGpuAsyncMessageError( |
| 811 gpu::error::ContextLostReason reason, | 891 gpu::error::ContextLostReason reason, |
| 812 gpu::error::Error error) { | 892 gpu::error::Error error) { |
| 813 CheckLock(); | 893 CheckLock(); |
| 894 state_lock_.AssertAcquired(); | |
| 814 last_state_.error = error; | 895 last_state_.error = error; |
| 815 last_state_.context_lost_reason = reason; | 896 last_state_.context_lost_reason = reason; |
| 816 // This method only occurs when receiving IPC messages, so we know it's not in | 897 // This method only occurs when receiving IPC messages, so we know it's not in |
| 817 // a callstack from the GpuControlClient. | 898 // a callstack from the GpuControlClient. |
| 818 DisconnectChannel(); | 899 DisconnectChannel(); |
| 819 } | 900 } |
| 820 | 901 |
| 821 void CommandBufferProxyImpl::OnGpuStateError() { | 902 void CommandBufferProxyImpl::OnGpuStateError() { |
| 822 DCHECK(last_state_.error != gpu::error::kNoError); | 903 CheckLock(); |
| 904 state_lock_.AssertAcquired(); | |
| 905 DCHECK_NE(gpu::error::kNoError, last_state_.error); | |
| 823 // This method may be inside a callstack from the GpuControlClient (we | 906 // This method may be inside a callstack from the GpuControlClient (we |
| 824 // encountered an error while trying to perform some action). So avoid | 907 // encountered an error while trying to perform some action). So avoid |
| 825 // re-entering the GpuControlClient here. | 908 // re-entering the GpuControlClient here. |
| 826 DisconnectChannelInFreshCallStack(); | 909 DisconnectChannelInFreshCallStack(); |
| 827 } | 910 } |
| 828 | 911 |
| 829 void CommandBufferProxyImpl::OnClientError(gpu::error::Error error) { | 912 void CommandBufferProxyImpl::OnClientError(gpu::error::Error error) { |
| 830 CheckLock(); | 913 CheckLock(); |
| 914 state_lock_.AssertAcquired(); | |
| 831 last_state_.error = error; | 915 last_state_.error = error; |
| 832 last_state_.context_lost_reason = gpu::error::kUnknown; | 916 last_state_.context_lost_reason = gpu::error::kUnknown; |
| 833 // This method may be inside a callstack from the GpuControlClient (we | 917 // This method may be inside a callstack from the GpuControlClient (we |
| 834 // encountered an error while trying to perform some action). So avoid | 918 // encountered an error while trying to perform some action). So avoid |
| 835 // re-entering the GpuControlClient here. | 919 // re-entering the GpuControlClient here. |
| 836 DisconnectChannelInFreshCallStack(); | 920 DisconnectChannelInFreshCallStack(); |
| 837 } | 921 } |
| 838 | 922 |
| 839 void CommandBufferProxyImpl::DisconnectChannelInFreshCallStack() { | 923 void CommandBufferProxyImpl::DisconnectChannelInFreshCallStack() { |
| 840 CheckLock(); | 924 CheckLock(); |
| 925 state_lock_.AssertAcquired(); | |
| 841 // Inform the GpuControlClient of the lost state immediately, though this may | 926 // Inform the GpuControlClient of the lost state immediately, though this may |
| 842 // be a re-entrant call to the client so we use the MaybeReentrant variant. | 927 // be a re-entrant call to the client so we use the MaybeReentrant variant. |
| 843 if (gpu_control_client_) | 928 if (gpu_control_client_) |
| 844 gpu_control_client_->OnGpuControlLostContextMaybeReentrant(); | 929 gpu_control_client_->OnGpuControlLostContextMaybeReentrant(); |
| 845 // Create a fresh call stack to keep the |channel_| alive while we unwind the | 930 // Create a fresh call stack to keep the |channel_| alive while we unwind the |
| 846 // stack in case things will use it, and give the GpuChannelClient a chance to | 931 // stack in case things will use it, and give the GpuChannelClient a chance to |
| 847 // act fully on the lost context. | 932 // act fully on the lost context. |
| 848 callback_thread_->PostTask( | 933 callback_thread_->PostTask( |
| 849 FROM_HERE, base::Bind(&CommandBufferProxyImpl::LockAndDisconnectChannel, | 934 FROM_HERE, base::Bind(&CommandBufferProxyImpl::LockAndDisconnectChannel, |
| 850 weak_this_)); | 935 weak_this_)); |
| (...skipping 10 matching lines...) | |
| 861 CheckLock(); | 946 CheckLock(); |
| 862 // Prevent any further messages from being sent, and ensure we only call | 947 // Prevent any further messages from being sent, and ensure we only call |
| 863 // the client for lost context a single time. | 948 // the client for lost context a single time. |
| 864 if (!channel_) | 949 if (!channel_) |
| 865 return; | 950 return; |
| 866 channel_->FlushPendingStream(stream_id_); | 951 channel_->FlushPendingStream(stream_id_); |
| 867 channel_->Send(new GpuChannelMsg_DestroyCommandBuffer(route_id_)); | 952 channel_->Send(new GpuChannelMsg_DestroyCommandBuffer(route_id_)); |
| 868 channel_->RemoveRoute(route_id_); | 953 channel_->RemoveRoute(route_id_); |
| 869 channel_ = nullptr; | 954 channel_ = nullptr; |
| 870 if (gpu_control_client_) | 955 if (gpu_control_client_) |
| 871 gpu_control_client_->OnGpuControlLostContext(); | 956 gpu_control_client_->OnGpuControlLostContext(); |
|
piman
2016/12/02 19:21:48
There are some paths where state_lock_ is held (e.
sunnyps
2016/12/07 03:31:22
Changed it so that the OnGpuAsyncMessageError rele
| |
| 872 } | 957 } |
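Piman's comment above points out that some paths reach OnGpuControlLostContext() with state_lock_ still held; the (truncated) reply says OnGpuAsyncMessageError was changed to release it. One way such a fix could look, shown only as a sketch under the assumption that base::AutoUnlock is usable here; the exact landed change is not visible in this snippet:

```cpp
// Drop state_lock_ for the duration of the client callback so the client
// cannot re-enter the proxy (or deadlock) while the lock is held.
if (gpu_control_client_) {
  base::AutoUnlock unlock(state_lock_);
  gpu_control_client_->OnGpuControlLostContext();
}
```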
| 873 | 958 |
| 874 } // namespace gpu | 959 } // namespace gpu |