| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/bind.h" | 5 #include "base/bind.h" |
| 6 #include "base/bind_helpers.h" | 6 #include "base/bind_helpers.h" |
| 7 #include "base/command_line.h" | 7 #include "base/command_line.h" |
| 8 #include "base/debug/trace_event.h" | 8 #include "base/debug/trace_event.h" |
| 9 #include "base/hash.h" | 9 #include "base/hash.h" |
| 10 #include "base/json/json_writer.h" | 10 #include "base/json/json_writer.h" |
| (...skipping 211 matching lines...) |
| 222 FastSetActiveURL(active_url_, active_url_hash_); | 222 FastSetActiveURL(active_url_, active_url_hash_); |
| 223 | 223 |
| 224 bool have_context = false; | 224 bool have_context = false; |
| 225 // Ensure the appropriate GL context is current before handling any IPC | 225 // Ensure the appropriate GL context is current before handling any IPC |
| 226 // messages directed at the command buffer. This ensures that the message | 226 // messages directed at the command buffer. This ensures that the message |
| 227 // handler can assume that the context is current (not necessary for | 227 // handler can assume that the context is current (not necessary for |
| 228 // Echo, RetireSyncPoint, or WaitSyncPoint). | 228 // Echo, RetireSyncPoint, or WaitSyncPoint). |
| 229 if (decoder_.get() && message.type() != GpuCommandBufferMsg_Echo::ID && | 229 if (decoder_.get() && message.type() != GpuCommandBufferMsg_Echo::ID && |
| 230 message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID && | 230 message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID && |
| 231 message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID && | 231 message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID && |
| 232 message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID && | 232 message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID) { |
| 233 message.type() != GpuCommandBufferMsg_SetLatencyInfo::ID) { | |
| 234 if (!MakeCurrent()) | 233 if (!MakeCurrent()) |
| 235 return false; | 234 return false; |
| 236 have_context = true; | 235 have_context = true; |
| 237 } | 236 } |
| 238 | 237 |
| 239 // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers | 238 // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers |
| 240 // here. This is so the reply can be delayed if the scheduler is unscheduled. | 239 // here. This is so the reply can be delayed if the scheduler is unscheduled. |
| 241 bool handled = true; | 240 bool handled = true; |
| 242 IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message) | 241 IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message) |
| 243 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize, | 242 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize, |
| 244 OnInitialize); | 243 OnInitialize); |
| 245 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer, | 244 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer, |
| 246 OnSetGetBuffer); | 245 OnSetGetBuffer); |
| 247 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer, | 246 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer, |
| 248 OnProduceFrontBuffer); | 247 OnProduceFrontBuffer); |
| 249 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Echo, OnEcho); | 248 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Echo, OnEcho); |
| 250 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange, | 249 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange, |
| 251 OnWaitForTokenInRange); | 250 OnWaitForTokenInRange); |
| 252 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange, | 251 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange, |
| 253 OnWaitForGetOffsetInRange); | 252 OnWaitForGetOffsetInRange); |
| 254 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush); | 253 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush); |
| 255 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetLatencyInfo, OnSetLatencyInfo); | |
| 256 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled); | 254 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled); |
| 257 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer, | 255 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer, |
| 258 OnRegisterTransferBuffer); | 256 OnRegisterTransferBuffer); |
| 259 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer, | 257 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer, |
| 260 OnDestroyTransferBuffer); | 258 OnDestroyTransferBuffer); |
| 261 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder, | 259 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder, |
| 262 OnCreateVideoDecoder) | 260 OnCreateVideoDecoder) |
| 263 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder, | 261 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder, |
| 264 OnCreateVideoEncoder) | 262 OnCreateVideoEncoder) |
| 265 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible, | 263 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible, |
| (...skipping 357 matching lines...) |
| 623 reply_message, true, capabilities); | 621 reply_message, true, capabilities); |
| 624 Send(reply_message); | 622 Send(reply_message); |
| 625 | 623 |
| 626 if (handle_.is_null() && !active_url_.is_empty()) { | 624 if (handle_.is_null() && !active_url_.is_empty()) { |
| 627 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager(); | 625 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager(); |
| 628 gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext( | 626 gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext( |
| 629 active_url_)); | 627 active_url_)); |
| 630 } | 628 } |
| 631 } | 629 } |
| 632 | 630 |
| 633 void GpuCommandBufferStub::OnSetLatencyInfo( | |
| 634 const std::vector<ui::LatencyInfo>& latency_info) { | |
| 635 if (!ui::LatencyInfo::Verify(latency_info, | |
| 636 "GpuCommandBufferStub::OnSetLatencyInfo")) | |
| 637 return; | |
| 638 if (!latency_info_callback_.is_null()) | |
| 639 latency_info_callback_.Run(latency_info); | |
| 640 } | |
| 641 | |
| 642 void GpuCommandBufferStub::OnCreateStreamTexture( | 631 void GpuCommandBufferStub::OnCreateStreamTexture( |
| 643 uint32 texture_id, int32 stream_id, bool* succeeded) { | 632 uint32 texture_id, int32 stream_id, bool* succeeded) { |
| 644 #if defined(OS_ANDROID) | 633 #if defined(OS_ANDROID) |
| 645 *succeeded = StreamTexture::Create(this, texture_id, stream_id); | 634 *succeeded = StreamTexture::Create(this, texture_id, stream_id); |
| 646 #else | 635 #else |
| 647 *succeeded = false; | 636 *succeeded = false; |
| 648 #endif | 637 #endif |
| 649 } | 638 } |
| 650 | 639 |
| 651 void GpuCommandBufferStub::SetLatencyInfoCallback( | 640 void GpuCommandBufferStub::SetLatencyInfoCallback( |
| (...skipping 99 matching lines...) |
| 751 state.error != gpu::error::kNoError)) { | 740 state.error != gpu::error::kNoError)) { |
| 752 ReportState(); | 741 ReportState(); |
| 753 GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams( | 742 GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams( |
| 754 wait_for_get_offset_->reply.get(), state); | 743 wait_for_get_offset_->reply.get(), state); |
| 755 Send(wait_for_get_offset_->reply.release()); | 744 Send(wait_for_get_offset_->reply.release()); |
| 756 wait_for_get_offset_.reset(); | 745 wait_for_get_offset_.reset(); |
| 757 } | 746 } |
| 758 } | 747 } |
| 759 } | 748 } |
| 760 | 749 |
| 761 void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, uint32 flush_count) { | 750 void GpuCommandBufferStub::OnAsyncFlush( |
| | 751 int32 put_offset, |
| | 752 uint32 flush_count, |
| | 753 const std::vector<ui::LatencyInfo>& latency_info) { |
| 762 TRACE_EVENT1( | 754 TRACE_EVENT1( |
| 763 "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset); | 755 "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset); |
| | 756 |
| | 757 if (ui::LatencyInfo::Verify(latency_info, |
| | 758 "GpuCommandBufferStub::OnAsyncFlush") && |
| | 759 !latency_info_callback_.is_null()) { |
| | 760 latency_info_callback_.Run(latency_info); |
| | 761 } |
| 764 DCHECK(command_buffer_.get()); | 762 DCHECK(command_buffer_.get()); |
| 765 if (flush_count - last_flush_count_ < 0x8000000U) { | 763 if (flush_count - last_flush_count_ < 0x8000000U) { |
| 766 last_flush_count_ = flush_count; | 764 last_flush_count_ = flush_count; |
| 767 command_buffer_->Flush(put_offset); | 765 command_buffer_->Flush(put_offset); |
| 768 } else { | 766 } else { |
| 769 // We received this message out-of-order. This should not happen but is here | 767 // We received this message out-of-order. This should not happen but is here |
| 770 // to catch regressions. Ignore the message. | 768 // to catch regressions. Ignore the message. |
| 771 NOTREACHED() << "Received a Flush message out-of-order"; | 769 NOTREACHED() << "Received a Flush message out-of-order"; |
| 772 } | 770 } |
| 773 | 771 |
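A note on the flush_count test above: unsigned 32-bit subtraction is defined modulo 2^32, so the comparison flush_count - last_flush_count_ < 0x8000000U accepts a new count that is at most 2^27 ahead of the last accepted one, even after the counter wraps past 0xFFFFFFFF, and rejects stale or out-of-order counts. Below is a minimal standalone sketch of that check, with FlushCountIsInOrder as a hypothetical helper name that does not appear in the Chromium sources:

  #include <cassert>
  #include <cstdint>

  // Wraparound-safe "is the new count ahead of the last one?" test.
  bool FlushCountIsInOrder(uint32_t last_flush_count, uint32_t flush_count) {
    // Unsigned subtraction wraps modulo 2^32, so a count within 2^27 of the
    // last accepted value still compares as in-order after the wrap.
    return flush_count - last_flush_count < 0x8000000U;
  }

  int main() {
    assert(FlushCountIsInOrder(41u, 42u));         // normal increasing count
    assert(!FlushCountIsInOrder(42u, 41u));        // stale count arrives late
    assert(FlushCountIsInOrder(0xFFFFFFFFu, 3u));  // in order across the wrap
    return 0;
  }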
| (...skipping 327 matching lines...) |
| 1101 if (decoder_) | 1099 if (decoder_) |
| 1102 decoder_->LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB); | 1100 decoder_->LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB); |
| 1103 command_buffer_->SetParseError(gpu::error::kLostContext); | 1101 command_buffer_->SetParseError(gpu::error::kLostContext); |
| 1104 } | 1102 } |
| 1105 | 1103 |
| 1106 uint64 GpuCommandBufferStub::GetMemoryUsage() const { | 1104 uint64 GpuCommandBufferStub::GetMemoryUsage() const { |
| 1107 return GetMemoryManager()->GetClientMemoryUsage(this); | 1105 return GetMemoryManager()->GetClientMemoryUsage(this); |
| 1108 } | 1106 } |
| 1109 | 1107 |
| 1110 } // namespace content | 1108 } // namespace content |