Chromium Code Reviews

Unified diff: content/common/gpu/gpu_command_buffer_stub.cc

Issue 782583003: List sync points to wait on in AsyncFlush message
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 6 years ago
Other files in this patch set: content/common/gpu/gpu_command_buffer_stub.h and content/common/gpu/gpu_messages.h.
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/bind.h"
 #include "base/bind_helpers.h"
 #include "base/command_line.h"
 #include "base/debug/trace_event.h"
 #include "base/hash.h"
 #include "base/json/json_writer.h"
(...skipping 204 matching lines...)
 bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
   TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
                "GPUTask",
                "data",
                DevToolsChannelData::CreateForChannel(channel()));
   // TODO(yurys): remove devtools_gpu_instrumentation call once DevTools
   // Timeline migrates to tracing crbug.com/361045.
   devtools_gpu_instrumentation::ScopedGpuTask task(channel());
   FastSetActiveURL(active_url_, active_url_hash_);

-  bool have_context = false;
   // Ensure the appropriate GL context is current before handling any IPC
   // messages directed at the command buffer. This ensures that the message
   // handler can assume that the context is current (not necessary for
   // RetireSyncPoint or WaitSyncPoint).
   if (decoder_.get() &&
       message.type() != GpuCommandBufferMsg_SetGetBuffer::ID &&
       message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
       message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
       message.type() != GpuCommandBufferMsg_RegisterTransferBuffer::ID &&
       message.type() != GpuCommandBufferMsg_DestroyTransferBuffer::ID &&
       message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
       message.type() != GpuCommandBufferMsg_SignalSyncPoint::ID &&
+      message.type() != GpuCommandBufferMsg_AsyncFlush::ID &&
       message.type() !=
           GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback::ID) {
     if (!MakeCurrent())
       return false;
-    have_context = true;
   }

   // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
   // here. This is so the reply can be delayed if the scheduler is unscheduled.
   bool handled = true;
   IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                     OnInitialize);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                     OnSetGetBuffer);
(...skipping 26 matching lines...)
                         OnSetClientHasMemoryAllocationChangedCallback)
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
                         OnCreateStreamTexture)
     IPC_MESSAGE_UNHANDLED(handled = false)
   IPC_END_MESSAGE_MAP()

   CheckCompleteWaits();

-  if (have_context) {
-    // Ensure that any delayed work that was created will be handled.
-    ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
-  }
+  // Ensure that any delayed work that was created will be handled.
+  ScheduleDelayedWork(kHandleMoreWorkPeriodMs);

   DCHECK(handled);
   return handled;
 }

 bool GpuCommandBufferStub::Send(IPC::Message* message) {
   return channel_->Send(message);
 }

 bool GpuCommandBufferStub::IsScheduled() {
(...skipping 273 matching lines...)
           switches::kEnableGPUServiceLogging)) {
     decoder_->set_log_commands(true);
   }

   decoder_->GetLogger()->SetMsgCallback(
       base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
                  base::Unretained(this)));
   decoder_->SetShaderCacheCallback(
       base::Bind(&GpuCommandBufferStub::SendCachedShader,
                  base::Unretained(this)));
-  decoder_->SetWaitSyncPointCallback(
-      base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
-                 base::Unretained(this)));

   command_buffer_->SetPutOffsetChangeCallback(
       base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
   command_buffer_->SetGetBufferChangeCallback(
       base::Bind(&gpu::GpuScheduler::SetGetBuffer,
                  base::Unretained(scheduler_.get())));
   command_buffer_->SetParseErrorCallback(
       base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
   scheduler_->SetSchedulingChangedCallback(
       base::Bind(&GpuChannel::StubSchedulingChanged,
(...skipping 143 matching lines...)
                                  wait_for_get_offset_->reply.get(), state);
       Send(wait_for_get_offset_->reply.release());
       wait_for_get_offset_.reset();
     }
   }
 }

 void GpuCommandBufferStub::OnAsyncFlush(
     int32 put_offset,
     uint32 flush_count,
+    const std::vector<uint32>& sync_points,
     const std::vector<ui::LatencyInfo>& latency_info) {
   TRACE_EVENT1(
       "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);

   if (ui::LatencyInfo::Verify(latency_info,
                               "GpuCommandBufferStub::OnAsyncFlush") &&
       !latency_info_callback_.is_null()) {
     latency_info_callback_.Run(latency_info);
   }
+  for (uint32 sync_point : sync_points) {
+    WaitSyncPoint(sync_point);
+  }
+  if (scheduler_->IsScheduled())
+    MakeCurrent();
   DCHECK(command_buffer_.get());
   if (flush_count - last_flush_count_ < 0x8000000U) {
     last_flush_count_ = flush_count;
-    command_buffer_->Flush(put_offset);
+    command_buffer_->Flush(put_offset, sync_points);
   } else {
     // We received this message out-of-order. This should not happen but is here
     // to catch regressions. Ignore the message.
     NOTREACHED() << "Received a Flush message out-of-order";
   }

   ReportState();
 }

 void GpuCommandBufferStub::OnRescheduled() {
   gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
-  command_buffer_->Flush(command_buffer_->GetPutOffset());
+  command_buffer_->Flush(command_buffer_->GetPutOffset(),
+                         std::vector<uint32>());
   gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

   if (pre_state.get_offset != post_state.get_offset)
     ReportState();
 }

 void GpuCommandBufferStub::OnRegisterTransferBuffer(
     int32 id,
     base::SharedMemoryHandle transfer_buffer,
     uint32 size) {
(...skipping 74 matching lines...)
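The new sync_points parameter on OnAsyncFlush implies a matching change to the AsyncFlush message definition in content/common/gpu/gpu_messages.h, the next file in this patch set (its diff is not shown on this page). A minimal sketch of what that definition might look like, assuming the standard IPC_MESSAGE_ROUTEDn macros and mirroring the handler signature above; this is an illustration, not the patch's actual text:

// Hypothetical sketch only; the authoritative definition is in the
// gpu_messages.h diff. Parameters mirror GpuCommandBufferStub::OnAsyncFlush.
IPC_MESSAGE_ROUTED4(GpuCommandBufferMsg_AsyncFlush,
                    int32 /* put_offset */,
                    uint32 /* flush_count */,
                    std::vector<uint32> /* sync_points */,
                    std::vector<ui::LatencyInfo> /* latency_info */)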
   sync_points_.push_back(sync_point);
 }

 void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
   DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
   sync_points_.pop_front();
   GpuChannelManager* manager = channel_->gpu_channel_manager();
   manager->sync_point_manager()->RetireSyncPoint(sync_point);
 }

-bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
+void GpuCommandBufferStub::WaitSyncPoint(uint32 sync_point) {
   if (!sync_point)
-    return true;
+    return;
   GpuChannelManager* manager = channel_->gpu_channel_manager();
   if (manager->sync_point_manager()->IsSyncPointRetired(sync_point))
-    return true;
+    return;

   if (sync_point_wait_count_ == 0) {
     TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                              "GpuCommandBufferStub", this);
   }
   scheduler_->SetScheduled(false);
   ++sync_point_wait_count_;
   manager->sync_point_manager()->AddSyncPointCallback(
       sync_point,
       base::Bind(&GpuCommandBufferStub::OnSyncPointRetired,
                  this->AsWeakPtr()));
-  return scheduler_->IsScheduled();
 }

 void GpuCommandBufferStub::OnSyncPointRetired() {
   --sync_point_wait_count_;
   if (sync_point_wait_count_ == 0) {
     TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                            "GpuCommandBufferStub", this);
   }
   scheduler_->SetScheduled(true);
 }
(...skipping 184 matching lines...)
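Taken together with OnAsyncFlush above, the design change visible in this diff is that OnWaitSyncPoint, previously installed as the decoder's wait-sync-point callback and returning whether the stub was still scheduled, becomes a plain WaitSyncPoint helper invoked once per sync point listed in the AsyncFlush message. Each unretired sync point deschedules the stub and registers OnSyncPointRetired with the SyncPointManager; the stub is rescheduled only when sync_point_wait_count_ drops back to zero, after which OnRescheduled re-flushes from the current put offset. In other words, the sync points to wait on now travel with the flush itself, which is what the issue title refers to.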
 uint64 GpuCommandBufferStub::GetMemoryUsage() const {
   return GetMemoryManager()->GetClientMemoryUsage(this);
 }

 void GpuCommandBufferStub::SwapBuffersCompleted(
     const std::vector<ui::LatencyInfo>& latency_info) {
   Send(new GpuCommandBufferMsg_SwapBuffersCompleted(route_id_, latency_info));
 }

 }  // namespace content
