Chromium Code Reviews

Unified Diff: content/common/gpu/gpu_command_buffer_stub.cc

Issue 1336623004: content/gpu: Simplify gpu channel message handling. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: handle preemption better | Created 5 years, 3 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/bind.h"
 #include "base/bind_helpers.h"
 #include "base/command_line.h"
 #include "base/hash.h"
 #include "base/json/json_writer.h"
 #include "base/memory/shared_memory.h"
(...skipping 266 matching lines...)
                         OnInitialize);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                     OnSetGetBuffer);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                         OnProduceFrontBuffer);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
                                     OnWaitForTokenInRange);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
                                     OnWaitForGetOffsetInRange);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
-    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                         OnRegisterTransferBuffer);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                         OnDestroyTransferBuffer);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                     OnCreateVideoDecoder)
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder,
                                     OnCreateVideoEncoder)
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                         OnSetSurfaceVisible)
(...skipping 37 matching lines...)
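For orientation, these entries sit inside Chromium's standard IPC message-map macros; the enclosing dispatch function falls in the folded lines. A minimal sketch of that context, reconstructed from the usual pattern rather than quoted from this revision:

    bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
      bool handled = true;
      IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
        // Each IPC_MESSAGE_HANDLER entry unpacks the message payload and
        // invokes the named method; the _DELAY_REPLY variants also hand the
        // handler a reply message to send once the result is ready.
        IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush)
        // ... one entry per message type, as in the hunk above ...
        IPC_MESSAGE_UNHANDLED(handled = false)
      IPC_END_MESSAGE_MAP()
      return handled;
    }

Note the deleted GpuCommandBufferMsg_Rescheduled entry: the stub no longer handles that message, and the get-offset reporting that OnRescheduled used to do moves into OnAsyncFlush further down this diff.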
 }

 void GpuCommandBufferStub::PollWork() {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
   delayed_work_scheduled_ = false;
   FastSetActiveURL(active_url_, active_url_hash_);
   if (decoder_.get() && !MakeCurrent())
     return;

   if (scheduler_) {
-    const uint32_t current_unprocessed_num =
-        channel()->gpu_channel_manager()->UnprocessedOrderNumber();
+    uint32_t current_unprocessed_num =
+        channel()->gpu_channel_manager()->GetUnprocessedOrderNum();
     // We're idle when no messages were processed or scheduled.
     bool is_idle = (previous_processed_num_ == current_unprocessed_num);
     if (!is_idle && !last_idle_time_.is_null()) {
       base::TimeDelta time_since_idle =
           base::TimeTicks::Now() - last_idle_time_;
       base::TimeDelta max_time_since_idle =
           base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

       // Force idle when it's been too long since last time we were idle.
       if (time_since_idle > max_time_since_idle)
(...skipping 23 matching lines...)
     return;
   }

   if (delayed_work_scheduled_)
     return;
   delayed_work_scheduled_ = true;

   // Idle when no messages are processed between now and when
   // PollWork is called.
   previous_processed_num_ =
-      channel()->gpu_channel_manager()->ProcessedOrderNumber();
+      channel()->gpu_channel_manager()->GetProcessedOrderNum();
   if (last_idle_time_.is_null())
     last_idle_time_ = base::TimeTicks::Now();

   // IsScheduled() returns true after passing all unschedule fences
   // and this is when we can start performing idle work. Idle work
   // is done synchronously so we can set delay to 0 and instead poll
   // for more work at the rate idle work is performed. This also ensures
   // that idle work is done as efficiently as possible without any
   // unnecessary delays.
   if (scheduler_.get() &&
(...skipping 373 matching lines...)
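The renames here (UnprocessedOrderNumber() to GetUnprocessedOrderNum(), ProcessedOrderNumber() to GetProcessedOrderNum()) leave the idle heuristic intact: when delayed work is scheduled the stub records the channel manager's processed order number, and PollWork declares the channel idle if the unprocessed number still equals it, i.e. nothing new was processed or even arrived in between. A self-contained sketch of the pattern, with illustrative types and names rather than the stub's real interface:

    #include <cstdint>

    // Stand-ins for the GpuChannelManager's GetProcessedOrderNum() /
    // GetUnprocessedOrderNum() accessors.
    struct OrderNumbers {
      uint32_t processed;    // last message fully processed
      uint32_t unprocessed;  // highest message received so far
    };

    class DelayedWorkPoller {
     public:
      // At ScheduleDelayedWork() time: remember how far processing had gotten.
      void OnScheduleDelayedWork(const OrderNumbers& now) {
        previous_processed_num_ = now.processed;
      }

      // At PollWork() time: if the highest received number still equals what
      // had been processed when we scheduled, no messages were processed or
      // scheduled in between, so the channel was idle the whole interval.
      bool IsIdle(const OrderNumbers& now) const {
        return previous_processed_num_ == now.unprocessed;
      }

     private:
      uint32_t previous_processed_num_ = 0;
    };

    int main() {
      DelayedWorkPoller poller;
      poller.OnScheduleDelayedWork(OrderNumbers{41, 41});
      return poller.IsIdle(OrderNumbers{41, 41}) ? 0 : 1;  // idle: returns 0
    }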
     }
   }
 }

 void GpuCommandBufferStub::OnAsyncFlush(
     int32 put_offset,
     uint32 flush_count,
     const std::vector<ui::LatencyInfo>& latency_info) {
   TRACE_EVENT1(
       "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);
+  DCHECK(command_buffer_);

-  if (ui::LatencyInfo::Verify(latency_info,
+  // We received this message out-of-order. This should not happen but is here
+  // to catch regressions. Ignore the message.
+  if (flush_count - last_flush_count_ >= 0x8000000U)
+    DVLOG(0) << "Received a Flush message out-of-order";
piman 2015/09/15 23:05:11 nit: use DVLOG_IF
sunnyps 2015/09/16 00:52:24 Done.
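For reference, the DVLOG_IF form piman asks for collapses the check into one statement; presumably the next patch set ends up with something like this (a sketch, not quoted from it):

    DVLOG_IF(0, flush_count - last_flush_count_ >= 0x8000000U)
        << "Received a Flush message out-of-order";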
+
+  if (flush_count > last_flush_count_ &&
+      ui::LatencyInfo::Verify(latency_info,
                               "GpuCommandBufferStub::OnAsyncFlush") &&
       !latency_info_callback_.is_null()) {
     latency_info_callback_.Run(latency_info);
   }
-  DCHECK(command_buffer_.get());
-  if (flush_count - last_flush_count_ < 0x8000000U) {
-    last_flush_count_ = flush_count;
-    command_buffer_->Flush(put_offset);
-  } else {
-    // We received this message out-of-order. This should not happen but is here
-    // to catch regressions. Ignore the message.
-    NOTREACHED() << "Received a Flush message out-of-order";
-  }

-  ReportState();
-}
-
-void GpuCommandBufferStub::OnRescheduled() {
+  last_flush_count_ = flush_count;
   gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
-  command_buffer_->Flush(command_buffer_->GetPutOffset());
+  command_buffer_->Flush(put_offset);
   gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

   if (pre_state.get_offset != post_state.get_offset)
     ReportState();
 }
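The flush_count - last_flush_count_ >= 0x8000000U comparison is a wraparound-safe ordering check: flush_count is an unsigned 32-bit counter and unsigned subtraction is defined modulo 2^32, so a counter that has wrapped past zero still produces a small difference while a stale or replayed count produces a huge one. A standalone sketch of the same arithmetic, using an illustrative helper that is not part of the patch:

    #include <cassert>
    #include <cstdint>

    // Window within which a flush count is treated as in-order; mirrors the
    // 0x8000000U (2^27) constant in OnAsyncFlush above.
    constexpr uint32_t kInOrderWindow = 0x8000000u;

    // True when 'current' is at or ahead of 'last' by less than the window,
    // computed modulo 2^32 so the counter may legitimately wrap.
    bool IsFlushInOrder(uint32_t current, uint32_t last) {
      return current - last < kInOrderWindow;
    }

    int main() {
      assert(IsFlushInOrder(5u, 0xFFFFFFFFu));   // wrapped: 5 - (-1) == 6
      assert(!IsFlushInOrder(0xFFFFFFFFu, 5u));  // stale count: huge gap
      return 0;
    }

Also visible in the diff itself: the old code skipped the flush and hit NOTREACHED() on an out-of-order count, while the new code only logs, then updates last_flush_count_ and flushes unconditionally.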

 void GpuCommandBufferStub::OnRegisterTransferBuffer(
     int32 id,
     base::SharedMemoryHandle transfer_buffer,
     uint32 size) {
(...skipping 63 matching lines...)
   // encoder is registered as a DestructionObserver of this stub and will
   // self-delete during destruction of this stub.
 }

 void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
   if (memory_manager_client_state_)
     memory_manager_client_state_->SetVisible(visible);
 }

-void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
+void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point, bool retire) {
   sync_points_.push_back(sync_point);
+  if (retire)
+    OnRetireSyncPoint(sync_point);
 }

 void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
   DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
   sync_points_.pop_front();

   gpu::gles2::MailboxManager* mailbox_manager =
       context_group_->mailbox_manager();
   if (mailbox_manager->UsesSync() && MakeCurrent())
     mailbox_manager->PushTextureUpdates(sync_point);
(...skipping 277 matching lines...)
                                  result));
 }

 void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase,
                                                      base::TimeDelta interval) {
   Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase,
                                                      interval));
 }

 }  // namespace content