
Diff: gpu/ipc/service/gpu_channel.cc

Issue 2814843002: gpu: GPU service scheduler. (Closed)
Patch Set: fix test dcheck failures (created 3 years, 7 months ago)
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "gpu/ipc/service/gpu_channel.h"

 #include <utility>

 #if defined(OS_WIN)
 #include <windows.h>
(...skipping 16 matching lines...)
 #include "base/threading/thread_task_runner_handle.h"
 #include "base/timer/timer.h"
 #include "base/trace_event/memory_dump_manager.h"
 #include "base/trace_event/process_memory_dump.h"
 #include "base/trace_event/trace_event.h"
 #include "build/build_config.h"
 #include "gpu/command_buffer/common/mailbox.h"
 #include "gpu/command_buffer/service/image_factory.h"
 #include "gpu/command_buffer/service/mailbox_manager.h"
 #include "gpu/command_buffer/service/preemption_flag.h"
+#include "gpu/command_buffer/service/scheduler.h"
 #include "gpu/ipc/common/gpu_messages.h"
 #include "gpu/ipc/service/gpu_channel_manager.h"
 #include "gpu/ipc/service/gpu_channel_manager_delegate.h"
 #include "gpu/ipc/service/gpu_memory_buffer_factory.h"
 #include "ipc/ipc_channel.h"
 #include "ipc/message_filter.h"
 #include "ui/gl/gl_context.h"
 #include "ui/gl/gl_image_shared_memory.h"
 #include "ui/gl/gl_surface.h"

(...skipping 45 matching lines...)
 void SyncChannelFilteredSender::AddFilter(IPC::MessageFilter* filter) {
   channel_->AddFilter(filter);
 }

 void SyncChannelFilteredSender::RemoveFilter(IPC::MessageFilter* filter) {
   channel_->RemoveFilter(filter);
 }

 GpuChannelMessageQueue::GpuChannelMessageQueue(
     GpuChannel* channel,
+    scoped_refptr<SyncPointOrderData> sync_point_order_data,
     scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
     scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
     scoped_refptr<PreemptionFlag> preempting_flag,
-    scoped_refptr<PreemptionFlag> preempted_flag,
-    SyncPointManager* sync_point_manager)
+    scoped_refptr<PreemptionFlag> preempted_flag)
     : channel_(channel),
       max_preemption_time_(
           base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs)),
       timer_(new base::OneShotTimer),
-      sync_point_order_data_(sync_point_manager->CreateSyncPointOrderData()),
+      sync_point_order_data_(std::move(sync_point_order_data)),
       main_task_runner_(std::move(main_task_runner)),
       io_task_runner_(std::move(io_task_runner)),
       preempting_flag_(std::move(preempting_flag)),
-      preempted_flag_(std::move(preempted_flag)),
-      sync_point_manager_(sync_point_manager) {
+      preempted_flag_(std::move(preempted_flag)) {
   timer_->SetTaskRunner(io_task_runner_);
   io_thread_checker_.DetachFromThread();
 }

 GpuChannelMessageQueue::~GpuChannelMessageQueue() {
   DCHECK(channel_messages_.empty());
 }

 void GpuChannelMessageQueue::Destroy() {
-  // We guarantee that the queue will no longer be modified after Destroy is
-  // called, it is now safe to modify the queue without the lock. All public
-  // facing modifying functions check enabled_ while all private modifying
-  // functions DCHECK(enabled_) to enforce this.
-  while (!channel_messages_.empty()) {
-    const IPC::Message& msg = channel_messages_.front()->message;
-    if (msg.is_sync()) {
-      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&msg);
-      reply->set_reply_error();
-      channel_->Send(reply);
-    }
-    channel_messages_.pop_front();
-  }
-
   sync_point_order_data_->Destroy();

   if (preempting_flag_)
     preempting_flag_->Reset();

   // Destroy timer on io thread.
   io_task_runner_->PostTask(
       FROM_HERE, base::Bind([](std::unique_ptr<base::OneShotTimer>) {},
                             base::Passed(&timer_)));

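Note: the PostTask at the end of Destroy() is a common Chromium idiom for destroying an object on the thread that uses it. A no-op callback takes ownership of the std::unique_ptr via base::Passed, so the timer's destructor runs on the IO thread when the bound state is destroyed. A minimal sketch of the same idiom; the helper name is illustrative and not part of this patch:

// Sketch only: hand |timer| to the IO thread so its destructor runs there.
#include <memory>
#include "base/bind.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/timer/timer.h"

void DestroyTimerOnIoThread(
    scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
    std::unique_ptr<base::OneShotTimer> timer) {
  // The lambda body is intentionally empty: destroying the bound unique_ptr
  // when the task runs (or is dropped) is what destroys the timer on the IO
  // thread.
  io_task_runner->PostTask(
      FROM_HERE, base::Bind([](std::unique_ptr<base::OneShotTimer>) {},
                            base::Passed(&timer)));
}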
(...skipping 282 matching lines...)
   DCHECK(preemption_state_ == CHECKING || preemption_state_ == PREEMPTING);
   DCHECK(!scheduled_);

   preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
   preempting_flag_->Reset();
   TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
 }

 GpuChannelMessageFilter::GpuChannelMessageFilter(
     GpuChannel* gpu_channel,
+    Scheduler* scheduler,
     scoped_refptr<GpuChannelMessageQueue> message_queue,
     scoped_refptr<base::SingleThreadTaskRunner> main_task_runner)
     : gpu_channel_(gpu_channel),
+      scheduler_(scheduler),
       message_queue_(std::move(message_queue)),
       main_task_runner_(std::move(main_task_runner)) {}

 GpuChannelMessageFilter::~GpuChannelMessageFilter() {
   DCHECK(!gpu_channel_);
 }

 void GpuChannelMessageFilter::Destroy() {
   base::AutoLock auto_lock(gpu_channel_lock_);
   gpu_channel_ = nullptr;
 }

+void GpuChannelMessageFilter::AddRoute(int32_t route_id,
+                                       SequenceId sequence_id) {
+  base::AutoLock auto_lock(gpu_channel_lock_);
+  DCHECK(gpu_channel_);
+  DCHECK(scheduler_);
+  route_sequences_[route_id] = sequence_id;
+}
+
+void GpuChannelMessageFilter::RemoveRoute(int32_t route_id) {
+  base::AutoLock auto_lock(gpu_channel_lock_);
+  DCHECK(gpu_channel_);
+  DCHECK(scheduler_);
+  route_sequences_.erase(route_id);
+}
+
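Note: the members used by AddRoute() and RemoveRoute() above are declared in gpu/ipc/service/gpu_channel.h, which is not shown in this file. A sketch of what those declarations presumably look like; the container type is an assumption, only the member names appear in this diff:

// Presumed private members of GpuChannelMessageFilter (gpu_channel.h).
// |route_sequences_| is written under |gpu_channel_lock_| by AddRoute /
// RemoveRoute on the main thread and read under the same lock when routing
// incoming messages on the IO thread.
base::Lock gpu_channel_lock_;
GpuChannel* gpu_channel_ = nullptr;   // cleared in Destroy()
Scheduler* scheduler_ = nullptr;      // null when the scheduler path is off
base::flat_map<int32_t, SequenceId> route_sequences_;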
 void GpuChannelMessageFilter::OnFilterAdded(IPC::Channel* channel) {
   DCHECK(!ipc_channel_);
   ipc_channel_ = channel;
   for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_)
     filter->OnFilterAdded(ipc_channel_);
 }

 void GpuChannelMessageFilter::OnFilterRemoved() {
   for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_)
     filter->OnFilterRemoved();
(...skipping 30 matching lines...)
 void GpuChannelMessageFilter::RemoveChannelFilter(
     scoped_refptr<IPC::MessageFilter> filter) {
   if (ipc_channel_)
     filter->OnFilterRemoved();
   base::Erase(channel_filters_, filter);
 }

 bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
   DCHECK(ipc_channel_);

+  if (!gpu_channel_)
+    return MessageErrorHandler(message, "Channel destroyed");
+
   if (message.should_unblock() || message.is_reply())
     return MessageErrorHandler(message, "Unexpected message type");

   if (message.type() == GpuChannelMsg_Nop::ID) {
     IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
     Send(reply);
     return true;
   }

   for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
     if (filter->OnMessageReceived(message))
       return true;
   }

   base::AutoLock auto_lock(gpu_channel_lock_);
   if (!gpu_channel_)
     return MessageErrorHandler(message, "Channel destroyed");

   if (message.routing_id() == MSG_ROUTING_CONTROL ||
       message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
       message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
     // It's OK to post a task that may never run even for sync messages,
     // because if the channel is destroyed, the client Send will fail.
     main_task_runner_->PostTask(FROM_HERE,
                                 base::Bind(&GpuChannel::HandleOutOfOrderMessage,
                                            gpu_channel_->AsWeakPtr(), message));
+  } else if (scheduler_) {
+    SequenceId sequence_id = route_sequences_[message.routing_id()];
+    if (sequence_id.is_null())
+      return MessageErrorHandler(message, "Invalid route");
+
+    std::vector<SyncToken> sync_token_fences;
+    if (message.type() == GpuCommandBufferMsg_AsyncFlush::ID) {
+      GpuCommandBufferMsg_AsyncFlush::Param params;
+      if (!GpuCommandBufferMsg_AsyncFlush::Read(&message, &params))
+        return MessageErrorHandler(message, "Invalid flush message");
+      sync_token_fences = std::get<3>(params);
+    }
+
+    scheduler_->ScheduleTask(sequence_id,
+                             base::BindOnce(&GpuChannel::HandleMessage,
+                                            gpu_channel_->AsWeakPtr(), message),
+                             sync_token_fences);
   } else {
     // Message queue takes care of PostTask.
     message_queue_->PushBackMessage(message);
   }

   return true;
 }
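Note: the scheduler branch above extracts the sync token fences from an AsyncFlush message before scheduling it, presumably so the scheduler can hold the task until those fences are released. The deserialization pattern in isolation; the tuple index 3 for the fence list is taken from this patch, not re-derived from the message definition:

// Sketch of the IPC read pattern used above: Message::Param is a std::tuple of
// the message's arguments, Read() fills it from the payload, and std::get<N>()
// picks one argument out of the tuple.
std::vector<SyncToken> ExtractFlushFences(const IPC::Message& message) {
  std::vector<SyncToken> fences;
  if (message.type() == GpuCommandBufferMsg_AsyncFlush::ID) {
    GpuCommandBufferMsg_AsyncFlush::Param params;
    if (GpuCommandBufferMsg_AsyncFlush::Read(&message, &params))
      fences = std::get<3>(params);  // the sync token fence list, per this CL
  }
  return fences;
}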

 bool GpuChannelMessageFilter::Send(IPC::Message* message) {
   return ipc_channel_->Send(message);
(...skipping 11 matching lines...)
 }

 // Definitions for constructor and destructor of this interface are needed to
 // avoid MSVC LNK2019.
 FilteredSender::FilteredSender() = default;

 FilteredSender::~FilteredSender() = default;

 GpuChannel::GpuChannel(
     GpuChannelManager* gpu_channel_manager,
+    Scheduler* scheduler,
     SyncPointManager* sync_point_manager,
     GpuWatchdogThread* watchdog,
     scoped_refptr<gl::GLShareGroup> share_group,
     scoped_refptr<gles2::MailboxManager> mailbox_manager,
     scoped_refptr<PreemptionFlag> preempting_flag,
     scoped_refptr<PreemptionFlag> preempted_flag,
     scoped_refptr<base::SingleThreadTaskRunner> task_runner,
     scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
     int32_t client_id,
     uint64_t client_tracing_id,
     bool is_gpu_host)
     : gpu_channel_manager_(gpu_channel_manager),
+      scheduler_(scheduler),
       sync_point_manager_(sync_point_manager),
       preempting_flag_(preempting_flag),
       preempted_flag_(preempted_flag),
       client_id_(client_id),
       client_tracing_id_(client_tracing_id),
       task_runner_(task_runner),
       io_task_runner_(io_task_runner),
       share_group_(share_group),
       mailbox_manager_(mailbox_manager),
       watchdog_(watchdog),
       is_gpu_host_(is_gpu_host),
       weak_factory_(this) {
-  DCHECK(gpu_channel_manager);
-  DCHECK(client_id);
+  DCHECK(gpu_channel_manager_);
+  DCHECK(client_id_);

-  message_queue_ = new GpuChannelMessageQueue(this, task_runner, io_task_runner,
-                                              preempting_flag, preempted_flag,
-                                              sync_point_manager);
+  if (!scheduler_) {
+    message_queue_ = new GpuChannelMessageQueue(
+        this, sync_point_manager->CreateSyncPointOrderData(), task_runner,
+        io_task_runner, preempting_flag, preempted_flag);
+  }

-  filter_ = new GpuChannelMessageFilter(this, message_queue_, task_runner);
+  filter_ =
+      new GpuChannelMessageFilter(this, scheduler, message_queue_, task_runner);
 }

 GpuChannel::~GpuChannel() {
   // Clear stubs first because of dependencies.
   stubs_.clear();

   // Destroy the filter first so that the message queue gets no more messages.
   filter_->Destroy();

-  message_queue_->Destroy();
+  if (scheduler_) {
+    for (const auto& kv : stream_sequences_)
+      scheduler_->DestroySequence(kv.second);
+  } else {
+    message_queue_->Destroy();
+  }

   DCHECK(!preempting_flag_ || !preempting_flag_->IsSet());
 }

 void GpuChannel::Init(std::unique_ptr<FilteredSender> channel) {
   channel_ = std::move(channel);
   channel_->AddFilter(filter_.get());
 }

 void GpuChannel::SetUnhandledMessageListener(IPC::Listener* listener) {
(...skipping 33 matching lines...)

   if (!channel_) {
     delete message;
     return false;
   }

   return channel_->Send(message);
 }

 void GpuChannel::OnCommandBufferScheduled(GpuCommandBufferStub* stub) {
-  message_queue_->SetScheduled(true);
-  // TODO(sunnyps): Enable gpu scheduler task queue for stub's sequence.
+  if (scheduler_) {
+    scheduler_->EnableSequence(stub->sequence_id());
+  } else {
+    message_queue_->SetScheduled(true);
+  }
 }

 void GpuChannel::OnCommandBufferDescheduled(GpuCommandBufferStub* stub) {
-  message_queue_->SetScheduled(false);
-  // TODO(sunnyps): Disable gpu scheduler task queue for stub's sequence.
+  if (scheduler_) {
+    scheduler_->DisableSequence(stub->sequence_id());
+  } else {
+    message_queue_->SetScheduled(false);
+  }
 }

 GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) {
   auto it = stubs_.find(route_id);
   if (it == stubs_.end())
     return nullptr;

   return it->second.get();
 }

 void GpuChannel::LoseAllContexts() {
   gpu_channel_manager_->LoseAllContexts();
 }

 void GpuChannel::MarkAllContextsLost() {
   for (auto& kv : stubs_)
     kv.second->MarkContextLost();
 }

 bool GpuChannel::AddRoute(int32_t route_id,
                           SequenceId sequence_id,
                           IPC::Listener* listener) {
-  // TODO(sunnyps): Add route id to sequence id mapping to filter.
+  if (scheduler_)
+    filter_->AddRoute(route_id, sequence_id);
   return router_.AddRoute(route_id, listener);
 }

 void GpuChannel::RemoveRoute(int32_t route_id) {
+  if (scheduler_)
+    filter_->RemoveRoute(route_id);
   router_.RemoveRoute(route_id);
 }

 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
   bool handled = true;
   IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
     IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateCommandBuffer,
                         OnCreateCommandBuffer)
     IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                         OnDestroyCommandBuffer)
     IPC_MESSAGE_HANDLER(GpuChannelMsg_GetDriverBugWorkArounds,
                         OnGetDriverBugWorkArounds)
     IPC_MESSAGE_UNHANDLED(handled = false)
   IPC_END_MESSAGE_MAP()
   return handled;
 }

+void GpuChannel::HandleMessage(const IPC::Message& msg) {
+  int32_t routing_id = msg.routing_id();
+  GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id);
+
+  DCHECK(!stub || stub->IsScheduled());
+
+  DVLOG(1) << "received message @" << &msg << " on channel @" << this
+           << " with type " << msg.type();
+
+  HandleMessageHelper(msg);
+
+  // If we got descheduled or yielded while processing the message, continue
+  // the remaining work in a follow-up task on the same sequence.
+  if (stub && (stub->HasUnprocessedCommands() || !stub->IsScheduled())) {
+    DCHECK((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID == msg.type() ||
+           (uint32_t)GpuCommandBufferMsg_WaitSyncToken::ID == msg.type());
+    scheduler_->ContinueTask(
+        stub->sequence_id(),
+        base::BindOnce(&GpuChannel::HandleMessage, AsWeakPtr(), msg));
+  }
+}
+
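Note: HandleMessage() above re-posts itself when the stub yields or gets descheduled in the middle of a message, which is how a long flush shares the GPU main thread with other sequences. A generic sketch of that cooperative-yield pattern, assuming the Scheduler::ContinueTask(SequenceId, base::OnceClosure) API used in this patch; Worker and ProcessSomeCommands() are illustrative names, not part of the CL:

// Sketch only: a task that processes a bounded chunk of work and, if there is
// more to do, re-enqueues itself on the same sequence so tasks from other
// sequences can run in between.
class Worker {
 public:
  Worker(Scheduler* scheduler, SequenceId sequence_id)
      : scheduler_(scheduler), sequence_id_(sequence_id), weak_factory_(this) {}

  void DoSomeWork() {
    if (ProcessSomeCommands()) {
      // Yield: finish the rest in a later task on the same sequence.
      scheduler_->ContinueTask(
          sequence_id_,
          base::BindOnce(&Worker::DoSomeWork, weak_factory_.GetWeakPtr()));
    }
  }

 private:
  bool ProcessSomeCommands();  // hypothetical; returns true if work remains

  Scheduler* const scheduler_;
  const SequenceId sequence_id_;
  base::WeakPtrFactory<Worker> weak_factory_;
};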
 void GpuChannel::HandleMessageOnQueue() {
   const GpuChannelMessage* channel_msg =
       message_queue_->BeginMessageProcessing();
   if (!channel_msg)
     return;

   const IPC::Message& msg = channel_msg->message;
   int32_t routing_id = msg.routing_id();
   GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id);

(...skipping 72 matching lines...)
     *capabilities = gpu::Capabilities();
   }
 }

 std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer(
     const GPUCreateCommandBufferConfig& init_params,
     int32_t route_id,
     std::unique_ptr<base::SharedMemory> shared_state_shm) {
   if (init_params.surface_handle != kNullSurfaceHandle && !is_gpu_host_) {
     DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): attempt to create a "
-                   "view context on a non-priviledged channel";
+                   "view context on a non-privileged channel";
     return nullptr;
   }

   int32_t share_group_id = init_params.share_group_id;
   GpuCommandBufferStub* share_group = LookupCommandBuffer(share_group_id);

   if (!share_group && share_group_id != MSG_ROUTING_NONE) {
     DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): invalid share group id";
     return nullptr;
   }

   int32_t stream_id = init_params.stream_id;
   if (share_group && stream_id != share_group->stream_id()) {
     DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): stream id does not "
                    "match share group stream id";
     return nullptr;
   }

-  GpuStreamPriority stream_priority = init_params.stream_priority;
-  if (stream_priority == GpuStreamPriority::REAL_TIME && !is_gpu_host_) {
-    DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): real time stream "
-                   "priority not allowed";
+  SchedulingPriority stream_priority = init_params.stream_priority;
+  if (stream_priority <= SchedulingPriority::kHigh && !is_gpu_host_) {
+    DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): high priority stream "
+                   "not allowed on a non-privileged channel";
     return nullptr;
   }

   if (share_group && !share_group->decoder()) {
     // This should catch test errors where we did not Initialize the
     // share_group's CommandBuffer.
     DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): shared context was "
                    "not initialized";
     return nullptr;
   }

   if (share_group && share_group->decoder()->WasContextLost()) {
     DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): shared context was "
                    "already lost";
     return nullptr;
   }

   CommandBufferId command_buffer_id =
       GenerateCommandBufferId(client_id_, route_id);

-  // TODO(sunnyps): Lookup sequence id using stream id to sequence id map.
-  SequenceId sequence_id = message_queue_->sequence_id();
+  SequenceId sequence_id;
+  if (scheduler_) {
+    sequence_id = stream_sequences_[stream_id];
+    if (sequence_id.is_null()) {
+      sequence_id = scheduler_->CreateSequence(stream_priority);
+      stream_sequences_[stream_id] = sequence_id;
+    }
+  } else {
+    sequence_id = message_queue_->sequence_id();
+  }

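Note: with a scheduler, each client stream id lazily gets its own scheduler sequence (created above), and every command buffer on that stream shares it, so their messages presumably stay ordered relative to each other. The backing map would be declared in gpu_channel.h roughly as follows; the container type is an assumption, only the member name appears in this diff:

// Presumed private member of GpuChannel (gpu_channel.h): client stream id to
// scheduler sequence id, populated lazily in CreateCommandBuffer() and torn
// down in ~GpuChannel() via Scheduler::DestroySequence().
base::flat_map<int32_t, SequenceId> stream_sequences_;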
   std::unique_ptr<GpuCommandBufferStub> stub(GpuCommandBufferStub::Create(
       this, share_group, init_params, command_buffer_id, sequence_id, stream_id,
       route_id, std::move(shared_state_shm)));

   if (!AddRoute(route_id, sequence_id, stub.get())) {
     DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): failed to add route";
     return nullptr;
   }

(...skipping 89 matching lines...)

       return manager->gpu_memory_buffer_factory()
           ->AsImageFactory()
           ->CreateImageForGpuMemoryBuffer(handle, size, format, internalformat,
                                           client_id_, surface_handle);
     }
   }
 }

 }  // namespace gpu
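Note: a hypothetical construction site for the new GpuChannel signature, to show how the two modes are selected; the real caller is GpuChannelManager (see gpu_channel_manager.h in this CL), and the variable names here are illustrative:

// Sketch only: a non-null Scheduler* opts the channel into the new scheduler
// path; nullptr keeps the legacy GpuChannelMessageQueue path.
auto channel = std::make_unique<GpuChannel>(
    gpu_channel_manager, scheduler /* or nullptr */, sync_point_manager,
    watchdog, share_group, mailbox_manager, preempting_flag, preempted_flag,
    task_runner, io_task_runner, client_id, client_tracing_id, is_gpu_host);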