Chromium Code Reviews

Side by Side Diff: gpu/ipc/service/gpu_channel.cc

Issue 2814843002: gpu: GPU service scheduler. (Closed)
Patch Set: rebase Created 3 years, 7 months ago
OLD | NEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "gpu/ipc/service/gpu_channel.h" 5 #include "gpu/ipc/service/gpu_channel.h"
6 6
7 #include <utility> 7 #include <utility>
8 8
9 #if defined(OS_WIN) 9 #if defined(OS_WIN)
10 #include <windows.h> 10 #include <windows.h>
(...skipping 16 matching lines...)
27 #include "base/threading/thread_task_runner_handle.h" 27 #include "base/threading/thread_task_runner_handle.h"
28 #include "base/timer/timer.h" 28 #include "base/timer/timer.h"
29 #include "base/trace_event/memory_dump_manager.h" 29 #include "base/trace_event/memory_dump_manager.h"
30 #include "base/trace_event/process_memory_dump.h" 30 #include "base/trace_event/process_memory_dump.h"
31 #include "base/trace_event/trace_event.h" 31 #include "base/trace_event/trace_event.h"
32 #include "build/build_config.h" 32 #include "build/build_config.h"
33 #include "gpu/command_buffer/common/mailbox.h" 33 #include "gpu/command_buffer/common/mailbox.h"
34 #include "gpu/command_buffer/service/image_factory.h" 34 #include "gpu/command_buffer/service/image_factory.h"
35 #include "gpu/command_buffer/service/mailbox_manager.h" 35 #include "gpu/command_buffer/service/mailbox_manager.h"
36 #include "gpu/command_buffer/service/preemption_flag.h" 36 #include "gpu/command_buffer/service/preemption_flag.h"
37 #include "gpu/command_buffer/service/scheduler.h"
37 #include "gpu/ipc/common/gpu_messages.h" 38 #include "gpu/ipc/common/gpu_messages.h"
38 #include "gpu/ipc/service/gpu_channel_manager.h" 39 #include "gpu/ipc/service/gpu_channel_manager.h"
39 #include "gpu/ipc/service/gpu_channel_manager_delegate.h" 40 #include "gpu/ipc/service/gpu_channel_manager_delegate.h"
40 #include "gpu/ipc/service/gpu_memory_buffer_factory.h" 41 #include "gpu/ipc/service/gpu_memory_buffer_factory.h"
41 #include "ipc/ipc_channel.h" 42 #include "ipc/ipc_channel.h"
42 #include "ipc/message_filter.h" 43 #include "ipc/message_filter.h"
43 #include "ui/gl/gl_context.h" 44 #include "ui/gl/gl_context.h"
44 #include "ui/gl/gl_image_shared_memory.h" 45 #include "ui/gl/gl_image_shared_memory.h"
45 #include "ui/gl/gl_surface.h" 46 #include "ui/gl/gl_surface.h"
46 47
(...skipping 45 matching lines...)
92 void SyncChannelFilteredSender::AddFilter(IPC::MessageFilter* filter) { 93 void SyncChannelFilteredSender::AddFilter(IPC::MessageFilter* filter) {
93 channel_->AddFilter(filter); 94 channel_->AddFilter(filter);
94 } 95 }
95 96
96 void SyncChannelFilteredSender::RemoveFilter(IPC::MessageFilter* filter) { 97 void SyncChannelFilteredSender::RemoveFilter(IPC::MessageFilter* filter) {
97 channel_->RemoveFilter(filter); 98 channel_->RemoveFilter(filter);
98 } 99 }
99 100
100 GpuChannelMessageQueue::GpuChannelMessageQueue( 101 GpuChannelMessageQueue::GpuChannelMessageQueue(
101 GpuChannel* channel, 102 GpuChannel* channel,
103 scoped_refptr<SyncPointOrderData> sync_point_order_data,
102 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner, 104 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
103 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner, 105 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
104 scoped_refptr<PreemptionFlag> preempting_flag, 106 scoped_refptr<PreemptionFlag> preempting_flag,
105 scoped_refptr<PreemptionFlag> preempted_flag, 107 scoped_refptr<PreemptionFlag> preempted_flag)
106 SyncPointManager* sync_point_manager)
107 : channel_(channel), 108 : channel_(channel),
108 max_preemption_time_( 109 max_preemption_time_(
109 base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs)), 110 base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs)),
110 timer_(new base::OneShotTimer), 111 timer_(new base::OneShotTimer),
111 sync_point_order_data_(sync_point_manager->CreateSyncPointOrderData()), 112 sync_point_order_data_(std::move(sync_point_order_data)),
112 main_task_runner_(std::move(main_task_runner)), 113 main_task_runner_(std::move(main_task_runner)),
113 io_task_runner_(std::move(io_task_runner)), 114 io_task_runner_(std::move(io_task_runner)),
114 preempting_flag_(std::move(preempting_flag)), 115 preempting_flag_(std::move(preempting_flag)),
115 preempted_flag_(std::move(preempted_flag)), 116 preempted_flag_(std::move(preempted_flag)) {
116 sync_point_manager_(sync_point_manager) {
117 timer_->SetTaskRunner(io_task_runner_); 117 timer_->SetTaskRunner(io_task_runner_);
118 io_thread_checker_.DetachFromThread(); 118 io_thread_checker_.DetachFromThread();
119 } 119 }
120 120
121 GpuChannelMessageQueue::~GpuChannelMessageQueue() { 121 GpuChannelMessageQueue::~GpuChannelMessageQueue() {
122 DCHECK(channel_messages_.empty()); 122 DCHECK(channel_messages_.empty());
123 } 123 }
124 124
125 void GpuChannelMessageQueue::Destroy() { 125 void GpuChannelMessageQueue::Destroy() {
126 // We guarantee that the queue will no longer be modified after Destroy is 126 // We guarantee that the queue will no longer be modified after Destroy is
(...skipping 305 matching lines...)
432 DCHECK(preemption_state_ == CHECKING || preemption_state_ == PREEMPTING); 432 DCHECK(preemption_state_ == CHECKING || preemption_state_ == PREEMPTING);
433 DCHECK(!scheduled_); 433 DCHECK(!scheduled_);
434 434
435 preemption_state_ = WOULD_PREEMPT_DESCHEDULED; 435 preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
436 preempting_flag_->Reset(); 436 preempting_flag_->Reset();
437 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); 437 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
438 } 438 }
439 439
440 GpuChannelMessageFilter::GpuChannelMessageFilter( 440 GpuChannelMessageFilter::GpuChannelMessageFilter(
441 GpuChannel* gpu_channel, 441 GpuChannel* gpu_channel,
442 Scheduler* scheduler,
442 scoped_refptr<GpuChannelMessageQueue> message_queue, 443 scoped_refptr<GpuChannelMessageQueue> message_queue,
443 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner) 444 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner)
444 : gpu_channel_(gpu_channel), 445 : gpu_channel_(gpu_channel),
446 scheduler_(scheduler),
445 message_queue_(std::move(message_queue)), 447 message_queue_(std::move(message_queue)),
446 main_task_runner_(std::move(main_task_runner)) {} 448 main_task_runner_(std::move(main_task_runner)) {}
447 449
448 GpuChannelMessageFilter::~GpuChannelMessageFilter() { 450 GpuChannelMessageFilter::~GpuChannelMessageFilter() {
449 DCHECK(!gpu_channel_); 451 DCHECK(!gpu_channel_);
450 } 452 }
451 453
452 void GpuChannelMessageFilter::Destroy() { 454 void GpuChannelMessageFilter::Destroy() {
453 base::AutoLock auto_lock(gpu_channel_lock_); 455 base::AutoLock auto_lock(gpu_channel_lock_);
454 gpu_channel_ = nullptr; 456 gpu_channel_ = nullptr;
455 } 457 }
456 458
459 void GpuChannelMessageFilter::AddRoute(int32_t route_id,
460 SequenceId sequence_id) {
461 base::AutoLock auto_lock(gpu_channel_lock_);
462 DCHECK(gpu_channel_);
463 DCHECK(scheduler_);
464 route_sequences_[route_id] = sequence_id;
465 }
466
467 void GpuChannelMessageFilter::RemoveRoute(int32_t route_id) {
468 base::AutoLock auto_lock(gpu_channel_lock_);
469 DCHECK(gpu_channel_);
470 DCHECK(scheduler_);
471 route_sequences_.erase(route_id);
472 }
473
457 void GpuChannelMessageFilter::OnFilterAdded(IPC::Channel* channel) { 474 void GpuChannelMessageFilter::OnFilterAdded(IPC::Channel* channel) {
458 DCHECK(!ipc_channel_); 475 DCHECK(!ipc_channel_);
459 ipc_channel_ = channel; 476 ipc_channel_ = channel;
460 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) 477 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_)
461 filter->OnFilterAdded(ipc_channel_); 478 filter->OnFilterAdded(ipc_channel_);
462 } 479 }
463 480
464 void GpuChannelMessageFilter::OnFilterRemoved() { 481 void GpuChannelMessageFilter::OnFilterRemoved() {
465 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) 482 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_)
466 filter->OnFilterRemoved(); 483 filter->OnFilterRemoved();
(...skipping 30 matching lines...)
497 void GpuChannelMessageFilter::RemoveChannelFilter( 514 void GpuChannelMessageFilter::RemoveChannelFilter(
498 scoped_refptr<IPC::MessageFilter> filter) { 515 scoped_refptr<IPC::MessageFilter> filter) {
499 if (ipc_channel_) 516 if (ipc_channel_)
500 filter->OnFilterRemoved(); 517 filter->OnFilterRemoved();
501 base::Erase(channel_filters_, filter); 518 base::Erase(channel_filters_, filter);
502 } 519 }
503 520
504 bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { 521 bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
505 DCHECK(ipc_channel_); 522 DCHECK(ipc_channel_);
506 523
524 if (!gpu_channel_)
525 return MessageErrorHandler(message, "Channel destroyed");
526
507 if (message.should_unblock() || message.is_reply()) 527 if (message.should_unblock() || message.is_reply())
508 return MessageErrorHandler(message, "Unexpected message type"); 528 return MessageErrorHandler(message, "Unexpected message type");
509 529
510 if (message.type() == GpuChannelMsg_Nop::ID) { 530 if (message.type() == GpuChannelMsg_Nop::ID) {
511 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); 531 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
512 Send(reply); 532 Send(reply);
513 return true; 533 return true;
514 } 534 }
515 535
516 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { 536 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
517 if (filter->OnMessageReceived(message)) 537 if (filter->OnMessageReceived(message))
518 return true; 538 return true;
519 } 539 }
520 540
521 base::AutoLock auto_lock(gpu_channel_lock_); 541 base::AutoLock auto_lock(gpu_channel_lock_);
522 if (!gpu_channel_) 542 if (!gpu_channel_)
523 return MessageErrorHandler(message, "Channel destroyed"); 543 return MessageErrorHandler(message, "Channel destroyed");
524 544
525 if (message.routing_id() == MSG_ROUTING_CONTROL || 545 if (message.routing_id() == MSG_ROUTING_CONTROL ||
526 message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || 546 message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
527 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { 547 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
528 // It's OK to post task that may never run even for sync messages, because 548 // It's OK to post task that may never run even for sync messages, because
529 // if the channel is destroyed, the client Send will fail. 549 // if the channel is destroyed, the client Send will fail.
530 main_task_runner_->PostTask(FROM_HERE, 550 main_task_runner_->PostTask(FROM_HERE,
531 base::Bind(&GpuChannel::HandleOutOfOrderMessage, 551 base::Bind(&GpuChannel::HandleOutOfOrderMessage,
532 gpu_channel_->AsWeakPtr(), message)); 552 gpu_channel_->AsWeakPtr(), message));
553 } else if (scheduler_) {
554 SequenceId sequence_id = route_sequences_[message.routing_id()];
555 if (sequence_id.is_null())
556 return MessageErrorHandler(message, "Invalid route");
557
558 std::vector<SyncToken> sync_token_fences;
559 if (message.type() == GpuCommandBufferMsg_AsyncFlush::ID) {
560 GpuCommandBufferMsg_AsyncFlush::Param params;
561 if (!GpuCommandBufferMsg_AsyncFlush::Read(&message, &params))
562 return MessageErrorHandler(message, "Invalid flush message");
563 sync_token_fences = std::get<3>(params);
564 }
565
566 scheduler_->ScheduleTask(sequence_id,
567 base::BindOnce(&GpuChannel::HandleMessage,
568 gpu_channel_->AsWeakPtr(), message),
569 sync_token_fences);
533 } else { 570 } else {
534 // Message queue takes care of PostTask. 571 // Message queue takes care of PostTask.
535 message_queue_->PushBackMessage(message); 572 message_queue_->PushBackMessage(message);
536 } 573 }
537 574
538 return true; 575 return true;
539 } 576 }
540 577
541 bool GpuChannelMessageFilter::Send(IPC::Message* message) { 578 bool GpuChannelMessageFilter::Send(IPC::Message* message) {
542 return ipc_channel_->Send(message); 579 return ipc_channel_->Send(message);
(...skipping 11 matching lines...)
554 } 591 }
555 592
556 // Definitions for constructor and destructor of this interface are needed to 593 // Definitions for constructor and destructor of this interface are needed to
557 // avoid MSVC LNK2019. 594 // avoid MSVC LNK2019.
558 FilteredSender::FilteredSender() = default; 595 FilteredSender::FilteredSender() = default;
559 596
560 FilteredSender::~FilteredSender() = default; 597 FilteredSender::~FilteredSender() = default;
561 598
562 GpuChannel::GpuChannel( 599 GpuChannel::GpuChannel(
563 GpuChannelManager* gpu_channel_manager, 600 GpuChannelManager* gpu_channel_manager,
601 Scheduler* scheduler,
564 SyncPointManager* sync_point_manager, 602 SyncPointManager* sync_point_manager,
565 GpuWatchdogThread* watchdog, 603 GpuWatchdogThread* watchdog,
566 scoped_refptr<gl::GLShareGroup> share_group, 604 scoped_refptr<gl::GLShareGroup> share_group,
567 scoped_refptr<gles2::MailboxManager> mailbox_manager, 605 scoped_refptr<gles2::MailboxManager> mailbox_manager,
568 scoped_refptr<PreemptionFlag> preempting_flag, 606 scoped_refptr<PreemptionFlag> preempting_flag,
569 scoped_refptr<PreemptionFlag> preempted_flag, 607 scoped_refptr<PreemptionFlag> preempted_flag,
570 scoped_refptr<base::SingleThreadTaskRunner> task_runner, 608 scoped_refptr<base::SingleThreadTaskRunner> task_runner,
571 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner, 609 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
572 int32_t client_id, 610 int32_t client_id,
573 uint64_t client_tracing_id, 611 uint64_t client_tracing_id,
574 bool is_gpu_host) 612 bool is_gpu_host)
575 : gpu_channel_manager_(gpu_channel_manager), 613 : gpu_channel_manager_(gpu_channel_manager),
614 scheduler_(scheduler),
576 sync_point_manager_(sync_point_manager), 615 sync_point_manager_(sync_point_manager),
577 preempting_flag_(preempting_flag), 616 preempting_flag_(preempting_flag),
578 preempted_flag_(preempted_flag), 617 preempted_flag_(preempted_flag),
579 client_id_(client_id), 618 client_id_(client_id),
580 client_tracing_id_(client_tracing_id), 619 client_tracing_id_(client_tracing_id),
581 task_runner_(task_runner), 620 task_runner_(task_runner),
582 io_task_runner_(io_task_runner), 621 io_task_runner_(io_task_runner),
583 share_group_(share_group), 622 share_group_(share_group),
584 mailbox_manager_(mailbox_manager), 623 mailbox_manager_(mailbox_manager),
585 watchdog_(watchdog), 624 watchdog_(watchdog),
586 is_gpu_host_(is_gpu_host), 625 is_gpu_host_(is_gpu_host),
587 weak_factory_(this) { 626 weak_factory_(this) {
588 DCHECK(gpu_channel_manager); 627 DCHECK(gpu_channel_manager_);
589 DCHECK(client_id); 628 DCHECK(client_id_);
590 629
591 message_queue_ = new GpuChannelMessageQueue(this, task_runner, io_task_runner, 630 if (!scheduler_) {
592 preempting_flag, preempted_flag, 631 message_queue_ = new GpuChannelMessageQueue(
593 sync_point_manager); 632 this, sync_point_manager->CreateSyncPointOrderData(), task_runner,
633 io_task_runner, preempting_flag, preempted_flag);
634 }
594 635
595 filter_ = new GpuChannelMessageFilter(this, message_queue_, task_runner); 636 filter_ =
637 new GpuChannelMessageFilter(this, scheduler, message_queue_, task_runner);
596 } 638 }
597 639
598 GpuChannel::~GpuChannel() { 640 GpuChannel::~GpuChannel() {
599 // Clear stubs first because of dependencies. 641 // Clear stubs first because of dependencies.
600 stubs_.clear(); 642 stubs_.clear();
601 643
602 // Destroy the filter first so that the message queue gets no more messages. 644 // Destroy the filter first so that the message queue gets no more messages.
603 filter_->Destroy(); 645 filter_->Destroy();
604 646
605 message_queue_->Destroy(); 647 // Release IPC channel. This acts like a flag for any posted tasks.
648 channel_ = nullptr;
649
650 if (scheduler_) {
651 for (const auto& kv : stream_sequences_)
652 scheduler_->DestroySequence(kv.second);
653 } else {
654 message_queue_->Destroy();
655 }
606 656
607 DCHECK(!preempting_flag_ || !preempting_flag_->IsSet()); 657 DCHECK(!preempting_flag_ || !preempting_flag_->IsSet());
608 } 658 }
609 659
610 void GpuChannel::Init(std::unique_ptr<FilteredSender> channel) { 660 void GpuChannel::Init(std::unique_ptr<FilteredSender> channel) {
611 channel_ = std::move(channel); 661 channel_ = std::move(channel);
612 channel_->AddFilter(filter_.get()); 662 channel_->AddFilter(filter_.get());
613 } 663 }
614 664
615 void GpuChannel::SetUnhandledMessageListener(IPC::Listener* listener) { 665 void GpuChannel::SetUnhandledMessageListener(IPC::Listener* listener) {
(...skipping 33 matching lines...)
649 699
650 if (!channel_) { 700 if (!channel_) {
651 delete message; 701 delete message;
652 return false; 702 return false;
653 } 703 }
654 704
655 return channel_->Send(message); 705 return channel_->Send(message);
656 } 706 }
657 707
658 void GpuChannel::OnCommandBufferScheduled(GpuCommandBufferStub* stub) { 708 void GpuChannel::OnCommandBufferScheduled(GpuCommandBufferStub* stub) {
659 message_queue_->SetScheduled(true); 709 if (scheduler_) {
660 // TODO(sunnyps): Enable gpu scheduler task queue for stub's sequence. 710 scheduler_->EnableSequence(stub->sequence_id());
711 } else {
712 message_queue_->SetScheduled(true);
713 }
661 } 714 }
662 715
663 void GpuChannel::OnCommandBufferDescheduled(GpuCommandBufferStub* stub) { 716 void GpuChannel::OnCommandBufferDescheduled(GpuCommandBufferStub* stub) {
664 message_queue_->SetScheduled(false); 717 if (scheduler_) {
665 // TODO(sunnyps): Disable gpu scheduler task queue for stub's sequence. 718 scheduler_->DisableSequence(stub->sequence_id());
719 } else {
720 message_queue_->SetScheduled(false);
721 }
666 } 722 }
667 723
668 GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) { 724 GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) {
669 auto it = stubs_.find(route_id); 725 auto it = stubs_.find(route_id);
670 if (it == stubs_.end()) 726 if (it == stubs_.end())
671 return nullptr; 727 return nullptr;
672 728
673 return it->second.get(); 729 return it->second.get();
674 } 730 }
675 731
676 void GpuChannel::LoseAllContexts() { 732 void GpuChannel::LoseAllContexts() {
677 gpu_channel_manager_->LoseAllContexts(); 733 gpu_channel_manager_->LoseAllContexts();
678 } 734 }
679 735
680 void GpuChannel::MarkAllContextsLost() { 736 void GpuChannel::MarkAllContextsLost() {
681 for (auto& kv : stubs_) 737 for (auto& kv : stubs_)
682 kv.second->MarkContextLost(); 738 kv.second->MarkContextLost();
683 } 739 }
684 740
685 bool GpuChannel::AddRoute(int32_t route_id, 741 bool GpuChannel::AddRoute(int32_t route_id,
686 SequenceId sequence_id, 742 SequenceId sequence_id,
687 IPC::Listener* listener) { 743 IPC::Listener* listener) {
688 // TODO(sunnyps): Add route id to sequence id mapping to filter. 744 if (scheduler_)
745 filter_->AddRoute(route_id, sequence_id);
689 return router_.AddRoute(route_id, listener); 746 return router_.AddRoute(route_id, listener);
690 } 747 }
691 748
692 void GpuChannel::RemoveRoute(int32_t route_id) { 749 void GpuChannel::RemoveRoute(int32_t route_id) {
750 if (scheduler_)
751 filter_->RemoveRoute(route_id);
693 router_.RemoveRoute(route_id); 752 router_.RemoveRoute(route_id);
694 } 753 }
695 754
696 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) { 755 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
697 bool handled = true; 756 bool handled = true;
698 IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg) 757 IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
699 IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateCommandBuffer, 758 IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateCommandBuffer,
700 OnCreateCommandBuffer) 759 OnCreateCommandBuffer)
701 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer, 760 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
702 OnDestroyCommandBuffer) 761 OnDestroyCommandBuffer)
703 IPC_MESSAGE_HANDLER(GpuChannelMsg_GetDriverBugWorkArounds, 762 IPC_MESSAGE_HANDLER(GpuChannelMsg_GetDriverBugWorkArounds,
704 OnGetDriverBugWorkArounds) 763 OnGetDriverBugWorkArounds)
705 IPC_MESSAGE_UNHANDLED(handled = false) 764 IPC_MESSAGE_UNHANDLED(handled = false)
706 IPC_END_MESSAGE_MAP() 765 IPC_END_MESSAGE_MAP()
707 return handled; 766 return handled;
708 } 767 }
709 768
769 void GpuChannel::HandleMessage(const IPC::Message& msg) {
770 // Even if we're in the process of being destroyed, be sure to reply to sync
771 // messages.
772 if (!channel_) {
773 if (msg.is_sync()) {
774 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&msg);
775 reply->set_reply_error();
776 Send(reply);
piman 2017/05/09 00:48:38 nit: if channel_ is nullptr, Send won't do anything
sunnyps 2017/05/10 23:15:15 Done.
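A minimal sketch of how the nit above could be resolved, assuming the sync-reply path is simply skipped once channel_ is null (Send() is a no-op at that point, and the comment earlier in this file notes that the client-side Send fails when the channel is destroyed); the landed patch set may differ:

    void GpuChannel::HandleMessage(const IPC::Message& msg) {
      if (!channel_) {
        // The IPC channel is being torn down; Send() would drop any generated
        // sync reply, so bail out instead of building a dead reply message.
        return;
      }
      // ... dispatch continues as in the patch above ...
    }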
777 }
778 return;
779 }
780
781 int32_t routing_id = msg.routing_id();
782 GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id);
783
784 DCHECK(!stub || stub->IsScheduled());
785
786 DVLOG(1) << "received message @" << &msg << " on channel @" << this
787 << " with type " << msg.type();
788
789 HandleMessageHelper(msg);
790
791 // If we get descheduled or yield while processing a message.
792 if (stub && (stub->HasUnprocessedCommands() || !stub->IsScheduled())) {
793 DCHECK((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID == msg.type() ||
794 (uint32_t)GpuCommandBufferMsg_WaitSyncToken::ID == msg.type());
795 scheduler_->ContinueTask(
796 stub->sequence_id(),
797 base::BindOnce(&GpuChannel::HandleMessage, AsWeakPtr(), msg));
798 }
799 }
800
710 void GpuChannel::HandleMessageOnQueue() { 801 void GpuChannel::HandleMessageOnQueue() {
711 const GpuChannelMessage* channel_msg = 802 const GpuChannelMessage* channel_msg =
712 message_queue_->BeginMessageProcessing(); 803 message_queue_->BeginMessageProcessing();
713 if (!channel_msg) 804 if (!channel_msg)
714 return; 805 return;
715 806
716 const IPC::Message& msg = channel_msg->message; 807 const IPC::Message& msg = channel_msg->message;
717 int32_t routing_id = msg.routing_id(); 808 int32_t routing_id = msg.routing_id();
718 GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id); 809 GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id);
719 810
(...skipping 72 matching lines...)
792 *capabilities = gpu::Capabilities(); 883 *capabilities = gpu::Capabilities();
793 } 884 }
794 } 885 }
795 886
796 std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer( 887 std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer(
797 const GPUCreateCommandBufferConfig& init_params, 888 const GPUCreateCommandBufferConfig& init_params,
798 int32_t route_id, 889 int32_t route_id,
799 std::unique_ptr<base::SharedMemory> shared_state_shm) { 890 std::unique_ptr<base::SharedMemory> shared_state_shm) {
800 if (init_params.surface_handle != kNullSurfaceHandle && !is_gpu_host_) { 891 if (init_params.surface_handle != kNullSurfaceHandle && !is_gpu_host_) {
801 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): attempt to create a " 892 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): attempt to create a "
802 "view context on a non-priviledged channel"; 893 "view context on a non-privileged channel";
803 return nullptr; 894 return nullptr;
804 } 895 }
805 896
806 int32_t share_group_id = init_params.share_group_id; 897 int32_t share_group_id = init_params.share_group_id;
807 GpuCommandBufferStub* share_group = LookupCommandBuffer(share_group_id); 898 GpuCommandBufferStub* share_group = LookupCommandBuffer(share_group_id);
808 899
809 if (!share_group && share_group_id != MSG_ROUTING_NONE) { 900 if (!share_group && share_group_id != MSG_ROUTING_NONE) {
810 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): invalid share group id"; 901 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): invalid share group id";
811 return nullptr; 902 return nullptr;
812 } 903 }
813 904
814 int32_t stream_id = init_params.stream_id; 905 int32_t stream_id = init_params.stream_id;
815 if (share_group && stream_id != share_group->stream_id()) { 906 if (share_group && stream_id != share_group->stream_id()) {
816 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): stream id does not " 907 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): stream id does not "
817 "match share group stream id"; 908 "match share group stream id";
818 return nullptr; 909 return nullptr;
819 } 910 }
820 911
821 GpuStreamPriority stream_priority = init_params.stream_priority; 912 GpuStreamPriority stream_priority = init_params.stream_priority;
822 if (stream_priority == GpuStreamPriority::REAL_TIME && !is_gpu_host_) { 913 if (stream_priority <= GpuStreamPriority::HIGH && !is_gpu_host_) {
823 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): real time stream " 914 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): high priority stream "
824 "priority not allowed"; 915 "not allowed on a non-privileged channel";
825 return nullptr; 916 return nullptr;
826 } 917 }
827 918
828 if (share_group && !share_group->decoder()) { 919 if (share_group && !share_group->decoder()) {
829 // This should catch test errors where we did not Initialize the 920 // This should catch test errors where we did not Initialize the
830 // share_group's CommandBuffer. 921 // share_group's CommandBuffer.
831 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): shared context was " 922 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): shared context was "
832 "not initialized"; 923 "not initialized";
833 return nullptr; 924 return nullptr;
834 } 925 }
835 926
836 if (share_group && share_group->decoder()->WasContextLost()) { 927 if (share_group && share_group->decoder()->WasContextLost()) {
837 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): shared context was " 928 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): shared context was "
838 "already lost"; 929 "already lost";
839 return nullptr; 930 return nullptr;
840 } 931 }
841 932
842 CommandBufferId command_buffer_id = 933 CommandBufferId command_buffer_id =
843 GenerateCommandBufferId(client_id_, route_id); 934 GenerateCommandBufferId(client_id_, route_id);
844 935
845 // TODO(sunnyps): Lookup sequence id using stream id to sequence id map. 936 SequenceId sequence_id;
846 SequenceId sequence_id = message_queue_->sequence_id(); 937 if (scheduler_) {
938 sequence_id = stream_sequences_[stream_id];
939 if (sequence_id.is_null()) {
940 sequence_id = scheduler_->CreateSequence(stream_priority);
941 stream_sequences_[stream_id] = sequence_id;
942 }
943 } else {
944 sequence_id = message_queue_->sequence_id();
945 }
847 946
848 std::unique_ptr<GpuCommandBufferStub> stub(GpuCommandBufferStub::Create( 947 std::unique_ptr<GpuCommandBufferStub> stub(GpuCommandBufferStub::Create(
849 this, share_group, init_params, command_buffer_id, sequence_id, stream_id, 948 this, share_group, init_params, command_buffer_id, sequence_id, stream_id,
850 route_id, std::move(shared_state_shm))); 949 route_id, std::move(shared_state_shm)));
851 950
852 if (!AddRoute(route_id, sequence_id, stub.get())) { 951 if (!AddRoute(route_id, sequence_id, stub.get())) {
853 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): failed to add route"; 952 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): failed to add route";
854 return nullptr; 953 return nullptr;
855 } 954 }
856 955
(...skipping 89 matching lines...)
946 1045
947 return manager->gpu_memory_buffer_factory() 1046 return manager->gpu_memory_buffer_factory()
948 ->AsImageFactory() 1047 ->AsImageFactory()
949 ->CreateImageForGpuMemoryBuffer(handle, size, format, internalformat, 1048 ->CreateImageForGpuMemoryBuffer(handle, size, format, internalformat,
950 client_id_, surface_handle); 1049 client_id_, surface_handle);
951 } 1050 }
952 } 1051 }
953 } 1052 }
954 1053
955 } // namespace gpu 1054 } // namespace gpu
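
For reference, a condensed sketch of the dispatch pattern this patch introduces, pieced together from the diff above. The Scheduler calls are the ones used in this file; the combined control flow is illustrative only and does not correspond to a single function in the real code:

    // With a Scheduler, each stream maps to its own SequenceId and messages
    // run as scheduler tasks ordered by their sync-token fences; without one,
    // the legacy GpuChannelMessageQueue path is kept.
    if (scheduler_) {
      SequenceId sequence_id = stream_sequences_[stream_id];
      if (sequence_id.is_null()) {
        sequence_id = scheduler_->CreateSequence(stream_priority);
        stream_sequences_[stream_id] = sequence_id;
      }
      scheduler_->ScheduleTask(sequence_id,
                               base::BindOnce(&GpuChannel::HandleMessage,
                                              AsWeakPtr(), message),
                               sync_token_fences);
    } else {
      message_queue_->PushBackMessage(message);
    }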