Chromium Code Reviews

Side by Side Diff: gpu/ipc/service/gpu_channel.cc

Issue 2881813002: Revert of gpu: GPU service scheduler. (Closed)
Patch Set: Created 3 years, 7 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "gpu/ipc/service/gpu_channel.h" 5 #include "gpu/ipc/service/gpu_channel.h"
6 6
7 #include <utility> 7 #include <utility>
8 8
9 #if defined(OS_WIN) 9 #if defined(OS_WIN)
10 #include <windows.h> 10 #include <windows.h>
(...skipping 16 matching lines...)
27 #include "base/threading/thread_task_runner_handle.h" 27 #include "base/threading/thread_task_runner_handle.h"
28 #include "base/timer/timer.h" 28 #include "base/timer/timer.h"
29 #include "base/trace_event/memory_dump_manager.h" 29 #include "base/trace_event/memory_dump_manager.h"
30 #include "base/trace_event/process_memory_dump.h" 30 #include "base/trace_event/process_memory_dump.h"
31 #include "base/trace_event/trace_event.h" 31 #include "base/trace_event/trace_event.h"
32 #include "build/build_config.h" 32 #include "build/build_config.h"
33 #include "gpu/command_buffer/common/mailbox.h" 33 #include "gpu/command_buffer/common/mailbox.h"
34 #include "gpu/command_buffer/service/image_factory.h" 34 #include "gpu/command_buffer/service/image_factory.h"
35 #include "gpu/command_buffer/service/mailbox_manager.h" 35 #include "gpu/command_buffer/service/mailbox_manager.h"
36 #include "gpu/command_buffer/service/preemption_flag.h" 36 #include "gpu/command_buffer/service/preemption_flag.h"
37 #include "gpu/command_buffer/service/scheduler.h"
38 #include "gpu/ipc/common/gpu_messages.h" 37 #include "gpu/ipc/common/gpu_messages.h"
39 #include "gpu/ipc/service/gpu_channel_manager.h" 38 #include "gpu/ipc/service/gpu_channel_manager.h"
40 #include "gpu/ipc/service/gpu_channel_manager_delegate.h" 39 #include "gpu/ipc/service/gpu_channel_manager_delegate.h"
41 #include "gpu/ipc/service/gpu_memory_buffer_factory.h" 40 #include "gpu/ipc/service/gpu_memory_buffer_factory.h"
42 #include "ipc/ipc_channel.h" 41 #include "ipc/ipc_channel.h"
43 #include "ipc/message_filter.h" 42 #include "ipc/message_filter.h"
44 #include "ui/gl/gl_context.h" 43 #include "ui/gl/gl_context.h"
45 #include "ui/gl/gl_image_shared_memory.h" 44 #include "ui/gl/gl_image_shared_memory.h"
46 #include "ui/gl/gl_surface.h" 45 #include "ui/gl/gl_surface.h"
47 46
(...skipping 45 matching lines...)
93 void SyncChannelFilteredSender::AddFilter(IPC::MessageFilter* filter) { 92 void SyncChannelFilteredSender::AddFilter(IPC::MessageFilter* filter) {
94 channel_->AddFilter(filter); 93 channel_->AddFilter(filter);
95 } 94 }
96 95
97 void SyncChannelFilteredSender::RemoveFilter(IPC::MessageFilter* filter) { 96 void SyncChannelFilteredSender::RemoveFilter(IPC::MessageFilter* filter) {
98 channel_->RemoveFilter(filter); 97 channel_->RemoveFilter(filter);
99 } 98 }
100 99
101 GpuChannelMessageQueue::GpuChannelMessageQueue( 100 GpuChannelMessageQueue::GpuChannelMessageQueue(
102 GpuChannel* channel, 101 GpuChannel* channel,
103 scoped_refptr<SyncPointOrderData> sync_point_order_data,
104 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner, 102 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
105 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner, 103 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
106 scoped_refptr<PreemptionFlag> preempting_flag, 104 scoped_refptr<PreemptionFlag> preempting_flag,
107 scoped_refptr<PreemptionFlag> preempted_flag) 105 scoped_refptr<PreemptionFlag> preempted_flag,
106 SyncPointManager* sync_point_manager)
108 : channel_(channel), 107 : channel_(channel),
109 max_preemption_time_( 108 max_preemption_time_(
110 base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs)), 109 base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs)),
111 timer_(new base::OneShotTimer), 110 timer_(new base::OneShotTimer),
112 sync_point_order_data_(std::move(sync_point_order_data)), 111 sync_point_order_data_(sync_point_manager->CreateSyncPointOrderData()),
113 main_task_runner_(std::move(main_task_runner)), 112 main_task_runner_(std::move(main_task_runner)),
114 io_task_runner_(std::move(io_task_runner)), 113 io_task_runner_(std::move(io_task_runner)),
115 preempting_flag_(std::move(preempting_flag)), 114 preempting_flag_(std::move(preempting_flag)),
116 preempted_flag_(std::move(preempted_flag)) { 115 preempted_flag_(std::move(preempted_flag)),
116 sync_point_manager_(sync_point_manager) {
117 timer_->SetTaskRunner(io_task_runner_); 117 timer_->SetTaskRunner(io_task_runner_);
118 io_thread_checker_.DetachFromThread(); 118 io_thread_checker_.DetachFromThread();
119 } 119 }
120 120
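
The constructor hunk above shows the shape change being reverted: with the scheduler, GpuChannelMessageQueue received a ready-made SyncPointOrderData, while the reverted code takes the SyncPointManager and creates the order data itself (and therefore also keeps the manager pointer as sync_point_manager_). Below is a minimal standalone sketch of the two constructor shapes, not the real classes; OrderData, SyncPointManager, and both queue names are hypothetical stand-ins, and std::shared_ptr substitutes for scoped_refptr.

#include <memory>
#include <utility>

struct OrderData {};
struct SyncPointManager {
  std::shared_ptr<OrderData> CreateOrderData() { return std::make_shared<OrderData>(); }
};

// Pattern on the left (pre-revert): the dependency is created by the caller
// and injected, so the queue never needs to know about the manager.
class QueueTakingOrderData {
 public:
  explicit QueueTakingOrderData(std::shared_ptr<OrderData> order_data)
      : order_data_(std::move(order_data)) {}
 private:
  std::shared_ptr<OrderData> order_data_;
};

// Pattern on the right (post-revert): the queue creates the order data itself
// and keeps the manager pointer so it can release the order data later.
class QueueTakingManager {
 public:
  explicit QueueTakingManager(SyncPointManager* manager)
      : order_data_(manager->CreateOrderData()), sync_point_manager_(manager) {}
 private:
  std::shared_ptr<OrderData> order_data_;
  SyncPointManager* sync_point_manager_;
};

Injecting the already-created order data keeps the queue decoupled from the manager; creating it internally keeps call sites simpler, which is the trade-off the revert returns to.
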
121 GpuChannelMessageQueue::~GpuChannelMessageQueue() { 121 GpuChannelMessageQueue::~GpuChannelMessageQueue() {
122 DCHECK(channel_messages_.empty()); 122 DCHECK(channel_messages_.empty());
123 } 123 }
124 124
125 void GpuChannelMessageQueue::Destroy() { 125 void GpuChannelMessageQueue::Destroy() {
126 // We guarantee that the queue will no longer be modified after Destroy is
127 // called, it is now safe to modify the queue without the lock. All public
128 // facing modifying functions check enabled_ while all private modifying
129 // functions DCHECK(enabled_) to enforce this.
130 while (!channel_messages_.empty()) {
131 const IPC::Message& msg = channel_messages_.front()->message;
132 if (msg.is_sync()) {
133 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&msg);
134 reply->set_reply_error();
135 channel_->Send(reply);
136 }
137 channel_messages_.pop_front();
138 }
139
126 sync_point_order_data_->Destroy(); 140 sync_point_order_data_->Destroy();
127 141
128 if (preempting_flag_) 142 if (preempting_flag_)
129 preempting_flag_->Reset(); 143 preempting_flag_->Reset();
130 144
131 // Destroy timer on io thread. 145 // Destroy timer on io thread.
132 io_task_runner_->PostTask( 146 io_task_runner_->PostTask(
133 FROM_HERE, base::Bind([](std::unique_ptr<base::OneShotTimer>) {}, 147 FROM_HERE, base::Bind([](std::unique_ptr<base::OneShotTimer>) {},
134 base::Passed(&timer_))); 148 base::Passed(&timer_)));
135 149
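
The right-hand side of the Destroy() hunk above restores draining of channel_messages_: every queued message is dropped, but any pending sync message still gets a generated error reply so the client's blocking Send() returns instead of hanging. A minimal standalone sketch of that pattern, using only the standard library; FakeMessage, FakeMessageQueue, and the reply callback are hypothetical, not Chromium types.

#include <deque>
#include <functional>
#include <iostream>
#include <string>

struct FakeMessage {
  std::string name;
  bool is_sync = false;
};

class FakeMessageQueue {
 public:
  explicit FakeMessageQueue(std::function<void(const FakeMessage&)> send_error_reply)
      : send_error_reply_(std::move(send_error_reply)) {}

  void Push(FakeMessage msg) { messages_.push_back(std::move(msg)); }

  // Mirrors the restored drain loop: discard everything, but reply with an
  // error to any sync message so its sender unblocks.
  void Destroy() {
    while (!messages_.empty()) {
      const FakeMessage& msg = messages_.front();
      if (msg.is_sync)
        send_error_reply_(msg);
      messages_.pop_front();
    }
  }

 private:
  std::deque<FakeMessage> messages_;
  std::function<void(const FakeMessage&)> send_error_reply_;
};

int main() {
  FakeMessageQueue queue([](const FakeMessage& msg) {
    std::cout << "error reply for sync message: " << msg.name << "\n";
  });
  queue.Push({"AsyncFlush", false});
  queue.Push({"Nop", true});
  queue.Destroy();  // Prints one error reply, silently drops the async message.
}
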
(...skipping 282 matching lines...)
418 DCHECK(preemption_state_ == CHECKING || preemption_state_ == PREEMPTING); 432 DCHECK(preemption_state_ == CHECKING || preemption_state_ == PREEMPTING);
419 DCHECK(!scheduled_); 433 DCHECK(!scheduled_);
420 434
421 preemption_state_ = WOULD_PREEMPT_DESCHEDULED; 435 preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
422 preempting_flag_->Reset(); 436 preempting_flag_->Reset();
423 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); 437 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
424 } 438 }
425 439
426 GpuChannelMessageFilter::GpuChannelMessageFilter( 440 GpuChannelMessageFilter::GpuChannelMessageFilter(
427 GpuChannel* gpu_channel, 441 GpuChannel* gpu_channel,
428 Scheduler* scheduler,
429 scoped_refptr<GpuChannelMessageQueue> message_queue, 442 scoped_refptr<GpuChannelMessageQueue> message_queue,
430 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner) 443 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner)
431 : gpu_channel_(gpu_channel), 444 : gpu_channel_(gpu_channel),
432 scheduler_(scheduler),
433 message_queue_(std::move(message_queue)), 445 message_queue_(std::move(message_queue)),
434 main_task_runner_(std::move(main_task_runner)) {} 446 main_task_runner_(std::move(main_task_runner)) {}
435 447
436 GpuChannelMessageFilter::~GpuChannelMessageFilter() { 448 GpuChannelMessageFilter::~GpuChannelMessageFilter() {
437 DCHECK(!gpu_channel_); 449 DCHECK(!gpu_channel_);
438 } 450 }
439 451
440 void GpuChannelMessageFilter::Destroy() { 452 void GpuChannelMessageFilter::Destroy() {
441 base::AutoLock auto_lock(gpu_channel_lock_); 453 base::AutoLock auto_lock(gpu_channel_lock_);
442 gpu_channel_ = nullptr; 454 gpu_channel_ = nullptr;
443 } 455 }
444 456
445 void GpuChannelMessageFilter::AddRoute(int32_t route_id,
446 SequenceId sequence_id) {
447 base::AutoLock auto_lock(gpu_channel_lock_);
448 DCHECK(gpu_channel_);
449 DCHECK(scheduler_);
450 route_sequences_[route_id] = sequence_id;
451 }
452
453 void GpuChannelMessageFilter::RemoveRoute(int32_t route_id) {
454 base::AutoLock auto_lock(gpu_channel_lock_);
455 DCHECK(gpu_channel_);
456 DCHECK(scheduler_);
457 route_sequences_.erase(route_id);
458 }
459
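
The two deleted methods above are the scheduler-era bookkeeping that maps a route id to its scheduler SequenceId under the filter's lock (gpu_channel_lock_), so the IO thread can pick the right sequence when a message arrives. A minimal sketch of that mapping with standard-library types; SequenceId here is a hypothetical uint64_t stand-in and a plain std::mutex replaces base::Lock.

#include <cstdint>
#include <mutex>
#include <unordered_map>

using SequenceId = uint64_t;  // Hypothetical stand-in for gpu::SequenceId; 0 means "unknown route".

class RouteSequenceMap {
 public:
  void AddRoute(int32_t route_id, SequenceId sequence_id) {
    std::lock_guard<std::mutex> lock(lock_);
    route_sequences_[route_id] = sequence_id;
  }

  void RemoveRoute(int32_t route_id) {
    std::lock_guard<std::mutex> lock(lock_);
    route_sequences_.erase(route_id);
  }

  // Used at dispatch time to find the sequence a message should run on.
  SequenceId Lookup(int32_t route_id) {
    std::lock_guard<std::mutex> lock(lock_);
    auto it = route_sequences_.find(route_id);
    return it == route_sequences_.end() ? 0 : it->second;
  }

 private:
  std::mutex lock_;
  std::unordered_map<int32_t, SequenceId> route_sequences_;
};
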
460 void GpuChannelMessageFilter::OnFilterAdded(IPC::Channel* channel) { 457 void GpuChannelMessageFilter::OnFilterAdded(IPC::Channel* channel) {
461 DCHECK(!ipc_channel_); 458 DCHECK(!ipc_channel_);
462 ipc_channel_ = channel; 459 ipc_channel_ = channel;
463 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) 460 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_)
464 filter->OnFilterAdded(ipc_channel_); 461 filter->OnFilterAdded(ipc_channel_);
465 } 462 }
466 463
467 void GpuChannelMessageFilter::OnFilterRemoved() { 464 void GpuChannelMessageFilter::OnFilterRemoved() {
468 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) 465 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_)
469 filter->OnFilterRemoved(); 466 filter->OnFilterRemoved();
(...skipping 30 matching lines...)
500 void GpuChannelMessageFilter::RemoveChannelFilter( 497 void GpuChannelMessageFilter::RemoveChannelFilter(
501 scoped_refptr<IPC::MessageFilter> filter) { 498 scoped_refptr<IPC::MessageFilter> filter) {
502 if (ipc_channel_) 499 if (ipc_channel_)
503 filter->OnFilterRemoved(); 500 filter->OnFilterRemoved();
504 base::Erase(channel_filters_, filter); 501 base::Erase(channel_filters_, filter);
505 } 502 }
506 503
507 bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { 504 bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
508 DCHECK(ipc_channel_); 505 DCHECK(ipc_channel_);
509 506
510 if (!gpu_channel_)
511 return MessageErrorHandler(message, "Channel destroyed");
512
513 if (message.should_unblock() || message.is_reply()) 507 if (message.should_unblock() || message.is_reply())
514 return MessageErrorHandler(message, "Unexpected message type"); 508 return MessageErrorHandler(message, "Unexpected message type");
515 509
516 if (message.type() == GpuChannelMsg_Nop::ID) { 510 if (message.type() == GpuChannelMsg_Nop::ID) {
517 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); 511 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
518 Send(reply); 512 Send(reply);
519 return true; 513 return true;
520 } 514 }
521 515
522 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { 516 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
523 if (filter->OnMessageReceived(message)) 517 if (filter->OnMessageReceived(message))
524 return true; 518 return true;
525 } 519 }
526 520
527 base::AutoLock auto_lock(gpu_channel_lock_); 521 base::AutoLock auto_lock(gpu_channel_lock_);
528 if (!gpu_channel_) 522 if (!gpu_channel_)
529 return MessageErrorHandler(message, "Channel destroyed"); 523 return MessageErrorHandler(message, "Channel destroyed");
530 524
531 if (message.routing_id() == MSG_ROUTING_CONTROL || 525 if (message.routing_id() == MSG_ROUTING_CONTROL ||
532 message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || 526 message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
533 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { 527 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
534 // It's OK to post task that may never run even for sync messages, because 528 // It's OK to post task that may never run even for sync messages, because
535 // if the channel is destroyed, the client Send will fail. 529 // if the channel is destroyed, the client Send will fail.
536 main_task_runner_->PostTask(FROM_HERE, 530 main_task_runner_->PostTask(FROM_HERE,
537 base::Bind(&GpuChannel::HandleOutOfOrderMessage, 531 base::Bind(&GpuChannel::HandleOutOfOrderMessage,
538 gpu_channel_->AsWeakPtr(), message)); 532 gpu_channel_->AsWeakPtr(), message));
539 } else if (scheduler_) {
540 SequenceId sequence_id = route_sequences_[message.routing_id()];
541 if (sequence_id.is_null())
542 return MessageErrorHandler(message, "Invalid route");
543
544 std::vector<SyncToken> sync_token_fences;
545 if (message.type() == GpuCommandBufferMsg_AsyncFlush::ID) {
546 GpuCommandBufferMsg_AsyncFlush::Param params;
547 if (!GpuCommandBufferMsg_AsyncFlush::Read(&message, &params))
548 return MessageErrorHandler(message, "Invalid flush message");
549 sync_token_fences = std::get<3>(params);
550 }
551
552 scheduler_->ScheduleTask(sequence_id,
553 base::BindOnce(&GpuChannel::HandleMessage,
554 gpu_channel_->AsWeakPtr(), message),
555 sync_token_fences);
556 } else { 533 } else {
557 // Message queue takes care of PostTask. 534 // Message queue takes care of PostTask.
558 message_queue_->PushBackMessage(message); 535 message_queue_->PushBackMessage(message);
559 } 536 }
560 537
561 return true; 538 return true;
562 } 539 }
563 540
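
The OnMessageReceived() hunk above is the core of the revert on the IO thread: control messages and the two Wait* sync messages are still posted out of order to the main thread, but the deleted branch that looked up the route's sequence, pulled the sync-token fences out of GpuCommandBufferMsg_AsyncFlush, and handed the task to the scheduler is gone, so everything else goes back through GpuChannelMessageQueue. A small standalone sketch of that three-way routing decision; the Dispatch enum and MessageInfo fields are hypothetical simplifications.

enum class Dispatch { kOutOfOrderOnMainThread, kScheduleOnSequence, kPushToMessageQueue };

struct MessageInfo {
  bool is_control = false;       // routing_id == MSG_ROUTING_CONTROL in the real code.
  bool is_wait_message = false;  // WaitForTokenInRange / WaitForGetOffsetInRange.
};

// With the scheduler, ordinary messages are scheduled on the route's sequence;
// after the revert they are pushed to the per-channel message queue instead.
Dispatch Classify(const MessageInfo& msg, bool have_scheduler) {
  if (msg.is_control || msg.is_wait_message)
    return Dispatch::kOutOfOrderOnMainThread;
  return have_scheduler ? Dispatch::kScheduleOnSequence
                        : Dispatch::kPushToMessageQueue;
}

int main() {
  MessageInfo flush;  // e.g. an AsyncFlush aimed at a command buffer route.
  return Classify(flush, false) == Dispatch::kPushToMessageQueue ? 0 : 1;
}
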
564 bool GpuChannelMessageFilter::Send(IPC::Message* message) { 541 bool GpuChannelMessageFilter::Send(IPC::Message* message) {
565 return ipc_channel_->Send(message); 542 return ipc_channel_->Send(message);
(...skipping 11 matching lines...)
577 } 554 }
578 555
579 // Definitions for constructor and destructor of this interface are needed to 556 // Definitions for constructor and destructor of this interface are needed to
580 // avoid MSVC LNK2019. 557 // avoid MSVC LNK2019.
581 FilteredSender::FilteredSender() = default; 558 FilteredSender::FilteredSender() = default;
582 559
583 FilteredSender::~FilteredSender() = default; 560 FilteredSender::~FilteredSender() = default;
584 561
585 GpuChannel::GpuChannel( 562 GpuChannel::GpuChannel(
586 GpuChannelManager* gpu_channel_manager, 563 GpuChannelManager* gpu_channel_manager,
587 Scheduler* scheduler,
588 SyncPointManager* sync_point_manager, 564 SyncPointManager* sync_point_manager,
589 GpuWatchdogThread* watchdog, 565 GpuWatchdogThread* watchdog,
590 scoped_refptr<gl::GLShareGroup> share_group, 566 scoped_refptr<gl::GLShareGroup> share_group,
591 scoped_refptr<gles2::MailboxManager> mailbox_manager, 567 scoped_refptr<gles2::MailboxManager> mailbox_manager,
592 scoped_refptr<PreemptionFlag> preempting_flag, 568 scoped_refptr<PreemptionFlag> preempting_flag,
593 scoped_refptr<PreemptionFlag> preempted_flag, 569 scoped_refptr<PreemptionFlag> preempted_flag,
594 scoped_refptr<base::SingleThreadTaskRunner> task_runner, 570 scoped_refptr<base::SingleThreadTaskRunner> task_runner,
595 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner, 571 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
596 int32_t client_id, 572 int32_t client_id,
597 uint64_t client_tracing_id, 573 uint64_t client_tracing_id,
598 bool is_gpu_host) 574 bool is_gpu_host)
599 : gpu_channel_manager_(gpu_channel_manager), 575 : gpu_channel_manager_(gpu_channel_manager),
600 scheduler_(scheduler),
601 sync_point_manager_(sync_point_manager), 576 sync_point_manager_(sync_point_manager),
602 preempting_flag_(preempting_flag), 577 preempting_flag_(preempting_flag),
603 preempted_flag_(preempted_flag), 578 preempted_flag_(preempted_flag),
604 client_id_(client_id), 579 client_id_(client_id),
605 client_tracing_id_(client_tracing_id), 580 client_tracing_id_(client_tracing_id),
606 task_runner_(task_runner), 581 task_runner_(task_runner),
607 io_task_runner_(io_task_runner), 582 io_task_runner_(io_task_runner),
608 share_group_(share_group), 583 share_group_(share_group),
609 mailbox_manager_(mailbox_manager), 584 mailbox_manager_(mailbox_manager),
610 watchdog_(watchdog), 585 watchdog_(watchdog),
611 is_gpu_host_(is_gpu_host), 586 is_gpu_host_(is_gpu_host),
612 weak_factory_(this) { 587 weak_factory_(this) {
613 DCHECK(gpu_channel_manager_); 588 DCHECK(gpu_channel_manager);
614 DCHECK(client_id_); 589 DCHECK(client_id);
615 590
616 if (!scheduler_) { 591 message_queue_ = new GpuChannelMessageQueue(this, task_runner, io_task_runner,
617 message_queue_ = new GpuChannelMessageQueue( 592 preempting_flag, preempted_flag,
618 this, sync_point_manager->CreateSyncPointOrderData(), task_runner, 593 sync_point_manager);
619 io_task_runner, preempting_flag, preempted_flag);
620 }
621 594
622 filter_ = 595 filter_ = new GpuChannelMessageFilter(this, message_queue_, task_runner);
623 new GpuChannelMessageFilter(this, scheduler, message_queue_, task_runner);
624 } 596 }
625 597
626 GpuChannel::~GpuChannel() { 598 GpuChannel::~GpuChannel() {
627 // Clear stubs first because of dependencies. 599 // Clear stubs first because of dependencies.
628 stubs_.clear(); 600 stubs_.clear();
629 601
630 // Destroy filter first so that no message queue gets no more messages. 602 // Destroy filter first so that no message queue gets no more messages.
631 filter_->Destroy(); 603 filter_->Destroy();
632 604
633 if (scheduler_) { 605 message_queue_->Destroy();
634 for (const auto& kv : stream_sequences_)
635 scheduler_->DestroySequence(kv.second);
636 } else {
637 message_queue_->Destroy();
638 }
639 606
640 DCHECK(!preempting_flag_ || !preempting_flag_->IsSet()); 607 DCHECK(!preempting_flag_ || !preempting_flag_->IsSet());
641 } 608 }
642 609
643 void GpuChannel::Init(std::unique_ptr<FilteredSender> channel) { 610 void GpuChannel::Init(std::unique_ptr<FilteredSender> channel) {
644 channel_ = std::move(channel); 611 channel_ = std::move(channel);
645 channel_->AddFilter(filter_.get()); 612 channel_->AddFilter(filter_.get());
646 } 613 }
647 614
648 void GpuChannel::SetUnhandledMessageListener(IPC::Listener* listener) { 615 void GpuChannel::SetUnhandledMessageListener(IPC::Listener* listener) {
(...skipping 33 matching lines...)
682 649
683 if (!channel_) { 650 if (!channel_) {
684 delete message; 651 delete message;
685 return false; 652 return false;
686 } 653 }
687 654
688 return channel_->Send(message); 655 return channel_->Send(message);
689 } 656 }
690 657
691 void GpuChannel::OnCommandBufferScheduled(GpuCommandBufferStub* stub) { 658 void GpuChannel::OnCommandBufferScheduled(GpuCommandBufferStub* stub) {
692 if (scheduler_) { 659 message_queue_->SetScheduled(true);
693 scheduler_->EnableSequence(stub->sequence_id()); 660 // TODO(sunnyps): Enable gpu scheduler task queue for stub's sequence.
694 } else {
695 message_queue_->SetScheduled(true);
696 }
697 } 661 }
698 662
699 void GpuChannel::OnCommandBufferDescheduled(GpuCommandBufferStub* stub) { 663 void GpuChannel::OnCommandBufferDescheduled(GpuCommandBufferStub* stub) {
700 if (scheduler_) { 664 message_queue_->SetScheduled(false);
701 scheduler_->DisableSequence(stub->sequence_id()); 665 // TODO(sunnyps): Disable gpu scheduler task queue for stub's sequence.
702 } else {
703 message_queue_->SetScheduled(false);
704 }
705 } 666 }
706 667
707 GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) { 668 GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) {
708 auto it = stubs_.find(route_id); 669 auto it = stubs_.find(route_id);
709 if (it == stubs_.end()) 670 if (it == stubs_.end())
710 return nullptr; 671 return nullptr;
711 672
712 return it->second.get(); 673 return it->second.get();
713 } 674 }
714 675
715 void GpuChannel::LoseAllContexts() { 676 void GpuChannel::LoseAllContexts() {
716 gpu_channel_manager_->LoseAllContexts(); 677 gpu_channel_manager_->LoseAllContexts();
717 } 678 }
718 679
719 void GpuChannel::MarkAllContextsLost() { 680 void GpuChannel::MarkAllContextsLost() {
720 for (auto& kv : stubs_) 681 for (auto& kv : stubs_)
721 kv.second->MarkContextLost(); 682 kv.second->MarkContextLost();
722 } 683 }
723 684
724 bool GpuChannel::AddRoute(int32_t route_id, 685 bool GpuChannel::AddRoute(int32_t route_id,
725 SequenceId sequence_id, 686 SequenceId sequence_id,
726 IPC::Listener* listener) { 687 IPC::Listener* listener) {
727 if (scheduler_) 688 // TODO(sunnyps): Add route id to sequence id mapping to filter.
728 filter_->AddRoute(route_id, sequence_id);
729 return router_.AddRoute(route_id, listener); 689 return router_.AddRoute(route_id, listener);
730 } 690 }
731 691
732 void GpuChannel::RemoveRoute(int32_t route_id) { 692 void GpuChannel::RemoveRoute(int32_t route_id) {
733 if (scheduler_)
734 filter_->RemoveRoute(route_id);
735 router_.RemoveRoute(route_id); 693 router_.RemoveRoute(route_id);
736 } 694 }
737 695
738 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) { 696 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
739 bool handled = true; 697 bool handled = true;
740 IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg) 698 IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
741 IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateCommandBuffer, 699 IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateCommandBuffer,
742 OnCreateCommandBuffer) 700 OnCreateCommandBuffer)
743 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer, 701 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
744 OnDestroyCommandBuffer) 702 OnDestroyCommandBuffer)
745 IPC_MESSAGE_HANDLER(GpuChannelMsg_GetDriverBugWorkArounds, 703 IPC_MESSAGE_HANDLER(GpuChannelMsg_GetDriverBugWorkArounds,
746 OnGetDriverBugWorkArounds) 704 OnGetDriverBugWorkArounds)
747 IPC_MESSAGE_UNHANDLED(handled = false) 705 IPC_MESSAGE_UNHANDLED(handled = false)
748 IPC_END_MESSAGE_MAP() 706 IPC_END_MESSAGE_MAP()
749 return handled; 707 return handled;
750 } 708 }
751 709
752 void GpuChannel::HandleMessage(const IPC::Message& msg) {
753 int32_t routing_id = msg.routing_id();
754 GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id);
755
756 DCHECK(!stub || stub->IsScheduled());
757
758 DVLOG(1) << "received message @" << &msg << " on channel @" << this
759 << " with type " << msg.type();
760
761 HandleMessageHelper(msg);
762
763 // If we get descheduled or yield while processing a message.
764 if (stub && (stub->HasUnprocessedCommands() || !stub->IsScheduled())) {
765 DCHECK((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID == msg.type() ||
766 (uint32_t)GpuCommandBufferMsg_WaitSyncToken::ID == msg.type());
767 scheduler_->ContinueTask(
768 stub->sequence_id(),
769 base::BindOnce(&GpuChannel::HandleMessage, AsWeakPtr(), msg));
770 }
771 }
772
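
The deleted GpuChannel::HandleMessage() above shows the scheduler-era continuation pattern: if, after handling a message, the stub still has unprocessed commands or was descheduled, the same handler is re-posted via Scheduler::ContinueTask so the remaining work runs later without blocking the sequence. A tiny standalone model of that re-posting loop; the task queue and the command counter are hypothetical.

#include <deque>
#include <functional>
#include <iostream>

int main() {
  std::deque<std::function<void()>> task_queue;
  int remaining_commands = 3;

  // Handler that processes one "command" per run and re-posts itself while
  // work remains, analogous to re-posting the handler via ContinueTask.
  std::function<void()> handle = [&]() {
    --remaining_commands;
    std::cout << "processed one command, " << remaining_commands << " left\n";
    if (remaining_commands > 0)
      task_queue.push_back(handle);
  };

  task_queue.push_back(handle);
  while (!task_queue.empty()) {
    std::function<void()> task = task_queue.front();
    task_queue.pop_front();
    task();
  }
  return 0;
}
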
773 void GpuChannel::HandleMessageOnQueue() { 710 void GpuChannel::HandleMessageOnQueue() {
774 const GpuChannelMessage* channel_msg = 711 const GpuChannelMessage* channel_msg =
775 message_queue_->BeginMessageProcessing(); 712 message_queue_->BeginMessageProcessing();
776 if (!channel_msg) 713 if (!channel_msg)
777 return; 714 return;
778 715
779 const IPC::Message& msg = channel_msg->message; 716 const IPC::Message& msg = channel_msg->message;
780 int32_t routing_id = msg.routing_id(); 717 int32_t routing_id = msg.routing_id();
781 GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id); 718 GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id);
782 719
(...skipping 72 matching lines...)
855 *capabilities = gpu::Capabilities(); 792 *capabilities = gpu::Capabilities();
856 } 793 }
857 } 794 }
858 795
859 std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer( 796 std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer(
860 const GPUCreateCommandBufferConfig& init_params, 797 const GPUCreateCommandBufferConfig& init_params,
861 int32_t route_id, 798 int32_t route_id,
862 std::unique_ptr<base::SharedMemory> shared_state_shm) { 799 std::unique_ptr<base::SharedMemory> shared_state_shm) {
863 if (init_params.surface_handle != kNullSurfaceHandle && !is_gpu_host_) { 800 if (init_params.surface_handle != kNullSurfaceHandle && !is_gpu_host_) {
864 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): attempt to create a " 801 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): attempt to create a "
865 "view context on a non-privileged channel"; 802 "view context on a non-priviledged channel";
866 return nullptr; 803 return nullptr;
867 } 804 }
868 805
869 int32_t share_group_id = init_params.share_group_id; 806 int32_t share_group_id = init_params.share_group_id;
870 GpuCommandBufferStub* share_group = LookupCommandBuffer(share_group_id); 807 GpuCommandBufferStub* share_group = LookupCommandBuffer(share_group_id);
871 808
872 if (!share_group && share_group_id != MSG_ROUTING_NONE) { 809 if (!share_group && share_group_id != MSG_ROUTING_NONE) {
873 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): invalid share group id"; 810 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): invalid share group id";
874 return nullptr; 811 return nullptr;
875 } 812 }
876 813
877 int32_t stream_id = init_params.stream_id; 814 int32_t stream_id = init_params.stream_id;
878 if (share_group && stream_id != share_group->stream_id()) { 815 if (share_group && stream_id != share_group->stream_id()) {
879 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): stream id does not " 816 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): stream id does not "
880 "match share group stream id"; 817 "match share group stream id";
881 return nullptr; 818 return nullptr;
882 } 819 }
883 820
884 SchedulingPriority stream_priority = init_params.stream_priority; 821 GpuStreamPriority stream_priority = init_params.stream_priority;
885 if (stream_priority <= SchedulingPriority::kHigh && !is_gpu_host_) { 822 if (stream_priority == GpuStreamPriority::REAL_TIME && !is_gpu_host_) {
886 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): high priority stream " 823 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): real time stream "
887 "not allowed on a non-privileged channel"; 824 "priority not allowed";
888 return nullptr; 825 return nullptr;
889 } 826 }
890 827
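
This hunk also changes the privilege check: the scheduler version rejected stream priorities at or above kHigh on non-GPU-host channels (assuming the usual lower-value-is-higher-priority ordering), while the reverted code only rejects REAL_TIME. A small sketch of the two rules; the Priority enum and its numeric ordering here are assumptions made for illustration, not the actual Chromium enums.

#include <iostream>

enum class Priority { kRealTime = 0, kHigh = 1, kNormal = 2, kLow = 3 };

bool AllowedForUnprivilegedClient(Priority p, bool scheduler_enabled) {
  if (scheduler_enabled)
    return p > Priority::kHigh;      // Rejects kRealTime and kHigh, mirroring "<= kHigh".
  return p != Priority::kRealTime;   // Old rule: only REAL_TIME is restricted.
}

int main() {
  std::cout << AllowedForUnprivilegedClient(Priority::kHigh, true) << "\n";   // 0: rejected.
  std::cout << AllowedForUnprivilegedClient(Priority::kHigh, false) << "\n";  // 1: allowed.
}
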
891 if (share_group && !share_group->decoder()) { 828 if (share_group && !share_group->decoder()) {
892 // This should catch test errors where we did not Initialize the 829 // This should catch test errors where we did not Initialize the
893 // share_group's CommandBuffer. 830 // share_group's CommandBuffer.
894 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): shared context was " 831 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): shared context was "
895 "not initialized"; 832 "not initialized";
896 return nullptr; 833 return nullptr;
897 } 834 }
898 835
899 if (share_group && share_group->decoder()->WasContextLost()) { 836 if (share_group && share_group->decoder()->WasContextLost()) {
900 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): shared context was " 837 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): shared context was "
901 "already lost"; 838 "already lost";
902 return nullptr; 839 return nullptr;
903 } 840 }
904 841
905 CommandBufferId command_buffer_id = 842 CommandBufferId command_buffer_id =
906 GenerateCommandBufferId(client_id_, route_id); 843 GenerateCommandBufferId(client_id_, route_id);
907 844
908 SequenceId sequence_id; 845 // TODO(sunnyps): Lookup sequence id using stream id to sequence id map.
909 if (scheduler_) { 846 SequenceId sequence_id = message_queue_->sequence_id();
910 sequence_id = stream_sequences_[stream_id];
911 if (sequence_id.is_null()) {
912 sequence_id = scheduler_->CreateSequence(stream_priority);
913 stream_sequences_[stream_id] = sequence_id;
914 }
915 } else {
916 sequence_id = message_queue_->sequence_id();
917 }
918 847
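
The deleted block above lazily created one scheduler sequence per stream id and cached it in stream_sequences_, so command buffers on the same stream share a sequence and keep their relative ordering; the reverted code falls back to the single per-channel message queue's sequence. A minimal get-or-create sketch with standard-library types; SequenceId is a hypothetical uint64_t stand-in where 0 plays the role of the null id.

#include <cstdint>
#include <map>

using SequenceId = uint64_t;  // 0 means "not created yet" in this sketch.

class SequenceRegistry {
 public:
  // Get-or-create, mirroring the deleted stream_sequences_ lookup: the first
  // command buffer on a stream creates the sequence, later ones reuse it so
  // their messages stay ordered relative to each other.
  SequenceId GetOrCreateForStream(int32_t stream_id) {
    SequenceId& id = stream_sequences_[stream_id];
    if (id == 0)
      id = next_sequence_id_++;
    return id;
  }

 private:
  std::map<int32_t, SequenceId> stream_sequences_;
  SequenceId next_sequence_id_ = 1;
};
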
919 std::unique_ptr<GpuCommandBufferStub> stub(GpuCommandBufferStub::Create( 848 std::unique_ptr<GpuCommandBufferStub> stub(GpuCommandBufferStub::Create(
920 this, share_group, init_params, command_buffer_id, sequence_id, stream_id, 849 this, share_group, init_params, command_buffer_id, sequence_id, stream_id,
921 route_id, std::move(shared_state_shm))); 850 route_id, std::move(shared_state_shm)));
922 851
923 if (!AddRoute(route_id, sequence_id, stub.get())) { 852 if (!AddRoute(route_id, sequence_id, stub.get())) {
924 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): failed to add route"; 853 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): failed to add route";
925 return nullptr; 854 return nullptr;
926 } 855 }
927 856
(...skipping 89 matching lines...)
1017 946
1018 return manager->gpu_memory_buffer_factory() 947 return manager->gpu_memory_buffer_factory()
1019 ->AsImageFactory() 948 ->AsImageFactory()
1020 ->CreateImageForGpuMemoryBuffer(handle, size, format, internalformat, 949 ->CreateImageForGpuMemoryBuffer(handle, size, format, internalformat,
1021 client_id_, surface_handle); 950 client_id_, surface_handle);
1022 } 951 }
1023 } 952 }
1024 } 953 }
1025 954
1026 } // namespace gpu 955 } // namespace gpu