Chromium Code Reviews

Unified Diff: gpu/ipc/service/gpu_channel.cc

Issue 2881813002: Revert of gpu: GPU service scheduler. (Closed)
Patch Set: Created 3 years, 7 months ago
Index: gpu/ipc/service/gpu_channel.cc
diff --git a/gpu/ipc/service/gpu_channel.cc b/gpu/ipc/service/gpu_channel.cc
index 37781d45f01464ec5d19edaa2d8e3e98b6acdf2b..1d67b33ae2d0022e75a54dbdefcb57551c25c2d9 100644
--- a/gpu/ipc/service/gpu_channel.cc
+++ b/gpu/ipc/service/gpu_channel.cc
@@ -34,7 +34,6 @@
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/preemption_flag.h"
-#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
@@ -100,20 +99,21 @@
GpuChannelMessageQueue::GpuChannelMessageQueue(
GpuChannel* channel,
- scoped_refptr<SyncPointOrderData> sync_point_order_data,
scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
scoped_refptr<PreemptionFlag> preempting_flag,
- scoped_refptr<PreemptionFlag> preempted_flag)
+ scoped_refptr<PreemptionFlag> preempted_flag,
+ SyncPointManager* sync_point_manager)
: channel_(channel),
max_preemption_time_(
base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs)),
timer_(new base::OneShotTimer),
- sync_point_order_data_(std::move(sync_point_order_data)),
+ sync_point_order_data_(sync_point_manager->CreateSyncPointOrderData()),
main_task_runner_(std::move(main_task_runner)),
io_task_runner_(std::move(io_task_runner)),
preempting_flag_(std::move(preempting_flag)),
- preempted_flag_(std::move(preempted_flag)) {
+ preempted_flag_(std::move(preempted_flag)),
+ sync_point_manager_(sync_point_manager) {
timer_->SetTaskRunner(io_task_runner_);
io_thread_checker_.DetachFromThread();
}
@@ -123,6 +123,20 @@
}
void GpuChannelMessageQueue::Destroy() {
+ // We guarantee that the queue will no longer be modified after Destroy is
+ // called, so it is now safe to modify the queue without the lock. All
+ // public-facing modifying functions check enabled_, while all private
+ // modifying functions DCHECK(enabled_) to enforce this.
+ while (!channel_messages_.empty()) {
+ const IPC::Message& msg = channel_messages_.front()->message;
+ if (msg.is_sync()) {
+ IPC::Message* reply = IPC::SyncMessage::GenerateReply(&msg);
+ reply->set_reply_error();
+ channel_->Send(reply);
+ }
+ channel_messages_.pop_front();
+ }
+
sync_point_order_data_->Destroy();
if (preempting_flag_)
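
Note on the Destroy() hunk above: the drain loop fails any queued synchronous IPCs before dropping them; without an error reply, the sending process would block forever waiting for a response. Below is a minimal sketch of that reply-with-error pattern, with FailPendingSyncMessage as a hypothetical helper name (the real code inlines this logic in Destroy()):

#include "ipc/ipc_message.h"
#include "ipc/ipc_sender.h"
#include "ipc/ipc_sync_message.h"

// Sketch only: fail a pending sync IPC so the blocked caller on the other
// side of the channel wakes up with an error instead of hanging.
void FailPendingSyncMessage(const IPC::Message& msg, IPC::Sender* sender) {
  if (!msg.is_sync())
    return;
  // GenerateReply() builds a reply carrying the request's message id...
  IPC::Message* reply = IPC::SyncMessage::GenerateReply(&msg);
  reply->set_reply_error();  // ...and set_reply_error() marks it as failed.
  sender->Send(reply);       // Send() takes ownership of the reply.
}
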
@@ -425,11 +439,9 @@
GpuChannelMessageFilter::GpuChannelMessageFilter(
GpuChannel* gpu_channel,
- Scheduler* scheduler,
scoped_refptr<GpuChannelMessageQueue> message_queue,
scoped_refptr<base::SingleThreadTaskRunner> main_task_runner)
: gpu_channel_(gpu_channel),
- scheduler_(scheduler),
message_queue_(std::move(message_queue)),
main_task_runner_(std::move(main_task_runner)) {}
@@ -440,21 +452,6 @@
void GpuChannelMessageFilter::Destroy() {
base::AutoLock auto_lock(gpu_channel_lock_);
gpu_channel_ = nullptr;
-}
-
-void GpuChannelMessageFilter::AddRoute(int32_t route_id,
- SequenceId sequence_id) {
- base::AutoLock auto_lock(gpu_channel_lock_);
- DCHECK(gpu_channel_);
- DCHECK(scheduler_);
- route_sequences_[route_id] = sequence_id;
-}
-
-void GpuChannelMessageFilter::RemoveRoute(int32_t route_id) {
- base::AutoLock auto_lock(gpu_channel_lock_);
- DCHECK(gpu_channel_);
- DCHECK(scheduler_);
- route_sequences_.erase(route_id);
}
void GpuChannelMessageFilter::OnFilterAdded(IPC::Channel* channel) {
@@ -506,9 +503,6 @@
bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
DCHECK(ipc_channel_);
-
- if (!gpu_channel_)
- return MessageErrorHandler(message, "Channel destroyed");
if (message.should_unblock() || message.is_reply())
return MessageErrorHandler(message, "Unexpected message type");
@@ -536,23 +530,6 @@
main_task_runner_->PostTask(FROM_HERE,
base::Bind(&GpuChannel::HandleOutOfOrderMessage,
gpu_channel_->AsWeakPtr(), message));
- } else if (scheduler_) {
- SequenceId sequence_id = route_sequences_[message.routing_id()];
- if (sequence_id.is_null())
- return MessageErrorHandler(message, "Invalid route");
-
- std::vector<SyncToken> sync_token_fences;
- if (message.type() == GpuCommandBufferMsg_AsyncFlush::ID) {
- GpuCommandBufferMsg_AsyncFlush::Param params;
- if (!GpuCommandBufferMsg_AsyncFlush::Read(&message, &params))
- return MessageErrorHandler(message, "Invalid flush message");
- sync_token_fences = std::get<3>(params);
- }
-
- scheduler_->ScheduleTask(sequence_id,
- base::BindOnce(&GpuChannel::HandleMessage,
- gpu_channel_->AsWeakPtr(), message),
- sync_token_fences);
} else {
// Message queue takes care of PostTask.
message_queue_->PushBackMessage(message);
@@ -584,7 +561,6 @@
GpuChannel::GpuChannel(
GpuChannelManager* gpu_channel_manager,
- Scheduler* scheduler,
SyncPointManager* sync_point_manager,
GpuWatchdogThread* watchdog,
scoped_refptr<gl::GLShareGroup> share_group,
@@ -597,7 +573,6 @@
uint64_t client_tracing_id,
bool is_gpu_host)
: gpu_channel_manager_(gpu_channel_manager),
- scheduler_(scheduler),
sync_point_manager_(sync_point_manager),
preempting_flag_(preempting_flag),
preempted_flag_(preempted_flag),
@@ -610,17 +585,14 @@
watchdog_(watchdog),
is_gpu_host_(is_gpu_host),
weak_factory_(this) {
- DCHECK(gpu_channel_manager_);
- DCHECK(client_id_);
-
- if (!scheduler_) {
- message_queue_ = new GpuChannelMessageQueue(
- this, sync_point_manager->CreateSyncPointOrderData(), task_runner,
- io_task_runner, preempting_flag, preempted_flag);
- }
-
- filter_ =
- new GpuChannelMessageFilter(this, scheduler, message_queue_, task_runner);
+ DCHECK(gpu_channel_manager);
+ DCHECK(client_id);
+
+ message_queue_ = new GpuChannelMessageQueue(this, task_runner, io_task_runner,
+ preempting_flag, preempted_flag,
+ sync_point_manager);
+
+ filter_ = new GpuChannelMessageFilter(this, message_queue_, task_runner);
}
GpuChannel::~GpuChannel() {
@@ -630,12 +602,7 @@
// Destroy the filter first so that the message queue gets no more messages.
filter_->Destroy();
- if (scheduler_) {
- for (const auto& kv : stream_sequences_)
- scheduler_->DestroySequence(kv.second);
- } else {
- message_queue_->Destroy();
- }
+ message_queue_->Destroy();
DCHECK(!preempting_flag_ || !preempting_flag_->IsSet());
}
@@ -689,19 +656,13 @@
}
void GpuChannel::OnCommandBufferScheduled(GpuCommandBufferStub* stub) {
- if (scheduler_) {
- scheduler_->EnableSequence(stub->sequence_id());
- } else {
- message_queue_->SetScheduled(true);
- }
+ message_queue_->SetScheduled(true);
+ // TODO(sunnyps): Enable gpu scheduler task queue for stub's sequence.
}
void GpuChannel::OnCommandBufferDescheduled(GpuCommandBufferStub* stub) {
- if (scheduler_) {
- scheduler_->DisableSequence(stub->sequence_id());
- } else {
- message_queue_->SetScheduled(false);
- }
+ message_queue_->SetScheduled(false);
+ // TODO(sunnyps): Disable gpu scheduler task queue for stub's sequence.
}
GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) {
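
With the scheduler path reverted, OnCommandBufferScheduled() and OnCommandBufferDescheduled() above toggle a single scheduled flag on the channel's one message queue instead of enabling or disabling a per-stub sequence, so descheduling one command buffer stalls every route on the channel. A toy model of the two behaviors, using illustrative types that are not Chromium's:

#include <map>

// Toy sketch only; these types are illustrative stand-ins, not Chromium code.
struct ChannelQueueModel {
  // Post-revert behavior: one flag gates message processing for every route
  // on the channel.
  bool scheduled = true;
  void SetScheduled(bool is_scheduled) { scheduled = is_scheduled; }
};

struct SchedulerModel {
  // Reverted behavior: each sequence (roughly, each stream of command
  // buffers) is enabled or disabled independently.
  std::map<int, bool> sequence_enabled;
  void EnableSequence(int sequence_id) { sequence_enabled[sequence_id] = true; }
  void DisableSequence(int sequence_id) {
    sequence_enabled[sequence_id] = false;
  }
};
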
@@ -724,14 +685,11 @@
bool GpuChannel::AddRoute(int32_t route_id,
SequenceId sequence_id,
IPC::Listener* listener) {
- if (scheduler_)
- filter_->AddRoute(route_id, sequence_id);
+ // TODO(sunnyps): Add route id to sequence id mapping to filter.
return router_.AddRoute(route_id, listener);
}
void GpuChannel::RemoveRoute(int32_t route_id) {
- if (scheduler_)
- filter_->RemoveRoute(route_id);
router_.RemoveRoute(route_id);
}
@@ -747,27 +705,6 @@
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
return handled;
-}
-
-void GpuChannel::HandleMessage(const IPC::Message& msg) {
- int32_t routing_id = msg.routing_id();
- GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id);
-
- DCHECK(!stub || stub->IsScheduled());
-
- DVLOG(1) << "received message @" << &msg << " on channel @" << this
- << " with type " << msg.type();
-
- HandleMessageHelper(msg);
-
- // If we get descheduled or yield while processing a message.
- if (stub && (stub->HasUnprocessedCommands() || !stub->IsScheduled())) {
- DCHECK((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID == msg.type() ||
- (uint32_t)GpuCommandBufferMsg_WaitSyncToken::ID == msg.type());
- scheduler_->ContinueTask(
- stub->sequence_id(),
- base::BindOnce(&GpuChannel::HandleMessage, AsWeakPtr(), msg));
- }
}
void GpuChannel::HandleMessageOnQueue() {
@@ -862,7 +799,7 @@
std::unique_ptr<base::SharedMemory> shared_state_shm) {
if (init_params.surface_handle != kNullSurfaceHandle && !is_gpu_host_) {
DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): attempt to create a "
- "view context on a non-privileged channel";
+ "view context on a non-priviledged channel";
return nullptr;
}
@@ -881,10 +818,10 @@
return nullptr;
}
- SchedulingPriority stream_priority = init_params.stream_priority;
- if (stream_priority <= SchedulingPriority::kHigh && !is_gpu_host_) {
- DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): high priority stream "
- "not allowed on a non-privileged channel";
+ GpuStreamPriority stream_priority = init_params.stream_priority;
+ if (stream_priority == GpuStreamPriority::REAL_TIME && !is_gpu_host_) {
+ DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): real time stream "
+ "priority not allowed";
return nullptr;
}
@@ -905,16 +842,8 @@
CommandBufferId command_buffer_id =
GenerateCommandBufferId(client_id_, route_id);
- SequenceId sequence_id;
- if (scheduler_) {
- sequence_id = stream_sequences_[stream_id];
- if (sequence_id.is_null()) {
- sequence_id = scheduler_->CreateSequence(stream_priority);
- stream_sequences_[stream_id] = sequence_id;
- }
- } else {
- sequence_id = message_queue_->sequence_id();
- }
+ // TODO(sunnyps): Lookup sequence id using stream id to sequence id map.
+ SequenceId sequence_id = message_queue_->sequence_id();
std::unique_ptr<GpuCommandBufferStub> stub(GpuCommandBufferStub::Create(
this, share_group, init_params, command_buffer_id, sequence_id, stream_id,
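
The command buffer id used above packs the channel's client id and the route id into a single 64-bit value. A hedged sketch of that packing, which may differ in detail from the actual GenerateCommandBufferId() helper in gpu_channel.cc:

#include <cstdint>

// Sketch of the id packing, assuming the client id occupies the high 32 bits
// and the route id the low 32 bits; see GenerateCommandBufferId() in
// gpu_channel.cc for the authoritative version.
uint64_t PackCommandBufferId(int32_t client_id, int32_t route_id) {
  return (static_cast<uint64_t>(static_cast<uint32_t>(client_id)) << 32) |
         static_cast<uint64_t>(static_cast<uint32_t>(route_id));
}
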