Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "gpu/ipc/service/gpu_channel.h" | 5 #include "gpu/ipc/service/gpu_channel.h" |
| 6 | 6 |
| 7 #include <utility> | 7 #include <utility> |
| 8 | 8 |
| 9 #if defined(OS_WIN) | 9 #if defined(OS_WIN) |
| 10 #include <windows.h> | 10 #include <windows.h> |
| (...skipping 70 matching lines...) | |
| 81 channel, std::move(io_task_runner), std::move(preempting_flag), | 81 channel, std::move(io_task_runner), std::move(preempting_flag), |
| 82 std::move(preempted_flag), sync_point_manager); | 82 std::move(preempted_flag), sync_point_manager); |
| 83 } | 83 } |
| 84 | 84 |
| 85 GpuChannelMessageQueue::GpuChannelMessageQueue( | 85 GpuChannelMessageQueue::GpuChannelMessageQueue( |
| 86 GpuChannel* channel, | 86 GpuChannel* channel, |
| 87 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner, | 87 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner, |
| 88 scoped_refptr<PreemptionFlag> preempting_flag, | 88 scoped_refptr<PreemptionFlag> preempting_flag, |
| 89 scoped_refptr<PreemptionFlag> preempted_flag, | 89 scoped_refptr<PreemptionFlag> preempted_flag, |
| 90 SyncPointManager* sync_point_manager) | 90 SyncPointManager* sync_point_manager) |
| 91 : enabled_(true), | 91 : scheduled_(true), |
| 92 scheduled_(true), | |
| 93 channel_(channel), | 92 channel_(channel), |
| 94 preemption_state_(IDLE), | 93 preemption_state_(IDLE), |
| 95 max_preemption_time_( | 94 max_preemption_time_( |
| 96 base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs)), | 95 base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs)), |
| 97 timer_(new base::OneShotTimer), | 96 timer_(new base::OneShotTimer), |
| 98 sync_point_order_data_(sync_point_manager->CreateSyncPointOrderData()), | 97 sync_point_order_data_(sync_point_manager->CreateSyncPointOrderData()), |
| 99 io_task_runner_(std::move(io_task_runner)), | 98 io_task_runner_(std::move(io_task_runner)), |
| 100 preempting_flag_(std::move(preempting_flag)), | 99 preempting_flag_(std::move(preempting_flag)), |
| 101 preempted_flag_(std::move(preempted_flag)), | 100 preempted_flag_(std::move(preempted_flag)), |
| 102 sync_point_manager_(sync_point_manager) { | 101 sync_point_manager_(sync_point_manager) { |
| 103 timer_->SetTaskRunner(io_task_runner_); | 102 timer_->SetTaskRunner(io_task_runner_); |
| 104 io_thread_checker_.DetachFromThread(); | 103 io_thread_checker_.DetachFromThread(); |
| 105 } | 104 } |
| 106 | 105 |
| 107 GpuChannelMessageQueue::~GpuChannelMessageQueue() { | 106 GpuChannelMessageQueue::~GpuChannelMessageQueue() { |
| 108 DCHECK(!enabled_); | |
| 109 DCHECK(channel_messages_.empty()); | 107 DCHECK(channel_messages_.empty()); |
| 110 } | 108 } |
| 111 | 109 |
| 112 void GpuChannelMessageQueue::Disable() { | 110 void GpuChannelMessageQueue::Destroy() { |
| 113 { | |
| 114 base::AutoLock auto_lock(channel_lock_); | |
| 115 DCHECK(enabled_); | |
| 116 enabled_ = false; | |
| 117 } | |
| 118 | |
| 119 // We guarantee that the queues will no longer be modified after enabled_ | 111 // We guarantee that the queues will no longer be modified after enabled_ |
| 120 // is set to false, it is now safe to modify the queue without the lock. | 112 // is set to false, it is now safe to modify the queue without the lock. |
| 121 // All public facing modifying functions check enabled_ while all | 113 // All public facing modifying functions check enabled_ while all |
| 122 // private modifying functions DCHECK(enabled_) to enforce this. | 114 // private modifying functions DCHECK(enabled_) to enforce this. |
| 123 while (!channel_messages_.empty()) { | 115 while (!channel_messages_.empty()) { |
| 124 const IPC::Message& msg = channel_messages_.front()->message; | 116 const IPC::Message& msg = channel_messages_.front()->message; |
| 125 if (msg.is_sync()) { | 117 if (msg.is_sync()) { |
| 126 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&msg); | 118 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&msg); |
| 127 reply->set_reply_error(); | 119 reply->set_reply_error(); |
| 128 channel_->Send(reply); | 120 channel_->Send(reply); |
| 129 } | 121 } |
| 130 channel_messages_.pop_front(); | 122 channel_messages_.pop_front(); |
| 131 } | 123 } |
| 132 | 124 |
| 133 if (sync_point_order_data_) { | 125 sync_point_order_data_->Destroy(); |
| 134 sync_point_order_data_->Destroy(); | |
| 135 sync_point_order_data_ = nullptr; | |
| 136 } | |
| 137 | 126 |
| 127 // Destroy timer on io thread. | |
| 138 io_task_runner_->PostTask( | 128 io_task_runner_->PostTask( |
| 139 FROM_HERE, base::Bind(&GpuChannelMessageQueue::DisableIO, this)); | 129 FROM_HERE, base::Bind([](std::unique_ptr<base::OneShotTimer>) {}, |
| 140 } | 130 base::Passed(&timer_))); |
| 141 | |
| 142 void GpuChannelMessageQueue::DisableIO() { | |
| 143 DCHECK(io_thread_checker_.CalledOnValidThread()); | |
| 144 timer_ = nullptr; | |
| 145 } | 131 } |
| 146 | 132 |
| 147 bool GpuChannelMessageQueue::IsScheduled() const { | 133 bool GpuChannelMessageQueue::IsScheduled() const { |
| 148 base::AutoLock lock(channel_lock_); | 134 base::AutoLock lock(channel_lock_); |
| 149 return scheduled_; | 135 return scheduled_; |
| 150 } | 136 } |
| 151 | 137 |
| 152 void GpuChannelMessageQueue::SetScheduled(bool scheduled) { | 138 void GpuChannelMessageQueue::SetScheduled(bool scheduled) { |
| 153 base::AutoLock lock(channel_lock_); | 139 base::AutoLock lock(channel_lock_); |
| 154 DCHECK(enabled_); | |
| 155 if (scheduled_ == scheduled) | 140 if (scheduled_ == scheduled) |
| 156 return; | 141 return; |
| 157 scheduled_ = scheduled; | 142 scheduled_ = scheduled; |
| 158 if (scheduled) | 143 if (scheduled) |
| 159 channel_->PostHandleMessage(); | 144 channel_->PostHandleMessageOnQueue(); |
| 160 if (preempting_flag_) { | 145 if (preempting_flag_) { |
| 161 io_task_runner_->PostTask( | 146 io_task_runner_->PostTask( |
| 162 FROM_HERE, | 147 FROM_HERE, |
| 163 base::Bind(&GpuChannelMessageQueue::UpdatePreemptionState, this)); | 148 base::Bind(&GpuChannelMessageQueue::UpdatePreemptionState, this)); |
| 164 } | 149 } |
| 165 } | 150 } |
| 166 | 151 |
| 167 bool GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) { | 152 void GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) { |
| 168 base::AutoLock auto_lock(channel_lock_); | 153 base::AutoLock auto_lock(channel_lock_); |
| 169 if (enabled_) { | 154 uint32_t order_num = sync_point_order_data_->GenerateUnprocessedOrderNumber(); |
| 170 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || | 155 std::unique_ptr<GpuChannelMessage> msg( |
| 171 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { | 156 new GpuChannelMessage(message, order_num, base::TimeTicks::Now())); |
| 172 channel_->PostHandleOutOfOrderMessage(message); | |
| 173 return true; | |
| 174 } | |
| 175 | 157 |
| 176 uint32_t order_num = | 158 if (channel_messages_.empty()) { |
| 177 sync_point_order_data_->GenerateUnprocessedOrderNumber(); | 159 DCHECK(scheduled_); |
| 178 std::unique_ptr<GpuChannelMessage> msg( | 160 channel_->PostHandleMessageOnQueue(); |
| 179 new GpuChannelMessage(message, order_num, base::TimeTicks::Now())); | 161 } |
| 180 | 162 |
| 181 if (channel_messages_.empty()) { | 163 channel_messages_.push_back(std::move(msg)); |
| 182 DCHECK(scheduled_); | |
| 183 channel_->PostHandleMessage(); | |
| 184 } | |
| 185 | 164 |
| 186 channel_messages_.push_back(std::move(msg)); | 165 if (preempting_flag_) |
| 187 | 166 UpdatePreemptionStateHelper(); |
| 188 if (preempting_flag_) | |
| 189 UpdatePreemptionStateHelper(); | |
| 190 | |
| 191 return true; | |
| 192 } | |
| 193 return false; | |
| 194 } | 167 } |
| 195 | 168 |
| 196 const GpuChannelMessage* GpuChannelMessageQueue::BeginMessageProcessing() { | 169 const GpuChannelMessage* GpuChannelMessageQueue::BeginMessageProcessing() { |
| 197 base::AutoLock auto_lock(channel_lock_); | 170 base::AutoLock auto_lock(channel_lock_); |
| 198 DCHECK(enabled_); | |
| 199 // If we have been preempted by another channel, just post a task to wake up. | 171 // If we have been preempted by another channel, just post a task to wake up. |
| 200 if (preempted_flag_ && preempted_flag_->IsSet()) { | 172 if (preempted_flag_ && preempted_flag_->IsSet()) { |
| 201 channel_->PostHandleMessage(); | 173 channel_->PostHandleMessageOnQueue(); |
| 202 return nullptr; | 174 return nullptr; |
| 203 } | 175 } |
| 204 if (channel_messages_.empty()) | 176 if (channel_messages_.empty()) |
| 205 return nullptr; | 177 return nullptr; |
| 206 sync_point_order_data_->BeginProcessingOrderNumber( | 178 sync_point_order_data_->BeginProcessingOrderNumber( |
| 207 channel_messages_.front()->order_number); | 179 channel_messages_.front()->order_number); |
| 208 return channel_messages_.front().get(); | 180 return channel_messages_.front().get(); |
| 209 } | 181 } |
| 210 | 182 |
| 211 void GpuChannelMessageQueue::PauseMessageProcessing() { | 183 void GpuChannelMessageQueue::PauseMessageProcessing() { |
| 212 base::AutoLock auto_lock(channel_lock_); | 184 base::AutoLock auto_lock(channel_lock_); |
| 213 DCHECK(!channel_messages_.empty()); | 185 DCHECK(!channel_messages_.empty()); |
| 214 | 186 |
| 215 // If we have been preempted by another channel, just post a task to wake up. | 187 // If we have been preempted by another channel, just post a task to wake up. |
| 216 if (scheduled_) | 188 if (scheduled_) |
| 217 channel_->PostHandleMessage(); | 189 channel_->PostHandleMessageOnQueue(); |
| 218 | 190 |
| 219 sync_point_order_data_->PauseProcessingOrderNumber( | 191 sync_point_order_data_->PauseProcessingOrderNumber( |
| 220 channel_messages_.front()->order_number); | 192 channel_messages_.front()->order_number); |
| 221 } | 193 } |
| 222 | 194 |
| 223 void GpuChannelMessageQueue::FinishMessageProcessing() { | 195 void GpuChannelMessageQueue::FinishMessageProcessing() { |
| 224 base::AutoLock auto_lock(channel_lock_); | 196 base::AutoLock auto_lock(channel_lock_); |
| 225 DCHECK(!channel_messages_.empty()); | 197 DCHECK(!channel_messages_.empty()); |
| 226 DCHECK(scheduled_); | 198 DCHECK(scheduled_); |
| 227 | 199 |
| 228 sync_point_order_data_->FinishProcessingOrderNumber( | 200 sync_point_order_data_->FinishProcessingOrderNumber( |
| 229 channel_messages_.front()->order_number); | 201 channel_messages_.front()->order_number); |
| 230 channel_messages_.pop_front(); | 202 channel_messages_.pop_front(); |
| 231 | 203 |
| 232 if (!channel_messages_.empty()) | 204 if (!channel_messages_.empty()) |
| 233 channel_->PostHandleMessage(); | 205 channel_->PostHandleMessageOnQueue(); |
| 234 | 206 |
| 235 if (preempting_flag_) { | 207 if (preempting_flag_) { |
| 236 io_task_runner_->PostTask( | 208 io_task_runner_->PostTask( |
| 237 FROM_HERE, | 209 FROM_HERE, |
| 238 base::Bind(&GpuChannelMessageQueue::UpdatePreemptionState, this)); | 210 base::Bind(&GpuChannelMessageQueue::UpdatePreemptionState, this)); |
| 239 } | 211 } |
| 240 } | 212 } |
| 241 | 213 |
| 242 void GpuChannelMessageQueue::UpdatePreemptionState() { | 214 void GpuChannelMessageQueue::UpdatePreemptionState() { |
| 243 DCHECK(io_thread_checker_.CalledOnValidThread()); | 215 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| (...skipping 179 matching lines...) | |
| 423 DCHECK(preempting_flag_); | 395 DCHECK(preempting_flag_); |
| 424 channel_lock_.AssertAcquired(); | 396 channel_lock_.AssertAcquired(); |
| 425 DCHECK(preemption_state_ == CHECKING || preemption_state_ == PREEMPTING); | 397 DCHECK(preemption_state_ == CHECKING || preemption_state_ == PREEMPTING); |
| 426 DCHECK(!scheduled_); | 398 DCHECK(!scheduled_); |
| 427 | 399 |
| 428 preemption_state_ = WOULD_PREEMPT_DESCHEDULED; | 400 preemption_state_ = WOULD_PREEMPT_DESCHEDULED; |
| 429 preempting_flag_->Reset(); | 401 preempting_flag_->Reset(); |
| 430 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); | 402 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); |
| 431 } | 403 } |
| 432 | 404 |
| 433 GpuChannelMessageFilter::GpuChannelMessageFilter( | 405 GpuChannelMessageFilter::GpuChannelMessageFilter(GpuChannel* gpu_channel) |
| 434 scoped_refptr<GpuChannelMessageQueue> message_queue) | 406 : gpu_channel_(gpu_channel) {} |
| 435 : message_queue_(std::move(message_queue)), | |
| 436 channel_(nullptr), | |
| 437 peer_pid_(base::kNullProcessId) {} | |
| 438 | 407 |
| 439 GpuChannelMessageFilter::~GpuChannelMessageFilter() {} | 408 GpuChannelMessageFilter::~GpuChannelMessageFilter() { |
| 409 DCHECK(!gpu_channel_); | |
| 410 } | |
| 411 | |
| 412 void GpuChannelMessageFilter::Destroy() { | |
| 413 base::AutoLock auto_lock(lock_); | |
| 414 gpu_channel_ = nullptr; | |
| 415 } | |
| 440 | 416 |
| 441 void GpuChannelMessageFilter::OnFilterAdded(IPC::Channel* channel) { | 417 void GpuChannelMessageFilter::OnFilterAdded(IPC::Channel* channel) { |
| 442 DCHECK(!channel_); | 418 base::AutoLock auto_lock(lock_); |
| 443 channel_ = channel; | 419 DCHECK(!ipc_channel_); |
| 444 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | 420 ipc_channel_ = channel; |
| 445 filter->OnFilterAdded(channel_); | 421 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) |
| 446 } | 422 filter->OnFilterAdded(ipc_channel_); |
| 447 } | 423 } |
| 448 | 424 |
| 449 void GpuChannelMessageFilter::OnFilterRemoved() { | 425 void GpuChannelMessageFilter::OnFilterRemoved() { |
| 450 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | 426 base::AutoLock auto_lock(lock_); |
| 427 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) | |
| 451 filter->OnFilterRemoved(); | 428 filter->OnFilterRemoved(); |
| 452 } | 429 ipc_channel_ = nullptr; |
| 453 channel_ = nullptr; | |
| 454 peer_pid_ = base::kNullProcessId; | 430 peer_pid_ = base::kNullProcessId; |
| 455 } | 431 } |
| 456 | 432 |
| 457 void GpuChannelMessageFilter::OnChannelConnected(int32_t peer_pid) { | 433 void GpuChannelMessageFilter::OnChannelConnected(int32_t peer_pid) { |
| 434 base::AutoLock auto_lock(lock_); | |
| 458 DCHECK(peer_pid_ == base::kNullProcessId); | 435 DCHECK(peer_pid_ == base::kNullProcessId); |
| 459 peer_pid_ = peer_pid; | 436 peer_pid_ = peer_pid; |
| 460 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | 437 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) |
| 461 filter->OnChannelConnected(peer_pid); | 438 filter->OnChannelConnected(peer_pid); |
| 462 } | |
| 463 } | 439 } |
| 464 | 440 |
| 465 void GpuChannelMessageFilter::OnChannelError() { | 441 void GpuChannelMessageFilter::OnChannelError() { |
| 466 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | 442 base::AutoLock auto_lock(lock_); |
| 443 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) | |
| 467 filter->OnChannelError(); | 444 filter->OnChannelError(); |
| 468 } | |
| 469 } | 445 } |
| 470 | 446 |
| 471 void GpuChannelMessageFilter::OnChannelClosing() { | 447 void GpuChannelMessageFilter::OnChannelClosing() { |
| 472 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | 448 base::AutoLock auto_lock(lock_); |
| 449 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) | |
| 473 filter->OnChannelClosing(); | 450 filter->OnChannelClosing(); |
| 474 } | |
| 475 } | 451 } |
| 476 | 452 |
| 477 void GpuChannelMessageFilter::AddChannelFilter( | 453 void GpuChannelMessageFilter::AddChannelFilter( |
| 478 scoped_refptr<IPC::MessageFilter> filter) { | 454 scoped_refptr<IPC::MessageFilter> filter) { |
| 455 base::AutoLock auto_lock(lock_); | |
| 479 channel_filters_.push_back(filter); | 456 channel_filters_.push_back(filter); |
| 480 if (channel_) | 457 if (ipc_channel_) |
| 481 filter->OnFilterAdded(channel_); | 458 filter->OnFilterAdded(ipc_channel_); |
| 482 if (peer_pid_ != base::kNullProcessId) | 459 if (peer_pid_ != base::kNullProcessId) |
| 483 filter->OnChannelConnected(peer_pid_); | 460 filter->OnChannelConnected(peer_pid_); |
| 484 } | 461 } |
| 485 | 462 |
| 486 void GpuChannelMessageFilter::RemoveChannelFilter( | 463 void GpuChannelMessageFilter::RemoveChannelFilter( |
| 487 scoped_refptr<IPC::MessageFilter> filter) { | 464 scoped_refptr<IPC::MessageFilter> filter) { |
| 488 if (channel_) | 465 base::AutoLock auto_lock(lock_); |
| 466 if (ipc_channel_) | |
| 489 filter->OnFilterRemoved(); | 467 filter->OnFilterRemoved(); |
| 490 channel_filters_.erase( | 468 base::Erase(channel_filters_, filter); |
| 491 std::find(channel_filters_.begin(), channel_filters_.end(), filter)); | |
| 492 } | 469 } |
| 493 | 470 |
| 494 bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { | 471 bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { |
| 495 DCHECK(channel_); | 472 base::AutoLock auto_lock(lock_); |
| 473 | |
| 474 DCHECK(ipc_channel_); | |
| 475 | |
| 476 if (!gpu_channel_) | |
| 477 return MessageErrorHandler(message, "Channel destroyed"); | |
| 496 | 478 |
| 497 if (message.should_unblock() || message.is_reply()) | 479 if (message.should_unblock() || message.is_reply()) |
| 498 return MessageErrorHandler(message, "Unexpected message type"); | 480 return MessageErrorHandler(message, "Unexpected message type"); |
| 499 | 481 |
| 500 if (message.type() == GpuChannelMsg_Nop::ID) { | 482 if (message.type() == GpuChannelMsg_Nop::ID) { |
| 501 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); | 483 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
| 502 Send(reply); | 484 Send(reply); |
| 503 return true; | 485 return true; |
| 504 } | 486 } |
| 505 | 487 |
| 506 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | 488 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { |
| 507 if (filter->OnMessageReceived(message)) | 489 if (filter->OnMessageReceived(message)) |
| 508 return true; | 490 return true; |
| 509 } | 491 } |
| 510 | 492 |
| 511 if (!message_queue_->PushBackMessage(message)) | 493 gpu_channel_->HandleMessageOnIOThread(message); |
| 512 return MessageErrorHandler(message, "Channel destroyed"); | |
| 513 | |
| 514 return true; | 494 return true; |
| 515 } | 495 } |
| 516 | 496 |
| 517 bool GpuChannelMessageFilter::Send(IPC::Message* message) { | 497 bool GpuChannelMessageFilter::Send(IPC::Message* message) { |
| 518 return channel_->Send(message); | 498 return ipc_channel_->Send(message); |
| 519 } | 499 } |
| 520 | 500 |
| 521 bool GpuChannelMessageFilter::MessageErrorHandler(const IPC::Message& message, | 501 bool GpuChannelMessageFilter::MessageErrorHandler(const IPC::Message& message, |
| 522 const char* error_msg) { | 502 const char* error_msg) { |
| 523 DLOG(ERROR) << error_msg; | 503 DLOG(ERROR) << error_msg; |
| 524 if (message.is_sync()) { | 504 if (message.is_sync()) { |
| 525 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); | 505 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
| 526 reply->set_reply_error(); | 506 reply->set_reply_error(); |
| 527 Send(reply); | 507 Send(reply); |
| 528 } | 508 } |
| (...skipping 25 matching lines...) | |
| 554 preempting_flag_(preempting_flag), | 534 preempting_flag_(preempting_flag), |
| 555 preempted_flag_(preempted_flag), | 535 preempted_flag_(preempted_flag), |
| 556 client_id_(client_id), | 536 client_id_(client_id), |
| 557 client_tracing_id_(client_tracing_id), | 537 client_tracing_id_(client_tracing_id), |
| 558 task_runner_(task_runner), | 538 task_runner_(task_runner), |
| 559 io_task_runner_(io_task_runner), | 539 io_task_runner_(io_task_runner), |
| 560 share_group_(share_group), | 540 share_group_(share_group), |
| 561 mailbox_manager_(mailbox), | 541 mailbox_manager_(mailbox), |
| 562 watchdog_(watchdog), | 542 watchdog_(watchdog), |
| 563 allow_view_command_buffers_(allow_view_command_buffers), | 543 allow_view_command_buffers_(allow_view_command_buffers), |
| 564 allow_real_time_streams_(allow_real_time_streams), | 544 allow_real_time_streams_(allow_real_time_streams) { |
| 565 weak_factory_(this) { | |
| 566 DCHECK(gpu_channel_manager); | 545 DCHECK(gpu_channel_manager); |
| 567 DCHECK(client_id); | 546 DCHECK(client_id); |
| 568 | 547 |
| 569 message_queue_ = | 548 message_queue_ = |
| 570 GpuChannelMessageQueue::Create(this, io_task_runner, preempting_flag, | 549 GpuChannelMessageQueue::Create(this, io_task_runner, preempting_flag, |
| 571 preempted_flag, sync_point_manager); | 550 preempted_flag, sync_point_manager); |
| 572 | 551 |
| 573 filter_ = new GpuChannelMessageFilter(message_queue_); | 552 filter_ = new GpuChannelMessageFilter(this); |
| 574 } | 553 } |
| 575 | 554 |
| 576 GpuChannel::~GpuChannel() { | 555 GpuChannel::~GpuChannel() { |
| 577 // Clear stubs first because of dependencies. | 556 // Clear stubs first because of dependencies. |
| 578 stubs_.clear(); | 557 stubs_.clear(); |
| 579 | 558 |
| 580 message_queue_->Disable(); | 559 message_queue_->Destroy(); |

piman 2017/03/24 04:31:40: This has a race, because the message queue gets destroyed …

sunnyps 2017/03/24 21:27:22: That's a mistake, I meant to do filter_->Destroy first.

| 560 | |
| 561 filter_->Destroy(); | |
| 581 | 562 |
| 582 if (preempting_flag_.get()) | 563 if (preempting_flag_.get()) |
| 583 preempting_flag_->Reset(); | 564 preempting_flag_->Reset(); |
| 584 } | 565 } |
| 585 | 566 |
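
The exchange above settles the teardown order: the IO-thread filter has to stop forwarding messages before the ordered queue is torn down. Below is a minimal sketch of that order using only the names visible in this diff; it illustrates the reviewers' point and is not the final patch, which is not shown here.

```cpp
// Sketch of the destructor order the reviewers converge on: detach the
// IO-thread filter first so it can no longer hand messages to the queue,
// then drain and destroy the queue. Names are taken from this diff.
GpuChannel::~GpuChannel() {
  // Clear stubs first because of dependencies.
  stubs_.clear();

  // Stop the IO thread from forwarding messages into the queue...
  filter_->Destroy();

  // ...then it is safe to drain the queue and release its resources.
  message_queue_->Destroy();

  if (preempting_flag_.get())
    preempting_flag_->Reset();
}
```
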
| 586 IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event) { | 567 IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event) { |
| 587 DCHECK(shutdown_event); | 568 DCHECK(shutdown_event); |
| 588 DCHECK(!channel_); | 569 DCHECK(!channel_); |
| 589 | 570 |
| 590 mojo::MessagePipe pipe; | 571 mojo::MessagePipe pipe; |
| 591 channel_ = IPC::SyncChannel::Create(pipe.handle0.release(), | 572 channel_ = IPC::SyncChannel::Create(pipe.handle0.release(), |
| 592 IPC::Channel::MODE_SERVER, this, | 573 IPC::Channel::MODE_SERVER, this, |
| 593 io_task_runner_, false, shutdown_event); | 574 io_task_runner_, false, shutdown_event); |
| 594 | 575 |
| 595 channel_->AddFilter(filter_.get()); | 576 channel_->AddFilter(filter_.get()); |
| 596 | 577 |
| 597 return pipe.handle1.release(); | 578 return pipe.handle1.release(); |
| 598 } | 579 } |
| 599 | 580 |
| 600 void GpuChannel::SetUnhandledMessageListener(IPC::Listener* listener) { | 581 void GpuChannel::SetUnhandledMessageListener(IPC::Listener* listener) { |
| 601 unhandled_message_listener_ = listener; | 582 unhandled_message_listener_ = listener; |
| 602 } | 583 } |
| 603 | 584 |
| 604 base::WeakPtr<GpuChannel> GpuChannel::AsWeakPtr() { | |
| 605 return weak_factory_.GetWeakPtr(); | |
| 606 } | |
| 607 | |
| 608 base::ProcessId GpuChannel::GetClientPID() const { | 585 base::ProcessId GpuChannel::GetClientPID() const { |
| 609 DCHECK_NE(peer_pid_, base::kNullProcessId); | 586 DCHECK_NE(peer_pid_, base::kNullProcessId); |
| 610 return peer_pid_; | 587 return peer_pid_; |
| 611 } | 588 } |
| 612 | 589 |
| 613 bool GpuChannel::OnMessageReceived(const IPC::Message& msg) { | 590 bool GpuChannel::OnMessageReceived(const IPC::Message& msg) { |
| 614 // All messages should be pushed to channel_messages_ and handled separately. | 591 // All messages should be pushed to channel_messages_ and handled separately. |
| 615 NOTREACHED(); | 592 NOTREACHED(); |
| 616 return false; | 593 return false; |
| 617 } | 594 } |
| (...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 685 OnCreateCommandBuffer) | 662 OnCreateCommandBuffer) |
| 686 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer, | 663 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer, |
| 687 OnDestroyCommandBuffer) | 664 OnDestroyCommandBuffer) |
| 688 IPC_MESSAGE_HANDLER(GpuChannelMsg_GetDriverBugWorkArounds, | 665 IPC_MESSAGE_HANDLER(GpuChannelMsg_GetDriverBugWorkArounds, |
| 689 OnGetDriverBugWorkArounds) | 666 OnGetDriverBugWorkArounds) |
| 690 IPC_MESSAGE_UNHANDLED(handled = false) | 667 IPC_MESSAGE_UNHANDLED(handled = false) |
| 691 IPC_END_MESSAGE_MAP() | 668 IPC_END_MESSAGE_MAP() |
| 692 return handled; | 669 return handled; |
| 693 } | 670 } |
| 694 | 671 |
| 695 void GpuChannel::PostHandleMessage() { | 672 void GpuChannel::PostHandleMessageOnQueue() { |
| 696 task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage, | 673 task_runner_->PostTask( |
| 697 weak_factory_.GetWeakPtr())); | 674 FROM_HERE, base::Bind(&GpuChannel::HandleMessageOnQueue, AsWeakPtr())); |
| 698 } | 675 } |
| 699 | 676 |
| 700 void GpuChannel::PostHandleOutOfOrderMessage(const IPC::Message& msg) { | 677 void GpuChannel::PostHandleOutOfOrderMessage(const IPC::Message& msg) { |
| 701 task_runner_->PostTask(FROM_HERE, | 678 task_runner_->PostTask( |
| 702 base::Bind(&GpuChannel::HandleOutOfOrderMessage, | 679 FROM_HERE, |
| 703 weak_factory_.GetWeakPtr(), msg)); | 680 base::Bind(&GpuChannel::HandleOutOfOrderMessage, AsWeakPtr(), msg)); |
| 704 } | 681 } |
| 705 | 682 |
| 706 void GpuChannel::HandleMessage() { | 683 void GpuChannel::HandleMessageOnIOThread(const IPC::Message& msg) { |
| 684 if (msg.routing_id() == MSG_ROUTING_CONTROL || | |

sunnyps 2017/03/24 21:27:22: Actually control messages should not be PostTask'd

| 685 msg.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || | |
| 686 msg.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { | |
| 687 PostHandleOutOfOrderMessage(msg); | |
| 688 } else { | |
| 689 // TODO(sunnyps): Lookup sequence id and post task to that sequence in the | |
| 690 // scheduler. | |
| 691 message_queue_->PushBackMessage(msg); | |
| 692 } | |
| 693 } | |
| 694 | |
| 695 void GpuChannel::HandleMessageOnQueue() { | |
| 707 const GpuChannelMessage* channel_msg = | 696 const GpuChannelMessage* channel_msg = |
| 708 message_queue_->BeginMessageProcessing(); | 697 message_queue_->BeginMessageProcessing(); |
| 709 if (!channel_msg) | 698 if (!channel_msg) |
| 710 return; | 699 return; |
| 711 | 700 |
| 712 const IPC::Message& msg = channel_msg->message; | 701 const IPC::Message& msg = channel_msg->message; |
| 713 int32_t routing_id = msg.routing_id(); | 702 int32_t routing_id = msg.routing_id(); |
| 714 GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id); | 703 GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id); |
| 715 | 704 |
| 716 DCHECK(!stub || stub->IsScheduled()); | 705 DCHECK(!stub || stub->IsScheduled()); |
| (...skipping 171 matching lines...) | |
| 888 GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP) | 877 GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP) |
| 889 #undef GPU_OP | 878 #undef GPU_OP |
| 890 } | 879 } |
| 891 | 880 |
| 892 void GpuChannel::CacheShader(const std::string& key, | 881 void GpuChannel::CacheShader(const std::string& key, |
| 893 const std::string& shader) { | 882 const std::string& shader) { |
| 894 gpu_channel_manager_->delegate()->StoreShaderToDisk(client_id_, key, shader); | 883 gpu_channel_manager_->delegate()->StoreShaderToDisk(client_id_, key, shader); |
| 895 } | 884 } |
| 896 | 885 |
| 897 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { | 886 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { |
| 898 io_task_runner_->PostTask( | 887 filter_->AddChannelFilter(filter); |

piman 2017/03/24 04:31:40: I don't think it's generally safe to call filters …

sunnyps 2017/03/24 21:27:22: Done.

| 899 FROM_HERE, base::Bind(&GpuChannelMessageFilter::AddChannelFilter, filter_, | |
| 900 make_scoped_refptr(filter))); | |
| 901 } | 888 } |
| 902 | 889 |
| 903 void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) { | 890 void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) { |
| 904 io_task_runner_->PostTask( | 891 filter_->RemoveChannelFilter(filter); |

piman 2017/03/24 04:31:40: ditto

sunnyps 2017/03/24 21:27:22: Done.

| 905 FROM_HERE, base::Bind(&GpuChannelMessageFilter::RemoveChannelFilter, | |
| 906 filter_, make_scoped_refptr(filter))); | |
| 907 } | 892 } |
| 908 | 893 |
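
piman's comments on AddFilter/RemoveFilter above are about thread affinity: IPC::MessageFilter callbacks such as OnFilterAdded are generally invoked on the channel's IO thread, so calling into the filter list directly from the main thread is suspect. The sketch below shows the hand-off pattern the OLD column uses, which the "Done" replies appear to point back to (an assumption on my part; the revised patch set is not shown in this excerpt).

```cpp
// Sketch: register and unregister channel filters by posting to the IO task
// runner, so GpuChannelMessageFilter and the wrapped IPC::MessageFilter are
// only touched on the IO thread. This mirrors the OLD column of the diff.
void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
  io_task_runner_->PostTask(
      FROM_HERE, base::Bind(&GpuChannelMessageFilter::AddChannelFilter,
                            filter_, make_scoped_refptr(filter)));
}

void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
  io_task_runner_->PostTask(
      FROM_HERE, base::Bind(&GpuChannelMessageFilter::RemoveChannelFilter,
                            filter_, make_scoped_refptr(filter)));
}
```
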
| 909 uint64_t GpuChannel::GetMemoryUsage() { | 894 uint64_t GpuChannel::GetMemoryUsage() { |
| 910 // Collect the unique memory trackers in use by the |stubs_|. | 895 // Collect the unique memory trackers in use by the |stubs_|. |
| 911 std::set<gles2::MemoryTracker*> unique_memory_trackers; | 896 std::set<gles2::MemoryTracker*> unique_memory_trackers; |
| 912 for (auto& kv : stubs_) | 897 for (auto& kv : stubs_) |
| 913 unique_memory_trackers.insert(kv.second->GetMemoryTracker()); | 898 unique_memory_trackers.insert(kv.second->GetMemoryTracker()); |
| 914 | 899 |
| 915 // Sum the memory usage for all unique memory trackers. | 900 // Sum the memory usage for all unique memory trackers. |
| 916 uint64_t size = 0; | 901 uint64_t size = 0; |
| (...skipping 31 matching lines...) | |
| 948 | 933 |
| 949 return manager->gpu_memory_buffer_factory() | 934 return manager->gpu_memory_buffer_factory() |
| 950 ->AsImageFactory() | 935 ->AsImageFactory() |
| 951 ->CreateImageForGpuMemoryBuffer(handle, size, format, internalformat, | 936 ->CreateImageForGpuMemoryBuffer(handle, size, format, internalformat, |
| 952 client_id_, surface_handle); | 937 client_id_, surface_handle); |
| 953 } | 938 } |
| 954 } | 939 } |
| 955 } | 940 } |
| 956 | 941 |
| 957 } // namespace gpu | 942 } // namespace gpu |