| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "gpu/ipc/service/gpu_channel.h" | 5 #include "gpu/ipc/service/gpu_channel.h" |
| 6 | 6 |
| 7 #include <utility> | 7 #include <utility> |
| 8 | 8 |
| 9 #if defined(OS_WIN) | 9 #if defined(OS_WIN) |
| 10 #include <windows.h> | 10 #include <windows.h> |
| (...skipping 53 matching lines...) |
| 64 // below this threshold. | 64 // below this threshold. |
| 65 const int64_t kStopPreemptThresholdMs = kVsyncIntervalMs; | 65 const int64_t kStopPreemptThresholdMs = kVsyncIntervalMs; |
| 66 | 66 |
| 67 CommandBufferId GenerateCommandBufferId(int channel_id, int32_t route_id) { | 67 CommandBufferId GenerateCommandBufferId(int channel_id, int32_t route_id) { |
| 68 return CommandBufferId::FromUnsafeValue( | 68 return CommandBufferId::FromUnsafeValue( |
| 69 (static_cast<uint64_t>(channel_id) << 32) | route_id); | 69 (static_cast<uint64_t>(channel_id) << 32) | route_id); |
| 70 } | 70 } |
| 71 | 71 |
| 72 } // anonymous namespace | 72 } // anonymous namespace |
| 73 | 73 |
| 74 scoped_refptr<GpuChannelMessageQueue> GpuChannelMessageQueue::Create( | |
| 75 GpuChannel* channel, | |
| 76 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner, | |
| 77 scoped_refptr<PreemptionFlag> preempting_flag, | |
| 78 scoped_refptr<PreemptionFlag> preempted_flag, | |
| 79 SyncPointManager* sync_point_manager) { | |
| 80 return new GpuChannelMessageQueue( | |
| 81 channel, std::move(io_task_runner), std::move(preempting_flag), | |
| 82 std::move(preempted_flag), sync_point_manager); | |
| 83 } | |
| 84 | |
| 85 GpuChannelMessageQueue::GpuChannelMessageQueue( | 74 GpuChannelMessageQueue::GpuChannelMessageQueue( |
| 86 GpuChannel* channel, | 75 GpuChannel* channel, |
| 76 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner, |
| 87 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner, | 77 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner, |
| 88 scoped_refptr<PreemptionFlag> preempting_flag, | 78 scoped_refptr<PreemptionFlag> preempting_flag, |
| 89 scoped_refptr<PreemptionFlag> preempted_flag, | 79 scoped_refptr<PreemptionFlag> preempted_flag, |
| 90 SyncPointManager* sync_point_manager) | 80 SyncPointManager* sync_point_manager) |
| 91 : enabled_(true), | 81 : channel_(channel), |
| 92 scheduled_(true), | |
| 93 channel_(channel), | |
| 94 preemption_state_(IDLE), | |
| 95 max_preemption_time_( | 82 max_preemption_time_( |
| 96 base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs)), | 83 base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs)), |
| 97 timer_(new base::OneShotTimer), | 84 timer_(new base::OneShotTimer), |
| 98 sync_point_order_data_(sync_point_manager->CreateSyncPointOrderData()), | 85 sync_point_order_data_(sync_point_manager->CreateSyncPointOrderData()), |
| 86 main_task_runner_(std::move(main_task_runner)), |
| 99 io_task_runner_(std::move(io_task_runner)), | 87 io_task_runner_(std::move(io_task_runner)), |
| 100 preempting_flag_(std::move(preempting_flag)), | 88 preempting_flag_(std::move(preempting_flag)), |
| 101 preempted_flag_(std::move(preempted_flag)), | 89 preempted_flag_(std::move(preempted_flag)), |
| 102 sync_point_manager_(sync_point_manager) { | 90 sync_point_manager_(sync_point_manager) { |
| 103 timer_->SetTaskRunner(io_task_runner_); | 91 timer_->SetTaskRunner(io_task_runner_); |
| 104 io_thread_checker_.DetachFromThread(); | 92 io_thread_checker_.DetachFromThread(); |
| 105 } | 93 } |
| 106 | 94 |
| 107 GpuChannelMessageQueue::~GpuChannelMessageQueue() { | 95 GpuChannelMessageQueue::~GpuChannelMessageQueue() { |
| 108 DCHECK(!enabled_); | |
| 109 DCHECK(channel_messages_.empty()); | 96 DCHECK(channel_messages_.empty()); |
| 110 } | 97 } |
| 111 | 98 |
| 112 void GpuChannelMessageQueue::Disable() { | 99 void GpuChannelMessageQueue::Destroy() { |
| 113 { | 100 // We guarantee that the queue will no longer be modified after Destroy is |
| 114 base::AutoLock auto_lock(channel_lock_); | 101 // called: the filter is destroyed first, so no more messages can be pushed, |
| 115 DCHECK(enabled_); | 102 // and it is therefore safe to modify the queue here without taking the |
| 116 enabled_ = false; | 103 // lock. |
| 117 } | |
| 118 | |
| 119 // We guarantee that the queues will no longer be modified after enabled_ | |
| 120 // is set to false, it is now safe to modify the queue without the lock. | |
| 121 // All public facing modifying functions check enabled_ while all | |
| 122 // private modifying functions DCHECK(enabled_) to enforce this. | |
| 123 while (!channel_messages_.empty()) { | 104 while (!channel_messages_.empty()) { |
| 124 const IPC::Message& msg = channel_messages_.front()->message; | 105 const IPC::Message& msg = channel_messages_.front()->message; |
| 125 if (msg.is_sync()) { | 106 if (msg.is_sync()) { |
| 126 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&msg); | 107 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&msg); |
| 127 reply->set_reply_error(); | 108 reply->set_reply_error(); |
| 128 channel_->Send(reply); | 109 channel_->Send(reply); |
| 129 } | 110 } |
| 130 channel_messages_.pop_front(); | 111 channel_messages_.pop_front(); |
| 131 } | 112 } |
| 132 | 113 |
| 133 if (sync_point_order_data_) { | 114 sync_point_order_data_->Destroy(); |
| 134 sync_point_order_data_->Destroy(); | |
| 135 sync_point_order_data_ = nullptr; | |
| 136 } | |
| 137 | 115 |
| 116 // Destroy timer on io thread. |
| 138 io_task_runner_->PostTask( | 117 io_task_runner_->PostTask( |
| 139 FROM_HERE, base::Bind(&GpuChannelMessageQueue::DisableIO, this)); | 118 FROM_HERE, base::Bind([](std::unique_ptr<base::OneShotTimer>) {}, |
| 140 } | 119 base::Passed(&timer_))); |
| 141 | 120 |
| 142 void GpuChannelMessageQueue::DisableIO() { | 121 channel_ = nullptr; |
| 143 DCHECK(io_thread_checker_.CalledOnValidThread()); | |
| 144 timer_ = nullptr; | |
| 145 } | 122 } |
| 146 | 123 |
| 147 bool GpuChannelMessageQueue::IsScheduled() const { | 124 bool GpuChannelMessageQueue::IsScheduled() const { |
| 148 base::AutoLock lock(channel_lock_); | 125 base::AutoLock lock(channel_lock_); |
| 149 return scheduled_; | 126 return scheduled_; |
| 150 } | 127 } |
| 151 | 128 |
| 152 void GpuChannelMessageQueue::SetScheduled(bool scheduled) { | 129 void GpuChannelMessageQueue::SetScheduled(bool scheduled) { |
| 153 base::AutoLock lock(channel_lock_); | 130 base::AutoLock lock(channel_lock_); |
| 154 DCHECK(enabled_); | |
| 155 if (scheduled_ == scheduled) | 131 if (scheduled_ == scheduled) |
| 156 return; | 132 return; |
| 157 scheduled_ = scheduled; | 133 scheduled_ = scheduled; |
| 158 if (scheduled) | 134 if (scheduled) |
| 159 channel_->PostHandleMessage(); | 135 PostHandleMessageOnQueue(); |
| 160 if (preempting_flag_) { | 136 if (preempting_flag_) { |
| 161 io_task_runner_->PostTask( | 137 io_task_runner_->PostTask( |
| 162 FROM_HERE, | 138 FROM_HERE, |
| 163 base::Bind(&GpuChannelMessageQueue::UpdatePreemptionState, this)); | 139 base::Bind(&GpuChannelMessageQueue::UpdatePreemptionState, this)); |
| 164 } | 140 } |
| 165 } | 141 } |
| 166 | 142 |
| 167 bool GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) { | 143 void GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) { |
| 168 base::AutoLock auto_lock(channel_lock_); | 144 base::AutoLock auto_lock(channel_lock_); |
| 169 if (enabled_) { | 145 DCHECK(channel_); |
| 170 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || | 146 uint32_t order_num = sync_point_order_data_->GenerateUnprocessedOrderNumber(); |
| 171 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { | 147 std::unique_ptr<GpuChannelMessage> msg( |
| 172 channel_->PostHandleOutOfOrderMessage(message); | 148 new GpuChannelMessage(message, order_num, base::TimeTicks::Now())); |
| 173 return true; | |
| 174 } | |
| 175 | 149 |
| 176 uint32_t order_num = | 150 channel_messages_.push_back(std::move(msg)); |
| 177 sync_point_order_data_->GenerateUnprocessedOrderNumber(); | |
| 178 std::unique_ptr<GpuChannelMessage> msg( | |
| 179 new GpuChannelMessage(message, order_num, base::TimeTicks::Now())); | |
| 180 | 151 |
| 181 if (channel_messages_.empty()) { | 152 bool first_message = channel_messages_.size() == 1; |
| 182 DCHECK(scheduled_); | 153 if (first_message) |
| 183 channel_->PostHandleMessage(); | 154 PostHandleMessageOnQueue(); |
| 184 } | |
| 185 | 155 |
| 186 channel_messages_.push_back(std::move(msg)); | 156 if (preempting_flag_) |
| 157 UpdatePreemptionStateHelper(); |
| 158 } |
| 187 | 159 |
| 188 if (preempting_flag_) | 160 void GpuChannelMessageQueue::PostHandleMessageOnQueue() { |
| 189 UpdatePreemptionStateHelper(); | 161 channel_lock_.AssertAcquired(); |
| 190 | 162 DCHECK(channel_); |
| 191 return true; | 163 DCHECK(scheduled_); |
| 192 } | 164 DCHECK(!channel_messages_.empty()); |
| 193 return false; | 165 DCHECK(!handle_message_post_task_pending_); |
| 166 handle_message_post_task_pending_ = true; |
| 167 main_task_runner_->PostTask( |
| 168 FROM_HERE, |
| 169 base::Bind(&GpuChannel::HandleMessageOnQueue, channel_->AsWeakPtr())); |
| 194 } | 170 } |
| 195 | 171 |
| 196 const GpuChannelMessage* GpuChannelMessageQueue::BeginMessageProcessing() { | 172 const GpuChannelMessage* GpuChannelMessageQueue::BeginMessageProcessing() { |
| 197 base::AutoLock auto_lock(channel_lock_); | 173 base::AutoLock auto_lock(channel_lock_); |
| 198 DCHECK(enabled_); | 174 DCHECK(channel_); |
| 175 DCHECK(scheduled_); |
| 176 DCHECK(!channel_messages_.empty()); |
| 177 handle_message_post_task_pending_ = false; |
| 199 // If we have been preempted by another channel, just post a task to wake up. | 178 // If we have been preempted by another channel, just post a task to wake up. |
| 200 if (preempted_flag_ && preempted_flag_->IsSet()) { | 179 if (preempted_flag_ && preempted_flag_->IsSet()) { |
| 201 channel_->PostHandleMessage(); | 180 PostHandleMessageOnQueue(); |
| 202 return nullptr; | 181 return nullptr; |
| 203 } | 182 } |
| 204 if (channel_messages_.empty()) | |
| 205 return nullptr; | |
| 206 sync_point_order_data_->BeginProcessingOrderNumber( | 183 sync_point_order_data_->BeginProcessingOrderNumber( |
| 207 channel_messages_.front()->order_number); | 184 channel_messages_.front()->order_number); |
| 208 return channel_messages_.front().get(); | 185 return channel_messages_.front().get(); |
| 209 } | 186 } |
| 210 | 187 |
| 211 void GpuChannelMessageQueue::PauseMessageProcessing() { | 188 void GpuChannelMessageQueue::PauseMessageProcessing() { |
| 212 base::AutoLock auto_lock(channel_lock_); | 189 base::AutoLock auto_lock(channel_lock_); |
| 213 DCHECK(!channel_messages_.empty()); | 190 DCHECK(!channel_messages_.empty()); |
| 214 | 191 |
| 215 // If we are still scheduled, post a task to continue processing messages. | 192 // If we are still scheduled, post a task to continue processing messages. |
| 216 if (scheduled_) | 193 if (scheduled_) |
| 217 channel_->PostHandleMessage(); | 194 PostHandleMessageOnQueue(); |
| 218 | 195 |
| 219 sync_point_order_data_->PauseProcessingOrderNumber( | 196 sync_point_order_data_->PauseProcessingOrderNumber( |
| 220 channel_messages_.front()->order_number); | 197 channel_messages_.front()->order_number); |
| 221 } | 198 } |
| 222 | 199 |
| 223 void GpuChannelMessageQueue::FinishMessageProcessing() { | 200 void GpuChannelMessageQueue::FinishMessageProcessing() { |
| 224 base::AutoLock auto_lock(channel_lock_); | 201 base::AutoLock auto_lock(channel_lock_); |
| 225 DCHECK(!channel_messages_.empty()); | 202 DCHECK(!channel_messages_.empty()); |
| 226 DCHECK(scheduled_); | 203 DCHECK(scheduled_); |
| 227 | 204 |
| 228 sync_point_order_data_->FinishProcessingOrderNumber( | 205 sync_point_order_data_->FinishProcessingOrderNumber( |
| 229 channel_messages_.front()->order_number); | 206 channel_messages_.front()->order_number); |
| 230 channel_messages_.pop_front(); | 207 channel_messages_.pop_front(); |
| 231 | 208 |
| 232 if (!channel_messages_.empty()) | 209 if (!channel_messages_.empty()) |
| 233 channel_->PostHandleMessage(); | 210 PostHandleMessageOnQueue(); |
| 234 | 211 |
| 235 if (preempting_flag_) { | 212 if (preempting_flag_) { |
| 236 io_task_runner_->PostTask( | 213 io_task_runner_->PostTask( |
| 237 FROM_HERE, | 214 FROM_HERE, |
| 238 base::Bind(&GpuChannelMessageQueue::UpdatePreemptionState, this)); | 215 base::Bind(&GpuChannelMessageQueue::UpdatePreemptionState, this)); |
| 239 } | 216 } |
| 240 } | 217 } |
| 241 | 218 |
| 242 void GpuChannelMessageQueue::UpdatePreemptionState() { | 219 void GpuChannelMessageQueue::UpdatePreemptionState() { |
| 243 DCHECK(io_thread_checker_.CalledOnValidThread()); | 220 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| (...skipping 180 matching lines...) |
| 424 channel_lock_.AssertAcquired(); | 401 channel_lock_.AssertAcquired(); |
| 425 DCHECK(preemption_state_ == CHECKING || preemption_state_ == PREEMPTING); | 402 DCHECK(preemption_state_ == CHECKING || preemption_state_ == PREEMPTING); |
| 426 DCHECK(!scheduled_); | 403 DCHECK(!scheduled_); |
| 427 | 404 |
| 428 preemption_state_ = WOULD_PREEMPT_DESCHEDULED; | 405 preemption_state_ = WOULD_PREEMPT_DESCHEDULED; |
| 429 preempting_flag_->Reset(); | 406 preempting_flag_->Reset(); |
| 430 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); | 407 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); |
| 431 } | 408 } |
| 432 | 409 |
| 433 GpuChannelMessageFilter::GpuChannelMessageFilter( | 410 GpuChannelMessageFilter::GpuChannelMessageFilter( |
| 434 scoped_refptr<GpuChannelMessageQueue> message_queue) | 411 GpuChannel* gpu_channel, |
| 435 : message_queue_(std::move(message_queue)), | 412 scoped_refptr<GpuChannelMessageQueue> message_queue, |
| 436 channel_(nullptr), | 413 scoped_refptr<base::SingleThreadTaskRunner> main_task_runner) |
| 437 peer_pid_(base::kNullProcessId) {} | 414 : gpu_channel_(gpu_channel), |
| 415 message_queue_(std::move(message_queue)), |
| 416 main_task_runner_(std::move(main_task_runner)) {} |
| 438 | 417 |
| 439 GpuChannelMessageFilter::~GpuChannelMessageFilter() {} | 418 GpuChannelMessageFilter::~GpuChannelMessageFilter() { |
| 419 DCHECK(!gpu_channel_); |
| 420 } |
| 421 |
| 422 void GpuChannelMessageFilter::Destroy() { |
| 423 base::AutoLock auto_lock(gpu_channel_lock_); |
| 424 gpu_channel_ = nullptr; |
| 425 } |
| 440 | 426 |
| 441 void GpuChannelMessageFilter::OnFilterAdded(IPC::Channel* channel) { | 427 void GpuChannelMessageFilter::OnFilterAdded(IPC::Channel* channel) { |
| 442 DCHECK(!channel_); | 428 DCHECK(!ipc_channel_); |
| 443 channel_ = channel; | 429 ipc_channel_ = channel; |
| 444 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | 430 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) |
| 445 filter->OnFilterAdded(channel_); | 431 filter->OnFilterAdded(ipc_channel_); |
| 446 } | |
| 447 } | 432 } |
| 448 | 433 |
| 449 void GpuChannelMessageFilter::OnFilterRemoved() { | 434 void GpuChannelMessageFilter::OnFilterRemoved() { |
| 450 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | 435 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) |
| 451 filter->OnFilterRemoved(); | 436 filter->OnFilterRemoved(); |
| 452 } | 437 ipc_channel_ = nullptr; |
| 453 channel_ = nullptr; | |
| 454 peer_pid_ = base::kNullProcessId; | 438 peer_pid_ = base::kNullProcessId; |
| 455 } | 439 } |
| 456 | 440 |
| 457 void GpuChannelMessageFilter::OnChannelConnected(int32_t peer_pid) { | 441 void GpuChannelMessageFilter::OnChannelConnected(int32_t peer_pid) { |
| 458 DCHECK(peer_pid_ == base::kNullProcessId); | 442 DCHECK(peer_pid_ == base::kNullProcessId); |
| 459 peer_pid_ = peer_pid; | 443 peer_pid_ = peer_pid; |
| 460 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | 444 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) |
| 461 filter->OnChannelConnected(peer_pid); | 445 filter->OnChannelConnected(peer_pid); |
| 462 } | |
| 463 } | 446 } |
| 464 | 447 |
| 465 void GpuChannelMessageFilter::OnChannelError() { | 448 void GpuChannelMessageFilter::OnChannelError() { |
| 466 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | 449 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) |
| 467 filter->OnChannelError(); | 450 filter->OnChannelError(); |
| 468 } | |
| 469 } | 451 } |
| 470 | 452 |
| 471 void GpuChannelMessageFilter::OnChannelClosing() { | 453 void GpuChannelMessageFilter::OnChannelClosing() { |
| 472 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | 454 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) |
| 473 filter->OnChannelClosing(); | 455 filter->OnChannelClosing(); |
| 474 } | |
| 475 } | 456 } |
| 476 | 457 |
| 477 void GpuChannelMessageFilter::AddChannelFilter( | 458 void GpuChannelMessageFilter::AddChannelFilter( |
| 478 scoped_refptr<IPC::MessageFilter> filter) { | 459 scoped_refptr<IPC::MessageFilter> filter) { |
| 479 channel_filters_.push_back(filter); | 460 channel_filters_.push_back(filter); |
| 480 if (channel_) | 461 if (ipc_channel_) |
| 481 filter->OnFilterAdded(channel_); | 462 filter->OnFilterAdded(ipc_channel_); |
| 482 if (peer_pid_ != base::kNullProcessId) | 463 if (peer_pid_ != base::kNullProcessId) |
| 483 filter->OnChannelConnected(peer_pid_); | 464 filter->OnChannelConnected(peer_pid_); |
| 484 } | 465 } |
| 485 | 466 |
| 486 void GpuChannelMessageFilter::RemoveChannelFilter( | 467 void GpuChannelMessageFilter::RemoveChannelFilter( |
| 487 scoped_refptr<IPC::MessageFilter> filter) { | 468 scoped_refptr<IPC::MessageFilter> filter) { |
| 488 if (channel_) | 469 if (ipc_channel_) |
| 489 filter->OnFilterRemoved(); | 470 filter->OnFilterRemoved(); |
| 490 channel_filters_.erase( | 471 base::Erase(channel_filters_, filter); |
| 491 std::find(channel_filters_.begin(), channel_filters_.end(), filter)); | |
| 492 } | 472 } |
| 493 | 473 |
| 494 bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { | 474 bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { |
| 495 DCHECK(channel_); | 475 DCHECK(ipc_channel_); |
| 496 | 476 |
| 497 if (message.should_unblock() || message.is_reply()) | 477 if (message.should_unblock() || message.is_reply()) |
| 498 return MessageErrorHandler(message, "Unexpected message type"); | 478 return MessageErrorHandler(message, "Unexpected message type"); |
| 499 | 479 |
| 500 if (message.type() == GpuChannelMsg_Nop::ID) { | 480 if (message.type() == GpuChannelMsg_Nop::ID) { |
| 501 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); | 481 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
| 502 Send(reply); | 482 Send(reply); |
| 503 return true; | 483 return true; |
| 504 } | 484 } |
| 505 | 485 |
| 506 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | 486 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { |
| 507 if (filter->OnMessageReceived(message)) | 487 if (filter->OnMessageReceived(message)) |
| 508 return true; | 488 return true; |
| 509 } | 489 } |
| 510 | 490 |
| 511 if (!message_queue_->PushBackMessage(message)) | 491 base::AutoLock auto_lock(gpu_channel_lock_); |
| 492 if (!gpu_channel_) |
| 512 return MessageErrorHandler(message, "Channel destroyed"); | 493 return MessageErrorHandler(message, "Channel destroyed"); |
| 513 | 494 |
| 495 if (message.routing_id() == MSG_ROUTING_CONTROL || |
| 496 message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || |
| 497 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { |
| 498 // It's OK to post a task that may never run, even for sync messages, |
| 499 // because if the channel is destroyed, the client's Send() will fail. |
| 500 main_task_runner_->PostTask(FROM_HERE, |
| 501 base::Bind(&GpuChannel::HandleOutOfOrderMessage, |
| 502 gpu_channel_->AsWeakPtr(), message)); |
| 503 } else { |
| 504 // Message queue takes care of PostTask. |
| 505 message_queue_->PushBackMessage(message); |
| 506 } |
| 507 |
| 514 return true; | 508 return true; |
| 515 } | 509 } |
| 516 | 510 |
| 517 bool GpuChannelMessageFilter::Send(IPC::Message* message) { | 511 bool GpuChannelMessageFilter::Send(IPC::Message* message) { |
| 518 return channel_->Send(message); | 512 return ipc_channel_->Send(message); |
| 519 } | 513 } |
| 520 | 514 |
| 521 bool GpuChannelMessageFilter::MessageErrorHandler(const IPC::Message& message, | 515 bool GpuChannelMessageFilter::MessageErrorHandler(const IPC::Message& message, |
| 522 const char* error_msg) { | 516 const char* error_msg) { |
| 523 DLOG(ERROR) << error_msg; | 517 DLOG(ERROR) << error_msg; |
| 524 if (message.is_sync()) { | 518 if (message.is_sync()) { |
| 525 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); | 519 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
| 526 reply->set_reply_error(); | 520 reply->set_reply_error(); |
| 527 Send(reply); | 521 Send(reply); |
| 528 } | 522 } |
| (...skipping 14 matching lines...) |
| 543 PreemptionFlag* preempting_flag, | 537 PreemptionFlag* preempting_flag, |
| 544 PreemptionFlag* preempted_flag, | 538 PreemptionFlag* preempted_flag, |
| 545 base::SingleThreadTaskRunner* task_runner, | 539 base::SingleThreadTaskRunner* task_runner, |
| 546 base::SingleThreadTaskRunner* io_task_runner, | 540 base::SingleThreadTaskRunner* io_task_runner, |
| 547 int32_t client_id, | 541 int32_t client_id, |
| 548 uint64_t client_tracing_id, | 542 uint64_t client_tracing_id, |
| 549 bool allow_view_command_buffers, | 543 bool allow_view_command_buffers, |
| 550 bool allow_real_time_streams) | 544 bool allow_real_time_streams) |
| 551 : gpu_channel_manager_(gpu_channel_manager), | 545 : gpu_channel_manager_(gpu_channel_manager), |
| 552 sync_point_manager_(sync_point_manager), | 546 sync_point_manager_(sync_point_manager), |
| 553 unhandled_message_listener_(nullptr), | |
| 554 preempting_flag_(preempting_flag), | 547 preempting_flag_(preempting_flag), |
| 555 preempted_flag_(preempted_flag), | 548 preempted_flag_(preempted_flag), |
| 556 client_id_(client_id), | 549 client_id_(client_id), |
| 557 client_tracing_id_(client_tracing_id), | 550 client_tracing_id_(client_tracing_id), |
| 558 task_runner_(task_runner), | 551 task_runner_(task_runner), |
| 559 io_task_runner_(io_task_runner), | 552 io_task_runner_(io_task_runner), |
| 560 share_group_(share_group), | 553 share_group_(share_group), |
| 561 mailbox_manager_(mailbox), | 554 mailbox_manager_(mailbox), |
| 562 watchdog_(watchdog), | 555 watchdog_(watchdog), |
| 563 allow_view_command_buffers_(allow_view_command_buffers), | 556 allow_view_command_buffers_(allow_view_command_buffers), |
| 564 allow_real_time_streams_(allow_real_time_streams), | 557 allow_real_time_streams_(allow_real_time_streams), |
| 565 weak_factory_(this) { | 558 weak_factory_(this) { |
| 566 DCHECK(gpu_channel_manager); | 559 DCHECK(gpu_channel_manager); |
| 567 DCHECK(client_id); | 560 DCHECK(client_id); |
| 568 | 561 |
| 569 message_queue_ = | 562 message_queue_ = new GpuChannelMessageQueue(this, task_runner, io_task_runner, |
| 570 GpuChannelMessageQueue::Create(this, io_task_runner, preempting_flag, | 563 preempting_flag, preempted_flag, |
| 571 preempted_flag, sync_point_manager); | 564 sync_point_manager); |
| 572 | 565 |
| 573 filter_ = new GpuChannelMessageFilter(message_queue_); | 566 filter_ = new GpuChannelMessageFilter(this, message_queue_, task_runner); |
| 574 } | 567 } |
| 575 | 568 |
| 576 GpuChannel::~GpuChannel() { | 569 GpuChannel::~GpuChannel() { |
| 577 // Clear stubs first because of dependencies. | 570 // Clear stubs first because of dependencies. |
| 578 stubs_.clear(); | 571 stubs_.clear(); |
| 579 | 572 |
| 580 message_queue_->Disable(); | 573 // Destroy the filter first so that the message queue gets no more messages. |
| 574 filter_->Destroy(); |
| 575 |
| 576 message_queue_->Destroy(); |
| 581 | 577 |
| 582 if (preempting_flag_.get()) | 578 if (preempting_flag_.get()) |
| 583 preempting_flag_->Reset(); | 579 preempting_flag_->Reset(); |
| 584 } | 580 } |
| 585 | 581 |
| 586 IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event) { | 582 IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event) { |
| 587 DCHECK(shutdown_event); | 583 DCHECK(shutdown_event); |
| 588 DCHECK(!channel_); | 584 DCHECK(!channel_); |
| 589 | 585 |
| 590 mojo::MessagePipe pipe; | 586 mojo::MessagePipe pipe; |
| (...skipping 94 matching lines...) |
| 685 OnCreateCommandBuffer) | 681 OnCreateCommandBuffer) |
| 686 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer, | 682 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer, |
| 687 OnDestroyCommandBuffer) | 683 OnDestroyCommandBuffer) |
| 688 IPC_MESSAGE_HANDLER(GpuChannelMsg_GetDriverBugWorkArounds, | 684 IPC_MESSAGE_HANDLER(GpuChannelMsg_GetDriverBugWorkArounds, |
| 689 OnGetDriverBugWorkArounds) | 685 OnGetDriverBugWorkArounds) |
| 690 IPC_MESSAGE_UNHANDLED(handled = false) | 686 IPC_MESSAGE_UNHANDLED(handled = false) |
| 691 IPC_END_MESSAGE_MAP() | 687 IPC_END_MESSAGE_MAP() |
| 692 return handled; | 688 return handled; |
| 693 } | 689 } |
| 694 | 690 |
| 695 void GpuChannel::PostHandleMessage() { | 691 void GpuChannel::HandleMessageOnQueue() { |
| 696 task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage, | |
| 697 weak_factory_.GetWeakPtr())); | |
| 698 } | |
| 699 | |
| 700 void GpuChannel::PostHandleOutOfOrderMessage(const IPC::Message& msg) { | |
| 701 task_runner_->PostTask(FROM_HERE, | |
| 702 base::Bind(&GpuChannel::HandleOutOfOrderMessage, | |
| 703 weak_factory_.GetWeakPtr(), msg)); | |
| 704 } | |
| 705 | |
| 706 void GpuChannel::HandleMessage() { | |
| 707 const GpuChannelMessage* channel_msg = | 692 const GpuChannelMessage* channel_msg = |
| 708 message_queue_->BeginMessageProcessing(); | 693 message_queue_->BeginMessageProcessing(); |
| 709 if (!channel_msg) | 694 if (!channel_msg) |
| 710 return; | 695 return; |
| 711 | 696 |
| 712 const IPC::Message& msg = channel_msg->message; | 697 const IPC::Message& msg = channel_msg->message; |
| 713 int32_t routing_id = msg.routing_id(); | 698 int32_t routing_id = msg.routing_id(); |
| 714 GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id); | 699 GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id); |
| 715 | 700 |
| 716 DCHECK(!stub || stub->IsScheduled()); | 701 DCHECK(!stub || stub->IsScheduled()); |
| (...skipping 172 matching lines...) |
| 889 #undef GPU_OP | 874 #undef GPU_OP |
| 890 } | 875 } |
| 891 | 876 |
| 892 void GpuChannel::CacheShader(const std::string& key, | 877 void GpuChannel::CacheShader(const std::string& key, |
| 893 const std::string& shader) { | 878 const std::string& shader) { |
| 894 gpu_channel_manager_->delegate()->StoreShaderToDisk(client_id_, key, shader); | 879 gpu_channel_manager_->delegate()->StoreShaderToDisk(client_id_, key, shader); |
| 895 } | 880 } |
| 896 | 881 |
| 897 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { | 882 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { |
| 898 io_task_runner_->PostTask( | 883 io_task_runner_->PostTask( |
| 899 FROM_HERE, base::Bind(&GpuChannelMessageFilter::AddChannelFilter, filter_, | 884 FROM_HERE, |
| 900 make_scoped_refptr(filter))); | 885 base::Bind(&GpuChannelMessageFilter::AddChannelFilter, filter_, filter)); |
| 901 } | 886 } |
| 902 | 887 |
| 903 void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) { | 888 void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) { |
| 904 io_task_runner_->PostTask( | 889 io_task_runner_->PostTask( |
| 905 FROM_HERE, base::Bind(&GpuChannelMessageFilter::RemoveChannelFilter, | 890 FROM_HERE, base::Bind(&GpuChannelMessageFilter::RemoveChannelFilter, |
| 906 filter_, make_scoped_refptr(filter))); | 891 filter_, filter)); |
| 907 } | 892 } |
| 908 | 893 |
| 909 uint64_t GpuChannel::GetMemoryUsage() { | 894 uint64_t GpuChannel::GetMemoryUsage() { |
| 910 // Collect the unique memory trackers in use by the |stubs_|. | 895 // Collect the unique memory trackers in use by the |stubs_|. |
| 911 std::set<gles2::MemoryTracker*> unique_memory_trackers; | 896 std::set<gles2::MemoryTracker*> unique_memory_trackers; |
| 912 for (auto& kv : stubs_) | 897 for (auto& kv : stubs_) |
| 913 unique_memory_trackers.insert(kv.second->GetMemoryTracker()); | 898 unique_memory_trackers.insert(kv.second->GetMemoryTracker()); |
| 914 | 899 |
| 915 // Sum the memory usage for all unique memory trackers. | 900 // Sum the memory usage for all unique memory trackers. |
| 916 uint64_t size = 0; | 901 uint64_t size = 0; |
| (...skipping 31 matching lines...) |
| 948 | 933 |
| 949 return manager->gpu_memory_buffer_factory() | 934 return manager->gpu_memory_buffer_factory() |
| 950 ->AsImageFactory() | 935 ->AsImageFactory() |
| 951 ->CreateImageForGpuMemoryBuffer(handle, size, format, internalformat, | 936 ->CreateImageForGpuMemoryBuffer(handle, size, format, internalformat, |
| 952 client_id_, surface_handle); | 937 client_id_, surface_handle); |
| 953 } | 938 } |
| 954 } | 939 } |
| 955 } | 940 } |
| 956 | 941 |
| 957 } // namespace gpu | 942 } // namespace gpu |
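A note on the pattern used in GpuChannelMessageQueue::Destroy(): the timer must be destroyed on the IO thread, so ownership of the std::unique_ptr is moved into a task posted to that thread via base::Passed, and the timer's destructor runs wherever that task (or the task queue holding it) is destroyed. Below is a minimal standard-library sketch of the same hand-off, assuming nothing from Chromium's base; IoThread and FakeTimer are hypothetical stand-ins, and the shared_ptr capture is only a workaround for std::function requiring a copyable callable, which is exactly the limitation base::Passed addresses for move-only types in base::Callback.

```cpp
// Sketch only: "destroy on the owning thread" hand-off, standard C++.
#include <condition_variable>
#include <cstdio>
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>

struct FakeTimer {
  ~FakeTimer() { std::printf("FakeTimer destroyed on the io thread\n"); }
};

// Toy single-thread task runner: tasks posted from any thread run in FIFO
// order on one worker thread, roughly what SingleThreadTaskRunner guarantees.
class IoThread {
 public:
  IoThread() : worker_([this] { Run(); }) {}
  ~IoThread() {
    PostTask(nullptr);  // A null task acts as the quit signal.
    worker_.join();
  }

  void PostTask(std::function<void()> task) {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      tasks_.push(std::move(task));
    }
    cv_.notify_one();
  }

 private:
  void Run() {
    for (;;) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !tasks_.empty(); });
        task = std::move(tasks_.front());
        tasks_.pop();
      }
      if (!task)
        return;
      task();  // Captured state (the timer) is released on this thread.
    }
  }

  std::mutex mutex_;
  std::condition_variable cv_;
  std::queue<std::function<void()>> tasks_;
  std::thread worker_;
};

int main() {
  IoThread io_thread;
  auto timer = std::make_unique<FakeTimer>();
  // Hand ownership to the IO thread. std::function needs a copyable callable,
  // so the unique_ptr is wrapped in a shared_ptr here; base::Passed lets
  // Chromium move the unique_ptr directly into the posted callback instead.
  std::shared_ptr<FakeTimer> owned(std::move(timer));
  io_thread.PostTask([owned = std::move(owned)] {});  // Dies on the IO thread.
  return 0;
}
```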
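The other recurring idiom in this change is binding posted tasks to channel_->AsWeakPtr() (or gpu_channel_->AsWeakPtr() in the filter) so that HandleMessageOnQueue and HandleOutOfOrderMessage simply never run once the GpuChannel is gone; the "may never run" comment in OnMessageReceived relies on exactly this drop-if-destroyed behavior. A rough standard-library analogue is sketched below, using std::weak_ptr on a shared_ptr-owned channel rather than Chromium's base::WeakPtr (which is additionally bound to a single thread); the Channel type and BindWeak helper are illustrative only.

```cpp
// Sketch only: a task that becomes a no-op if its target has been destroyed.
#include <functional>
#include <iostream>
#include <memory>
#include <vector>

// Hypothetical stand-in for GpuChannel; only what the sketch needs.
struct Channel {
  void HandleMessageOnQueue() { std::cout << "handled\n"; }
};

// Wrap a member call so it silently does nothing once the target is gone,
// similar in spirit to base::Bind(&GpuChannel::HandleMessageOnQueue,
// channel->AsWeakPtr()).
std::function<void()> BindWeak(std::weak_ptr<Channel> weak) {
  return [weak] {
    if (auto channel = weak.lock())
      channel->HandleMessageOnQueue();
    // else: the channel was destroyed after the task was posted; drop it.
  };
}

int main() {
  std::vector<std::function<void()>> task_queue;  // stand-in for the runner

  auto channel = std::make_shared<Channel>();
  task_queue.push_back(BindWeak(channel));
  task_queue.push_back(BindWeak(channel));

  task_queue[0]();   // Channel alive: prints "handled".
  channel.reset();   // Channel destroyed while a task is still queued.
  task_queue[1]();   // No-op instead of a use-after-free.
  return 0;
}
```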