OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "content/common/gpu/gpu_channel.h" |
| 6 |
| 7 #include <utility> |
| 8 |
| 9 #if defined(OS_WIN) |
| 10 #include <windows.h> |
| 11 #endif |
| 12 |
| 13 #include <algorithm> |
| 14 #include <deque> |
| 15 #include <set> |
| 16 #include <vector> |
| 17 |
| 18 #include "base/atomicops.h" |
| 19 #include "base/bind.h" |
| 20 #include "base/command_line.h" |
| 21 #include "base/location.h" |
| 22 #include "base/numerics/safe_conversions.h" |
| 23 #include "base/single_thread_task_runner.h" |
| 24 #include "base/stl_util.h" |
| 25 #include "base/strings/string_util.h" |
| 26 #include "base/synchronization/lock.h" |
| 27 #include "base/thread_task_runner_handle.h" |
| 28 #include "base/timer/timer.h" |
| 29 #include "base/trace_event/memory_dump_manager.h" |
| 30 #include "base/trace_event/process_memory_dump.h" |
| 31 #include "base/trace_event/trace_event.h" |
| 32 #include "build/build_config.h" |
| 33 #include "content/common/gpu/gpu_channel_manager.h" |
| 34 #include "content/common/gpu/gpu_channel_manager_delegate.h" |
| 35 #include "content/common/gpu/gpu_memory_buffer_factory.h" |
| 36 #include "gpu/command_buffer/common/mailbox.h" |
| 37 #include "gpu/command_buffer/common/value_state.h" |
| 38 #include "gpu/command_buffer/service/command_executor.h" |
| 39 #include "gpu/command_buffer/service/image_factory.h" |
| 40 #include "gpu/command_buffer/service/mailbox_manager.h" |
| 41 #include "gpu/command_buffer/service/sync_point_manager.h" |
| 42 #include "gpu/command_buffer/service/valuebuffer_manager.h" |
| 43 #include "gpu/ipc/common/gpu_messages.h" |
| 44 #include "ipc/ipc_channel.h" |
| 45 #include "ipc/message_filter.h" |
| 46 #include "ui/gl/gl_context.h" |
| 47 #include "ui/gl/gl_image_shared_memory.h" |
| 48 #include "ui/gl/gl_surface.h" |
| 49 |
| 50 #if defined(OS_POSIX) |
| 51 #include "ipc/ipc_channel_posix.h" |
| 52 #endif |
| 53 |
| 54 namespace content { |
namespace {

// Number of milliseconds between successive vsync. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64_t kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64_t kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64_t kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64_t kStopPreemptThresholdMs = kVsyncIntervalMs;

}  // anonymous namespace
| 75 |
| 76 scoped_refptr<GpuChannelMessageQueue> GpuChannelMessageQueue::Create( |
| 77 int32_t stream_id, |
| 78 gpu::GpuStreamPriority stream_priority, |
| 79 GpuChannel* channel, |
| 80 const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner, |
| 81 const scoped_refptr<gpu::PreemptionFlag>& preempting_flag, |
| 82 const scoped_refptr<gpu::PreemptionFlag>& preempted_flag, |
| 83 gpu::SyncPointManager* sync_point_manager) { |
| 84 return new GpuChannelMessageQueue(stream_id, stream_priority, channel, |
| 85 io_task_runner, preempting_flag, |
| 86 preempted_flag, sync_point_manager); |
| 87 } |
| 88 |
// Returns the per-stream sync point ordering data. Null after Disable()
// has run (Disable() destroys it and clears the member).
scoped_refptr<gpu::SyncPointOrderData>
GpuChannelMessageQueue::GetSyncPointOrderData() {
  return sync_point_order_data_;
}
| 93 |
// Constructed only via Create(). The queue starts enabled and scheduled.
// The preemption timer is bound to the IO task runner, and the IO thread
// checker is detached here so it binds to whichever thread first runs the
// IO-side methods.
GpuChannelMessageQueue::GpuChannelMessageQueue(
    int32_t stream_id,
    gpu::GpuStreamPriority stream_priority,
    GpuChannel* channel,
    const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
    const scoped_refptr<gpu::PreemptionFlag>& preempting_flag,
    const scoped_refptr<gpu::PreemptionFlag>& preempted_flag,
    gpu::SyncPointManager* sync_point_manager)
    : stream_id_(stream_id),
      stream_priority_(stream_priority),
      enabled_(true),
      scheduled_(true),
      channel_(channel),
      preemption_state_(IDLE),
      // Budget for a single preemption burst; restored to this value each
      // time the state machine returns to IDLE.
      max_preemption_time_(
          base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs)),
      timer_(new base::OneShotTimer),
      sync_point_order_data_(gpu::SyncPointOrderData::Create()),
      io_task_runner_(io_task_runner),
      preempting_flag_(preempting_flag),
      preempted_flag_(preempted_flag),
      sync_point_manager_(sync_point_manager) {
  timer_->SetTaskRunner(io_task_runner);
  io_thread_checker_.DetachFromThread();
}
| 119 |
// Disable() must have run (and drained the queue) before destruction.
GpuChannelMessageQueue::~GpuChannelMessageQueue() {
  DCHECK(!enabled_);
  DCHECK(channel_messages_.empty());
}
| 124 |
// Permanently shuts down the queue: fails any pending sync IPCs, tears
// down the sync point ordering data, and stops the IO-thread timer.
void GpuChannelMessageQueue::Disable() {
  {
    base::AutoLock auto_lock(channel_lock_);
    DCHECK(enabled_);
    enabled_ = false;
  }

  // We guarantee that the queues will no longer be modified after enabled_
  // is set to false, it is now safe to modify the queue without the lock.
  // All public facing modifying functions check enabled_ while all
  // private modifying functions DCHECK(enabled_) to enforce this.
  while (!channel_messages_.empty()) {
    const IPC::Message& msg = channel_messages_.front()->message;
    if (msg.is_sync()) {
      // Sync messages must get a reply or the sender would block forever;
      // reply with an error since the message will never be processed.
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&msg);
      reply->set_reply_error();
      channel_->Send(reply);
    }
    channel_messages_.pop_front();
  }

  sync_point_order_data_->Destroy();
  sync_point_order_data_ = nullptr;

  // The timer lives on the IO thread, so it must be destroyed there.
  io_task_runner_->PostTask(
      FROM_HERE, base::Bind(&GpuChannelMessageQueue::DisableIO, this));
}
| 152 |
// IO-thread half of Disable(): releases the preemption timer on the
// thread it runs on.
void GpuChannelMessageQueue::DisableIO() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  timer_ = nullptr;
}
| 157 |
| 158 bool GpuChannelMessageQueue::IsScheduled() const { |
| 159 base::AutoLock lock(channel_lock_); |
| 160 return scheduled_; |
| 161 } |
| 162 |
// Flips the scheduled state. On becoming scheduled again, reposts the
// message-handling task; either way, lets the IO thread re-evaluate
// preemption (the descheduled state affects the state machine).
void GpuChannelMessageQueue::OnRescheduled(bool scheduled) {
  base::AutoLock lock(channel_lock_);
  DCHECK(enabled_);
  if (scheduled_ == scheduled)
    return;
  scheduled_ = scheduled;
  if (scheduled)
    channel_->PostHandleMessage(this);
  if (preempting_flag_) {
    io_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageQueue::UpdatePreemptionState, this));
  }
}
| 177 |
// Highest order number that has been assigned to a message pushed on this
// queue (whether or not it has run yet).
uint32_t GpuChannelMessageQueue::GetUnprocessedOrderNum() const {
  return sync_point_order_data_->unprocessed_order_num();
}

// Order number of the most recently completed message on this queue.
uint32_t GpuChannelMessageQueue::GetProcessedOrderNum() const {
  return sync_point_order_data_->processed_order_num();
}
| 185 |
| 186 bool GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) { |
| 187 base::AutoLock auto_lock(channel_lock_); |
| 188 if (enabled_) { |
| 189 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || |
| 190 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { |
| 191 channel_->PostHandleOutOfOrderMessage(message); |
| 192 return true; |
| 193 } |
| 194 |
| 195 uint32_t order_num = sync_point_order_data_->GenerateUnprocessedOrderNumber( |
| 196 sync_point_manager_); |
| 197 scoped_ptr<GpuChannelMessage> msg( |
| 198 new GpuChannelMessage(message, order_num, base::TimeTicks::Now())); |
| 199 |
| 200 if (channel_messages_.empty()) { |
| 201 DCHECK(scheduled_); |
| 202 channel_->PostHandleMessage(this); |
| 203 } |
| 204 |
| 205 channel_messages_.push_back(std::move(msg)); |
| 206 |
| 207 if (preempting_flag_) |
| 208 UpdatePreemptionStateHelper(); |
| 209 |
| 210 return true; |
| 211 } |
| 212 return false; |
| 213 } |
| 214 |
// Returns the next message to process, or null if there is nothing to do
// right now. Marks the message's order number as "in progress". The
// returned pointer stays owned by the queue (valid until Pause/Finish).
const GpuChannelMessage* GpuChannelMessageQueue::BeginMessageProcessing() {
  base::AutoLock auto_lock(channel_lock_);
  DCHECK(enabled_);
  // If we have been preempted by another channel, just post a task to wake up.
  if (preempted_flag_ && preempted_flag_->IsSet()) {
    channel_->PostHandleMessage(this);
    return nullptr;
  }
  if (channel_messages_.empty())
    return nullptr;
  sync_point_order_data_->BeginProcessingOrderNumber(
      channel_messages_.front()->order_number);
  return channel_messages_.front().get();
}
| 229 |
// Called when the current message yielded before completing (the stub still
// has unprocessed commands). The message stays at the front of the queue.
void GpuChannelMessageQueue::PauseMessageProcessing() {
  base::AutoLock auto_lock(channel_lock_);
  DCHECK(!channel_messages_.empty());

  // If the queue is still scheduled, repost so the remaining work for this
  // message runs in a fresh task. (If descheduled, OnRescheduled will
  // repost when we become scheduled again.)
  if (scheduled_)
    channel_->PostHandleMessage(this);

  sync_point_order_data_->PauseProcessingOrderNumber(
      channel_messages_.front()->order_number);
}
| 241 |
// Called when the current message completed: retires its order number,
// pops it, and schedules the next one (and a preemption re-evaluation).
void GpuChannelMessageQueue::FinishMessageProcessing() {
  base::AutoLock auto_lock(channel_lock_);
  DCHECK(!channel_messages_.empty());
  DCHECK(scheduled_);

  sync_point_order_data_->FinishProcessingOrderNumber(
      channel_messages_.front()->order_number);
  channel_messages_.pop_front();

  if (!channel_messages_.empty())
    channel_->PostHandleMessage(this);

  // Preemption state is managed on the IO thread; post rather than call.
  if (preempting_flag_) {
    io_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageQueue::UpdatePreemptionState, this));
  }
}
| 260 |
// IO-thread entry point for advancing the preemption state machine; takes
// the lock and delegates to the helper below.
void GpuChannelMessageQueue::UpdatePreemptionState() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(preempting_flag_);
  base::AutoLock lock(channel_lock_);
  UpdatePreemptionStateHelper();
}

// Dispatches to the per-state update routine. Caller must already hold
// channel_lock_ (used both from UpdatePreemptionState and directly from
// PushBackMessage, which holds the lock).
void GpuChannelMessageQueue::UpdatePreemptionStateHelper() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(preempting_flag_);
  channel_lock_.AssertAcquired();
  switch (preemption_state_) {
    case IDLE:
      UpdateStateIdle();
      break;
    case WAITING:
      UpdateStateWaiting();
      break;
    case CHECKING:
      UpdateStateChecking();
      break;
    case PREEMPTING:
      UpdateStatePreempting();
      break;
    case WOULD_PREEMPT_DESCHEDULED:
      UpdateStateWouldPreemptDescheduled();
      break;
    default:
      NOTREACHED();
  }
}
| 292 |
// IDLE: nothing pending, flag cleared. Leave IDLE as soon as a message
// arrives.
void GpuChannelMessageQueue::UpdateStateIdle() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(preempting_flag_);
  channel_lock_.AssertAcquired();
  DCHECK(!timer_->IsRunning());
  if (!channel_messages_.empty())
    TransitionToWaiting();
}

// WAITING: a message is pending; the timer counts down kPreemptWaitTimeMs
// before we start checking whether to preempt.
void GpuChannelMessageQueue::UpdateStateWaiting() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(preempting_flag_);
  channel_lock_.AssertAcquired();
  // Transition to CHECKING if timer fired.
  if (!timer_->IsRunning())
    TransitionToChecking();
}
| 310 |
// CHECKING: decide whether the oldest pending IPC has waited long enough
// (kPreemptWaitTimeMs) to justify preempting other channels.
void GpuChannelMessageQueue::UpdateStateChecking() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(preempting_flag_);
  channel_lock_.AssertAcquired();
  if (!channel_messages_.empty()) {
    base::TimeTicks time_recv = channel_messages_.front()->time_received;
    base::TimeDelta time_elapsed = base::TimeTicks::Now() - time_recv;
    if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
      // Schedule another check for when the IPC may go long.
      timer_->Start(
          FROM_HERE,
          base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) - time_elapsed,
          this, &GpuChannelMessageQueue::UpdatePreemptionState);
    } else {
      timer_->Stop();
      // A descheduled queue cannot preempt; park in the special state
      // until it is rescheduled.
      if (!scheduled_)
        TransitionToWouldPreemptDescheduled();
      else
        TransitionToPreempting();
    }
  }
}
| 333 |
// PREEMPTING: the flag is set and the timer bounds how long we keep it
// set (at most max_preemption_time_).
void GpuChannelMessageQueue::UpdateStatePreempting() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(preempting_flag_);
  channel_lock_.AssertAcquired();
  // We should stop preempting if the timer fired or for other conditions.
  if (!timer_->IsRunning() || ShouldTransitionToIdle()) {
    TransitionToIdle();
  } else if (!scheduled_) {
    // Save the remaining preemption time before stopping the timer.
    max_preemption_time_ = timer_->desired_run_time() - base::TimeTicks::Now();
    timer_->Stop();
    TransitionToWouldPreemptDescheduled();
  }
}
| 348 |
// WOULD_PREEMPT_DESCHEDULED: we want to preempt but this queue is
// descheduled, so the flag is kept clear. Resume preempting once
// rescheduled, or go idle if the pressure has passed.
void GpuChannelMessageQueue::UpdateStateWouldPreemptDescheduled() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(preempting_flag_);
  channel_lock_.AssertAcquired();
  DCHECK(!timer_->IsRunning());
  if (ShouldTransitionToIdle()) {
    TransitionToIdle();
  } else if (scheduled_) {
    TransitionToPreempting();
  }
}
| 360 |
| 361 bool GpuChannelMessageQueue::ShouldTransitionToIdle() const { |
| 362 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 363 DCHECK(preempting_flag_); |
| 364 channel_lock_.AssertAcquired(); |
| 365 DCHECK(preemption_state_ == PREEMPTING || |
| 366 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); |
| 367 if (channel_messages_.empty()) { |
| 368 return true; |
| 369 } else { |
| 370 base::TimeTicks next_tick = channel_messages_.front()->time_received; |
| 371 base::TimeDelta time_elapsed = base::TimeTicks::Now() - next_tick; |
| 372 if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs) |
| 373 return true; |
| 374 } |
| 375 return false; |
| 376 } |
| 377 |
// Clears the preemption flag, restores the full preemption budget, and
// immediately re-runs the IDLE step (a pending message would move us
// straight back to WAITING).
void GpuChannelMessageQueue::TransitionToIdle() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(preempting_flag_);
  channel_lock_.AssertAcquired();
  DCHECK(preemption_state_ == PREEMPTING ||
         preemption_state_ == WOULD_PREEMPT_DESCHEDULED);

  preemption_state_ = IDLE;
  preempting_flag_->Reset();

  max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
  timer_->Stop();

  TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

  UpdateStateIdle();
}
| 395 |
// IDLE -> WAITING: start the countdown that leads to CHECKING once the
// pending IPC has been waiting kPreemptWaitTimeMs.
void GpuChannelMessageQueue::TransitionToWaiting() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(preempting_flag_);
  channel_lock_.AssertAcquired();
  DCHECK_EQ(preemption_state_, IDLE);
  DCHECK(!timer_->IsRunning());

  preemption_state_ = WAITING;

  timer_->Start(FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs), this,
                &GpuChannelMessageQueue::UpdatePreemptionState);
}
| 409 |
// WAITING -> CHECKING: re-evaluate immediately; UpdateStateChecking either
// re-arms the timer or escalates to preempting.
void GpuChannelMessageQueue::TransitionToChecking() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(preempting_flag_);
  channel_lock_.AssertAcquired();
  DCHECK_EQ(preemption_state_, WAITING);
  DCHECK(!timer_->IsRunning());

  preemption_state_ = CHECKING;

  UpdateStateChecking();
}
| 421 |
// -> PREEMPTING: set the flag other channels observe, and bound the
// preemption with whatever budget remains (max_preemption_time_ may be a
// partial remainder saved when we were descheduled mid-preemption).
void GpuChannelMessageQueue::TransitionToPreempting() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(preempting_flag_);
  channel_lock_.AssertAcquired();
  DCHECK(preemption_state_ == CHECKING ||
         preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
  DCHECK(scheduled_);

  preemption_state_ = PREEMPTING;
  preempting_flag_->Set();
  TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

  DCHECK_LE(max_preemption_time_,
            base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs));
  timer_->Start(FROM_HERE, max_preemption_time_, this,
                &GpuChannelMessageQueue::UpdatePreemptionState);
}
| 439 |
// -> WOULD_PREEMPT_DESCHEDULED: we would like to preempt but cannot while
// descheduled, so the flag is cleared until we are rescheduled.
void GpuChannelMessageQueue::TransitionToWouldPreemptDescheduled() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(preempting_flag_);
  channel_lock_.AssertAcquired();
  DCHECK(preemption_state_ == CHECKING || preemption_state_ == PREEMPTING);
  DCHECK(!scheduled_);

  preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
  preempting_flag_->Reset();
  TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
}
| 451 |
// Filter starts detached: no sender and no known peer process.
GpuChannelMessageFilter::GpuChannelMessageFilter()
    : sender_(nullptr), peer_pid_(base::kNullProcessId) {}

GpuChannelMessageFilter::~GpuChannelMessageFilter() {}
| 456 |
// Records the sender and forwards the attach notification to every
// registered sub-filter.
void GpuChannelMessageFilter::OnFilterAdded(IPC::Sender* sender) {
  DCHECK(!sender_);
  sender_ = sender;
  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    filter->OnFilterAdded(sender_);
  }
}

// Detaches from the sender, forwarding the notification to sub-filters and
// resetting the cached peer pid.
void GpuChannelMessageFilter::OnFilterRemoved() {
  DCHECK(sender_);
  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    filter->OnFilterRemoved();
  }
  sender_ = nullptr;
  peer_pid_ = base::kNullProcessId;
}
| 473 |
// Caches the peer pid (used to replay the connect event to late-added
// filters) and forwards to sub-filters.
void GpuChannelMessageFilter::OnChannelConnected(int32_t peer_pid) {
  DCHECK(peer_pid_ == base::kNullProcessId);
  peer_pid_ = peer_pid;
  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    filter->OnChannelConnected(peer_pid);
  }
}

// Fan-out of channel error notifications to sub-filters.
void GpuChannelMessageFilter::OnChannelError() {
  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    filter->OnChannelError();
  }
}

// Fan-out of channel closing notifications to sub-filters.
void GpuChannelMessageFilter::OnChannelClosing() {
  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    filter->OnChannelClosing();
  }
}
| 493 |
// Registers a sub-filter. If the channel is already attached/connected,
// replay the corresponding lifecycle events so the filter catches up.
void GpuChannelMessageFilter::AddChannelFilter(
    scoped_refptr<IPC::MessageFilter> filter) {
  channel_filters_.push_back(filter);
  if (sender_)
    filter->OnFilterAdded(sender_);
  if (peer_pid_ != base::kNullProcessId)
    filter->OnChannelConnected(peer_pid_);
}
| 502 |
| 503 void GpuChannelMessageFilter::RemoveChannelFilter( |
| 504 scoped_refptr<IPC::MessageFilter> filter) { |
| 505 if (sender_) |
| 506 filter->OnFilterRemoved(); |
| 507 channel_filters_.erase( |
| 508 std::find(channel_filters_.begin(), channel_filters_.end(), filter)); |
| 509 } |
| 510 |
// This gets called from the main thread and assumes that all messages which
// lead to creation of a new route are synchronous messages.
// TODO(sunnyps): Create routes (and streams) on the IO thread so that we can
// make the CreateCommandBuffer/VideoDecoder/VideoEncoder messages asynchronous.
void GpuChannelMessageFilter::AddRoute(
    int32_t route_id,
    const scoped_refptr<GpuChannelMessageQueue>& queue) {
  base::AutoLock lock(routes_lock_);
  routes_.insert(std::make_pair(route_id, queue));
}

// Main-thread counterpart to AddRoute; routes_lock_ guards against the
// IO thread looking up streams concurrently.
void GpuChannelMessageFilter::RemoveRoute(int32_t route_id) {
  base::AutoLock lock(routes_lock_);
  routes_.erase(route_id);
}
| 526 |
// IO-thread message entry point. Dispatch order: reject malformed types,
// answer Nop synchronously, give sub-filters first crack, then enqueue on
// the route's stream queue. Always returns true (errors are replied to
// in MessageErrorHandler rather than propagated).
bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
  DCHECK(sender_);

  if (message.should_unblock() || message.is_reply())
    return MessageErrorHandler(message, "Unexpected message type");

  // Nop is used as a synchronous fence; reply immediately from the IO
  // thread without touching any queue.
  if (message.type() == GpuChannelMsg_Nop::ID) {
    IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
    Send(reply);
    return true;
  }

  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    if (filter->OnMessageReceived(message))
      return true;
  }

  scoped_refptr<GpuChannelMessageQueue> message_queue =
      LookupStreamByRoute(message.routing_id());

  if (!message_queue)
    return MessageErrorHandler(message, "Could not find message queue");

  if (!message_queue->PushBackMessage(message))
    return MessageErrorHandler(message, "Channel destroyed");

  return true;
}
| 555 |
| 556 bool GpuChannelMessageFilter::Send(IPC::Message* message) { |
| 557 return sender_->Send(message); |
| 558 } |
| 559 |
| 560 scoped_refptr<GpuChannelMessageQueue> |
| 561 GpuChannelMessageFilter::LookupStreamByRoute(int32_t route_id) { |
| 562 base::AutoLock lock(routes_lock_); |
| 563 auto it = routes_.find(route_id); |
| 564 if (it != routes_.end()) |
| 565 return it->second; |
| 566 return nullptr; |
| 567 } |
| 568 |
| 569 bool GpuChannelMessageFilter::MessageErrorHandler(const IPC::Message& message, |
| 570 const char* error_msg) { |
| 571 DLOG(ERROR) << error_msg; |
| 572 if (message.is_sync()) { |
| 573 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
| 574 reply->set_reply_error(); |
| 575 Send(reply); |
| 576 } |
| 577 return true; |
| 578 } |
| 579 |
// Builds the channel plus its default high-priority stream and the control
// route, and wires the IO-thread message filter. preempting_flag is only
// handed to the default stream (see CreateStream).
GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       gpu::SyncPointManager* sync_point_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       gpu::PreemptionFlag* preempting_flag,
                       gpu::PreemptionFlag* preempted_flag,
                       base::SingleThreadTaskRunner* task_runner,
                       base::SingleThreadTaskRunner* io_task_runner,
                       int32_t client_id,
                       uint64_t client_tracing_id,
                       bool allow_view_command_buffers,
                       bool allow_real_time_streams)
    : gpu_channel_manager_(gpu_channel_manager),
      sync_point_manager_(sync_point_manager),
      unhandled_message_listener_(nullptr),
      channel_id_(IPC::Channel::GenerateVerifiedChannelID("gpu")),
      preempting_flag_(preempting_flag),
      preempted_flag_(preempted_flag),
      client_id_(client_id),
      client_tracing_id_(client_tracing_id),
      task_runner_(task_runner),
      io_task_runner_(io_task_runner),
      share_group_(share_group),
      mailbox_manager_(mailbox),
      subscription_ref_set_(new gpu::gles2::SubscriptionRefSet),
      pending_valuebuffer_state_(new gpu::ValueStateMap),
      watchdog_(watchdog),
      allow_view_command_buffers_(allow_view_command_buffers),
      allow_real_time_streams_(allow_real_time_streams),
      weak_factory_(this) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  filter_ = new GpuChannelMessageFilter();

  // Control messages (route-less IPCs) flow through the default stream.
  scoped_refptr<GpuChannelMessageQueue> control_queue =
      CreateStream(gpu::GPU_STREAM_DEFAULT, gpu::GpuStreamPriority::HIGH);
  AddRouteToStream(MSG_ROUTING_CONTROL, gpu::GPU_STREAM_DEFAULT);

  subscription_ref_set_->AddObserver(this);
}
| 622 |
// Teardown order matters: stubs reference the streams, so they go first;
// then each stream queue is disabled (draining pending messages).
GpuChannel::~GpuChannel() {
  // Clear stubs first because of dependencies.
  stubs_.clear();

  for (auto& kv : streams_)
    kv.second->Disable();

  subscription_ref_set_->RemoveObserver(this);
  // Make sure we are no longer preempting other channels.
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}
| 634 |
// Creates the server-side sync channel and returns the handle the client
// should connect with (on POSIX this carries the renderer-side socket fd).
IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event) {
  DCHECK(shutdown_event);
  DCHECK(!channel_);

  IPC::ChannelHandle channel_handle(channel_id_);

  channel_ =
      IPC::SyncChannel::Create(channel_handle, IPC::Channel::MODE_SERVER, this,
                               io_task_runner_, false, shutdown_event);

#if defined(OS_POSIX)
  // On POSIX, pass the renderer-side FD. Also mark it as auto-close so
  // that it gets closed after it has been sent.
  base::ScopedFD renderer_fd = channel_->TakeClientFileDescriptor();
  DCHECK(renderer_fd.is_valid());
  channel_handle.socket = base::FileDescriptor(std::move(renderer_fd));
#endif

  // All incoming messages are intercepted on the IO thread by filter_.
  channel_->AddFilter(filter_.get());

  return channel_handle;
}
| 657 |
// Installs a fallback listener consulted when neither the control handler
// nor the router handles a message (see HandleMessageHelper).
void GpuChannel::SetUnhandledMessageListener(IPC::Listener* listener) {
  unhandled_message_listener_ = listener;
}

base::WeakPtr<GpuChannel> GpuChannel::AsWeakPtr() {
  return weak_factory_.GetWeakPtr();
}

// Pid of the connected client process, as reported by the IPC channel.
base::ProcessId GpuChannel::GetClientPID() const {
  return channel_->GetPeerPID();
}
| 669 |
| 670 uint32_t GpuChannel::GetProcessedOrderNum() const { |
| 671 uint32_t processed_order_num = 0; |
| 672 for (auto& kv : streams_) { |
| 673 processed_order_num = |
| 674 std::max(processed_order_num, kv.second->GetProcessedOrderNum()); |
| 675 } |
| 676 return processed_order_num; |
| 677 } |
| 678 |
| 679 uint32_t GpuChannel::GetUnprocessedOrderNum() const { |
| 680 uint32_t unprocessed_order_num = 0; |
| 681 for (auto& kv : streams_) { |
| 682 unprocessed_order_num = |
| 683 std::max(unprocessed_order_num, kv.second->GetUnprocessedOrderNum()); |
| 684 } |
| 685 return unprocessed_order_num; |
| 686 } |
| 687 |
// The filter intercepts everything on the IO thread, so the listener path
// should never be reached.
bool GpuChannel::OnMessageReceived(const IPC::Message& msg) {
  // All messages should be pushed to channel_messages_ and handled separately.
  NOTREACHED();
  return false;
}

// A channel error means the client is gone; tear this channel down.
void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}
| 697 |
// Sends an async message to the client. Takes ownership of |message| and
// deletes it if the channel is already gone.
bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());

  DVLOG(1) << "sending message @" << message << " on channel @" << this
           << " with type " << message->type();

  if (!channel_) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}
| 713 |
// SubscriptionRefSet observer: forward subscription changes to the
// channel manager's delegate, tagged with this client.
void GpuChannel::OnAddSubscription(unsigned int target) {
  gpu_channel_manager()->delegate()->AddSubscription(client_id_, target);
}

void GpuChannel::OnRemoveSubscription(unsigned int target) {
  gpu_channel_manager()->delegate()->RemoveSubscription(client_id_, target);
}

// Relays a stub's (de)schedule request to the owning stream queue.
void GpuChannel::OnStreamRescheduled(int32_t stream_id, bool scheduled) {
  scoped_refptr<GpuChannelMessageQueue> queue = LookupStream(stream_id);
  DCHECK(queue);
  queue->OnRescheduled(scheduled);
}
| 727 |
// Returns the stub for |route_id|, or null if none exists.
GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) {
  return stubs_.get(route_id);
}

// Escalates a context loss on this channel to every channel.
void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}

// Marks every stub's context lost without destroying the stubs.
void GpuChannel::MarkAllContextsLost() {
  for (auto& kv : stubs_)
    kv.second->MarkContextLost();
}
| 740 |
| 741 bool GpuChannel::AddRoute(int32_t route_id, |
| 742 int32_t stream_id, |
| 743 IPC::Listener* listener) { |
| 744 if (router_.AddRoute(route_id, listener)) { |
| 745 AddRouteToStream(route_id, stream_id); |
| 746 return true; |
| 747 } |
| 748 return false; |
| 749 } |
| 750 |
| 751 void GpuChannel::RemoveRoute(int32_t route_id) { |
| 752 router_.RemoveRoute(route_id); |
| 753 RemoveRouteFromStream(route_id); |
| 754 } |
| 755 |
// Dispatches MSG_ROUTING_CONTROL messages. Returns false for unrecognized
// types so HandleMessageHelper can try the fallback listener.
bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateCommandBuffer,
                        OnCreateCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}
| 767 |
// Fetches the sync point ordering data for an existing stream. The stream
// must exist (callers are expected to hold a route on it).
scoped_refptr<gpu::SyncPointOrderData> GpuChannel::GetSyncPointOrderData(
    int32_t stream_id) {
  auto it = streams_.find(stream_id);
  DCHECK(it != streams_.end());
  DCHECK(it->second);
  return it->second->GetSyncPointOrderData();
}
| 775 |
// Posts a main-thread task to process the next message on |queue|.
// Weak-ptr bound so tasks are dropped once the channel is destroyed.
void GpuChannel::PostHandleMessage(
    const scoped_refptr<GpuChannelMessageQueue>& queue) {
  task_runner_->PostTask(FROM_HERE,
                         base::Bind(&GpuChannel::HandleMessage,
                                    weak_factory_.GetWeakPtr(), queue));
}

// Posts a main-thread task to handle |msg| outside the ordered queues
// (used for the WaitFor* messages — see PushBackMessage).
void GpuChannel::PostHandleOutOfOrderMessage(const IPC::Message& msg) {
  task_runner_->PostTask(FROM_HERE,
                         base::Bind(&GpuChannel::HandleOutOfOrderMessage,
                                    weak_factory_.GetWeakPtr(), msg));
}
| 788 |
// Main-thread pump for one stream queue: takes the front message, runs it,
// then either pauses (stub yielded with unprocessed commands) or retires it.
void GpuChannel::HandleMessage(
    const scoped_refptr<GpuChannelMessageQueue>& message_queue) {
  const GpuChannelMessage* channel_msg =
      message_queue->BeginMessageProcessing();
  if (!channel_msg)
    return;

  const IPC::Message& msg = channel_msg->message;
  int32_t routing_id = msg.routing_id();
  GpuCommandBufferStub* stub = stubs_.get(routing_id);

  DCHECK(!stub || stub->IsScheduled());

  DVLOG(1) << "received message @" << &msg << " on channel @" << this
           << " with type " << msg.type();

  HandleMessageHelper(msg);

  // If we get descheduled or yield while processing a message.
  if (stub && stub->HasUnprocessedCommands()) {
    // Only AsyncFlush can leave commands unprocessed mid-message.
    DCHECK_EQ((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID, msg.type());
    message_queue->PauseMessageProcessing();
  } else {
    message_queue->FinishMessageProcessing();
  }
}
| 815 |
// Routes one message: control messages to OnControlMessageReceived, the
// rest to the router, with the unhandled-message listener as fallback.
// Unhandled sync messages still get an error reply so callers unblock.
void GpuChannel::HandleMessageHelper(const IPC::Message& msg) {
  int32_t routing_id = msg.routing_id();

  bool handled = false;
  if (routing_id == MSG_ROUTING_CONTROL) {
    handled = OnControlMessageReceived(msg);
  } else {
    handled = router_.RouteMessage(msg);
  }

  if (!handled && unhandled_message_listener_)
    handled = unhandled_message_listener_->OnMessageReceived(msg);

  // Respond to sync messages even if router failed to route.
  if (!handled && msg.is_sync()) {
    IPC::Message* reply = IPC::SyncMessage::GenerateReply(&msg);
    reply->set_reply_error();
    Send(reply);
  }
}

// Entry point for messages that bypass the ordered stream queues.
void GpuChannel::HandleOutOfOrderMessage(const IPC::Message& msg) {
  HandleMessageHelper(msg);
}

// Test hook: dispatch a message directly, skipping the queues.
void GpuChannel::HandleMessageForTesting(const IPC::Message& msg) {
  HandleMessageHelper(msg);
}
| 844 |
// Creates a new stream queue. Only the default stream gets the preempting
// flag, so only it can preempt other channels. The route refcount starts
// at zero; the stream is destroyed when its last route is removed.
scoped_refptr<GpuChannelMessageQueue> GpuChannel::CreateStream(
    int32_t stream_id,
    gpu::GpuStreamPriority stream_priority) {
  DCHECK(streams_.find(stream_id) == streams_.end());
  scoped_refptr<GpuChannelMessageQueue> queue = GpuChannelMessageQueue::Create(
      stream_id, stream_priority, this, io_task_runner_,
      (stream_id == gpu::GPU_STREAM_DEFAULT) ? preempting_flag_ : nullptr,
      preempted_flag_, sync_point_manager_);
  streams_.insert(std::make_pair(stream_id, queue));
  streams_to_num_routes_.insert(std::make_pair(stream_id, 0));
  return queue;
}

// Returns the queue for |stream_id|, or null if the stream doesn't exist.
scoped_refptr<GpuChannelMessageQueue> GpuChannel::LookupStream(
    int32_t stream_id) {
  auto stream_it = streams_.find(stream_id);
  if (stream_it != streams_.end())
    return stream_it->second;
  return nullptr;
}
| 865 |
// Tears down a stream once its route count has dropped to zero.
void GpuChannel::DestroyStreamIfNecessary(
    const scoped_refptr<GpuChannelMessageQueue>& queue) {
  int32_t stream_id = queue->stream_id();
  if (streams_to_num_routes_[stream_id] == 0) {
    queue->Disable();
    streams_to_num_routes_.erase(stream_id);
    streams_.erase(stream_id);
  }
}

// Binds a route to an existing stream: bumps the stream's route count and
// registers the route with the IO-thread filter.
void GpuChannel::AddRouteToStream(int32_t route_id, int32_t stream_id) {
  DCHECK(streams_.find(stream_id) != streams_.end());
  DCHECK(routes_to_streams_.find(route_id) == routes_to_streams_.end());
  streams_to_num_routes_[stream_id]++;
  routes_to_streams_.insert(std::make_pair(route_id, stream_id));
  filter_->AddRoute(route_id, streams_[stream_id]);
}

// Inverse of AddRouteToStream; destroys the stream if this was its last
// route.
void GpuChannel::RemoveRouteFromStream(int32_t route_id) {
  DCHECK(routes_to_streams_.find(route_id) != routes_to_streams_.end());
  int32_t stream_id = routes_to_streams_[route_id];
  DCHECK(streams_.find(stream_id) != streams_.end());
  routes_to_streams_.erase(route_id);
  streams_to_num_routes_[stream_id]--;
  filter_->RemoveRoute(route_id);
  DestroyStreamIfNecessary(streams_[stream_id]);
}
| 893 |
#if defined(OS_ANDROID)
const GpuCommandBufferStub* GpuChannel::GetOneStub() const {
  // Returns some stub on this channel whose decoder still has a live
  // context, or null when no such stub exists.
  for (const auto& entry : stubs_) {
    const GpuCommandBufferStub* candidate = entry.second;
    if (candidate->decoder() && !candidate->decoder()->WasContextLost())
      return candidate;
  }
  return nullptr;
}
#endif
| 904 |
void GpuChannel::OnCreateCommandBuffer(
    gpu::SurfaceHandle surface_handle,
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32_t route_id,
    bool* succeeded) {
  // Validates |init_params| and, on success, creates a GpuCommandBufferStub
  // registered under |route_id| on the stream named by
  // |init_params.stream_id| (creating that stream's message queue on first
  // use). |*succeeded| stays false on any validation or routing failure.
  TRACE_EVENT2("gpu", "GpuChannel::OnCreateCommandBuffer", "route_id", route_id,
               "offscreen", (surface_handle == gpu::kNullSurfaceHandle));
  *succeeded = false;
  // Onscreen (view) command buffers are only allowed on privileged channels.
  if (surface_handle != gpu::kNullSurfaceHandle &&
      !allow_view_command_buffers_) {
    DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): attempt to create a "
                   "view context on a non-priviledged channel";
    return;
  }

  // If a share group was requested, it must name an existing stub on this
  // channel.
  int32_t share_group_id = init_params.share_group_id;
  GpuCommandBufferStub* share_group = stubs_.get(share_group_id);

  if (!share_group && share_group_id != MSG_ROUTING_NONE) {
    DLOG(ERROR)
        << "GpuChannel::OnCreateCommandBuffer(): invalid share group id";
    return;
  }

  // All contexts in one share group must live on the same stream.
  int32_t stream_id = init_params.stream_id;
  if (share_group && stream_id != share_group->stream_id()) {
    DLOG(ERROR) << "GpuChannel::OnCreateCommandBuffer(): stream id does not "
                   "match share group stream id";
    return;
  }

  // Real-time stream priority is restricted to channels explicitly granted
  // that capability.
  gpu::GpuStreamPriority stream_priority = init_params.stream_priority;
  if (!allow_real_time_streams_ &&
      stream_priority == gpu::GpuStreamPriority::REAL_TIME) {
    DLOG(ERROR) << "GpuChannel::OnCreateCommandBuffer(): real time stream "
                   "priority not allowed";
    return;
  }

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this, sync_point_manager_, task_runner_.get(), share_group,
      surface_handle, mailbox_manager_.get(), preempted_flag_.get(),
      subscription_ref_set_.get(), pending_valuebuffer_state_.get(), size,
      disallowed_features_, init_params.attribs, init_params.gpu_preference,
      init_params.stream_id, route_id, watchdog_, init_params.active_url));

  // Lazily create the stream's message queue on first use.
  scoped_refptr<GpuChannelMessageQueue> queue = LookupStream(stream_id);
  if (!queue)
    queue = CreateStream(stream_id, stream_priority);

  if (!AddRoute(route_id, stream_id, stub.get())) {
    // Roll back the stream if it was just created and has no other routes.
    DestroyStreamIfNecessary(queue);
    DLOG(ERROR) << "GpuChannel::OnCreateCommandBuffer(): failed to add route";
    return;
  }

  stubs_.set(route_id, std::move(stub));
  *succeeded = true;
}
| 965 |
| 966 void GpuChannel::OnDestroyCommandBuffer(int32_t route_id) { |
| 967 TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer", |
| 968 "route_id", route_id); |
| 969 |
| 970 scoped_ptr<GpuCommandBufferStub> stub = stubs_.take_and_erase(route_id); |
| 971 // In case the renderer is currently blocked waiting for a sync reply from the |
| 972 // stub, we need to make sure to reschedule the correct stream here. |
| 973 if (stub && !stub->IsScheduled()) { |
| 974 // This stub won't get a chance to reschedule the stream so do that now. |
| 975 OnStreamRescheduled(stub->stream_id(), true); |
| 976 } |
| 977 |
| 978 RemoveRoute(route_id); |
| 979 } |
| 980 |
| 981 void GpuChannel::CacheShader(const std::string& key, |
| 982 const std::string& shader) { |
| 983 gpu_channel_manager_->delegate()->StoreShaderToDisk(client_id_, key, shader); |
| 984 } |
| 985 |
| 986 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { |
| 987 io_task_runner_->PostTask( |
| 988 FROM_HERE, base::Bind(&GpuChannelMessageFilter::AddChannelFilter, |
| 989 filter_, make_scoped_refptr(filter))); |
| 990 } |
| 991 |
| 992 void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) { |
| 993 io_task_runner_->PostTask( |
| 994 FROM_HERE, base::Bind(&GpuChannelMessageFilter::RemoveChannelFilter, |
| 995 filter_, make_scoped_refptr(filter))); |
| 996 } |
| 997 |
| 998 uint64_t GpuChannel::GetMemoryUsage() { |
| 999 // Collect the unique memory trackers in use by the |stubs_|. |
| 1000 std::set<gpu::gles2::MemoryTracker*> unique_memory_trackers; |
| 1001 for (auto& kv : stubs_) |
| 1002 unique_memory_trackers.insert(kv.second->GetMemoryTracker()); |
| 1003 |
| 1004 // Sum the memory usage for all unique memory trackers. |
| 1005 uint64_t size = 0; |
| 1006 for (auto* tracker : unique_memory_trackers) { |
| 1007 size += gpu_channel_manager()->gpu_memory_manager()->GetTrackerMemoryUsage( |
| 1008 tracker); |
| 1009 } |
| 1010 |
| 1011 return size; |
| 1012 } |
| 1013 |
| 1014 scoped_refptr<gl::GLImage> GpuChannel::CreateImageForGpuMemoryBuffer( |
| 1015 const gfx::GpuMemoryBufferHandle& handle, |
| 1016 const gfx::Size& size, |
| 1017 gfx::BufferFormat format, |
| 1018 uint32_t internalformat) { |
| 1019 switch (handle.type) { |
| 1020 case gfx::SHARED_MEMORY_BUFFER: { |
| 1021 if (!base::IsValueInRangeForNumericType<size_t>(handle.stride)) |
| 1022 return nullptr; |
| 1023 scoped_refptr<gl::GLImageSharedMemory> image( |
| 1024 new gl::GLImageSharedMemory(size, internalformat)); |
| 1025 if (!image->Initialize(handle.handle, handle.id, format, handle.offset, |
| 1026 handle.stride)) { |
| 1027 return nullptr; |
| 1028 } |
| 1029 |
| 1030 return image; |
| 1031 } |
| 1032 default: { |
| 1033 GpuChannelManager* manager = gpu_channel_manager(); |
| 1034 if (!manager->gpu_memory_buffer_factory()) |
| 1035 return nullptr; |
| 1036 |
| 1037 return manager->gpu_memory_buffer_factory() |
| 1038 ->AsImageFactory() |
| 1039 ->CreateImageForGpuMemoryBuffer(handle, |
| 1040 size, |
| 1041 format, |
| 1042 internalformat, |
| 1043 client_id_); |
| 1044 } |
| 1045 } |
| 1046 } |
| 1047 |
void GpuChannel::HandleUpdateValueState(
    unsigned int target, const gpu::ValueState& state) {
  // Record the newest ValueState for |target| in the pending state shared
  // with this channel's stubs.
  pending_valuebuffer_state_->UpdateState(target, state);
}
| 1052 |
| 1053 } // namespace content |
OLD | NEW |