| OLD | NEW |
| (Empty) |
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "cc/resources/one_copy_tile_task_worker_pool.h" | |
| 6 | |
| 7 #include <algorithm> | |
| 8 #include <limits> | |
| 9 | |
| 10 #include "base/strings/stringprintf.h" | |
| 11 #include "base/trace_event/trace_event.h" | |
| 12 #include "base/trace_event/trace_event_argument.h" | |
| 13 #include "cc/debug/traced_value.h" | |
| 14 #include "cc/resources/raster_buffer.h" | |
| 15 #include "cc/resources/resource_pool.h" | |
| 16 #include "cc/resources/scoped_resource.h" | |
| 17 #include "gpu/command_buffer/client/gles2_interface.h" | |
| 18 #include "ui/gfx/gpu_memory_buffer.h" | |
| 19 | |
| 20 namespace cc { | |
| 21 namespace { | |
| 22 | |
// Staging buffer used by the one-copy scheme: raster work is played back into
// a GPU-memory-buffer-backed staging resource (|raster_resource_|) on a worker
// thread, and a copy into the final |resource_| is scheduled through
// |worker_pool_|.
class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
                   ResourceProvider* resource_provider,
                   ResourcePool* resource_pool,
                   ResourceFormat resource_format,
                   const Resource* resource)
      : worker_pool_(worker_pool),
        resource_provider_(resource_provider),
        resource_pool_(resource_pool),
        resource_(resource),
        raster_resource_(
            resource_pool->AcquireResource(resource->size(), resource_format)),
        lock_(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer(
            resource_provider_,
            raster_resource_->id())),
        sequence_(0) {}

  ~RasterBufferImpl() override {
    // Release write lock in case a copy was never scheduled.
    lock_.reset();

    // Make sure any scheduled copy operations are issued before we release the
    // raster resource.
    if (sequence_)
      worker_pool_->AdvanceLastIssuedCopyTo(sequence_);

    // Return raster resource to pool so it can be used by another RasterBuffer
    // instance. |raster_resource_| is null if Playback() already transferred
    // ownership to the worker pool.
    if (raster_resource_)
      resource_pool_->ReleaseResource(raster_resource_.Pass());
  }

  // Overridden from RasterBuffer:
  void Playback(const RasterSource* raster_source,
                const gfx::Rect& rect,
                float scale) override {
    // Hands ownership of the write lock and the staging resource over to the
    // worker pool. The returned sequence number identifies the pending copy
    // so the destructor can force it to be issued.
    sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
        lock_.Pass(), raster_resource_.Pass(), resource_, raster_source, rect,
        scale);
  }

 private:
  OneCopyTileTaskWorkerPool* worker_pool_;
  ResourceProvider* resource_provider_;
  ResourcePool* resource_pool_;
  const Resource* resource_;
  // NOTE: |raster_resource_| must be declared before |lock_| — the lock's
  // initializer reads raster_resource_->id().
  scoped_ptr<ScopedResource> raster_resource_;
  scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_;
  // 0 means no copy operation has been scheduled yet.
  CopySequenceNumber sequence_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};
| 76 | |
// Flush interval when performing copy operations: every Nth scheduled copy
// triggers a deferred ShallowFlushCHROMIUM of all issued copies.
const int kCopyFlushPeriod = 4;

// Number of in-flight copy operations to allow.
const int kMaxCopyOperations = 32;

// Delay between checking for copy operations to complete.
const int kCheckForCompletedCopyOperationsTickRateMs = 1;

// Number of failed attempts to allow before we perform a check that will
// wait for copy operations to complete if needed.
const int kFailedAttemptsBeforeWaitIfNeeded = 256;
| 89 | |
| 90 } // namespace | |
| 91 | |
// Takes ownership of the staging-resource write lock and the source resource.
// |dst| is not owned and must outlive the copy operation.
OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation(
    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock,
    scoped_ptr<ScopedResource> src,
    const Resource* dst)
    : write_lock(write_lock.Pass()), src(src.Pass()), dst(dst) {
}

OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() {
}
| 101 | |
| 102 // static | |
| 103 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( | |
| 104 base::SequencedTaskRunner* task_runner, | |
| 105 TaskGraphRunner* task_graph_runner, | |
| 106 ContextProvider* context_provider, | |
| 107 ResourceProvider* resource_provider, | |
| 108 ResourcePool* resource_pool) { | |
| 109 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( | |
| 110 task_runner, task_graph_runner, context_provider, resource_provider, | |
| 111 resource_pool)); | |
| 112 } | |
| 113 | |
OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    ResourcePool* resource_pool)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner->GetNamespaceToken()),
      context_provider_(context_provider),
      resource_provider_(resource_provider),
      resource_pool_(resource_pool),
      last_issued_copy_operation_(0),
      last_flushed_copy_operation_(0),
      lock_(),
      // Signaled when the in-flight copy count may have dropped, or on
      // shutdown, to wake workers blocked in
      // PlaybackAndScheduleCopyOnWorkerThread().
      copy_operation_count_cv_(&lock_),
      scheduled_copy_operation_count_(0),
      issued_copy_operation_count_(0),
      // Sequence numbers start at 1 so 0 can mean "no copy scheduled".
      next_copy_operation_sequence_(1),
      check_for_completed_copy_operations_pending_(false),
      shutdown_(false),
      weak_ptr_factory_(this),
      task_set_finished_weak_ptr_factory_(this) {
  DCHECK(context_provider_);
}
| 139 | |
OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
  // All scheduled copy operations must have been issued (e.g. drained via
  // Shutdown()) before the pool is destroyed.
  DCHECK_EQ(scheduled_copy_operation_count_, 0u);
}
| 143 | |
TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
  // This class implements the TileTaskRunner interface directly.
  return this;
}
| 147 | |
void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
  // |client| is notified from OnTaskSetFinished(); not owned.
  client_ = client;
}
| 151 | |
void OneCopyTileTaskWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");

  {
    base::AutoLock lock(lock_);

    shutdown_ = true;
    // Wake a worker blocked on the in-flight copy limit; once |shutdown_| is
    // set, waiters break out of the limit loop (and each released waiter
    // signals the next one).
    copy_operation_count_cv_.Signal();
  }

  // Replace any scheduled work with an empty graph and wait for running
  // tasks to finish.
  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
}
| 166 | |
// Builds a task graph from |queue| and hands it to the task graph runner.
// Each raster task gets an edge to a per-task-set "finished" task so
// OnTaskSetFinished() fires when a whole set completes.
void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");

#if DCHECK_IS_ON()
  {
    base::AutoLock lock(lock_);
    DCHECK(!shutdown_);
  }
#endif

  // Start the async trace event on the first transition from idle to busy.
  if (tasks_pending_.none())
    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);

  // Mark all task sets as pending.
  tasks_pending_.set();

  unsigned priority = kTileTaskPriorityBase;

  graph_.Reset();

  // Cancel existing OnTaskSetFinished callbacks.
  task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];

  size_t task_count[kNumberOfTaskSets] = {0};

  // Create one "finished" task per task set; each runs OnTaskSetFinished on
  // |task_runner_| once all tasks in its set are done.
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
        task_runner_.get(),
        base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished,
                   task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
  }

  resource_pool_->CheckBusyResources(false);

  for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end(); ++it) {
    const TileTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    // Connect the task to the "finished" task of every set it belongs to.
    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
    }

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }

  // Insert the "finished" tasks, each depending on its set's task count.
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                      kTaskSetFinishedTaskPriorityBase + task_set,
                      task_count[task_set]);
  }

  ScheduleTasksOnOriginThread(this, &graph_);
  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  // Keep the new "finished" tasks alive until the next ScheduleTasks() call.
  std::copy(new_task_set_finished_tasks,
            new_task_set_finished_tasks + kNumberOfTaskSets,
            task_set_finished_tasks_);

  resource_pool_->ReduceResourceUsage();

  TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
                               StateAsValue());
}
| 240 | |
| 241 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() { | |
| 242 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks"); | |
| 243 | |
| 244 task_graph_runner_->CollectCompletedTasks(namespace_token_, | |
| 245 &completed_tasks_); | |
| 246 | |
| 247 for (Task::Vector::const_iterator it = completed_tasks_.begin(); | |
| 248 it != completed_tasks_.end(); ++it) { | |
| 249 TileTask* task = static_cast<TileTask*>(it->get()); | |
| 250 | |
| 251 task->WillComplete(); | |
| 252 task->CompleteOnOriginThread(this); | |
| 253 task->DidComplete(); | |
| 254 | |
| 255 task->RunReplyOnOriginThread(); | |
| 256 } | |
| 257 completed_tasks_.clear(); | |
| 258 } | |
| 259 | |
ResourceFormat OneCopyTileTaskWorkerPool::GetResourceFormat() {
  // Both the staging and output resources use the provider's preferred
  // texture format (see AcquireBufferForRaster()).
  return resource_provider_->best_texture_format();
}
| 263 | |
| 264 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster( | |
| 265 const Resource* resource) { | |
| 266 DCHECK_EQ(resource->format(), resource_provider_->best_texture_format()); | |
| 267 return make_scoped_ptr<RasterBuffer>( | |
| 268 new RasterBufferImpl(this, resource_provider_, resource_pool_, | |
| 269 resource_provider_->best_texture_format(), | |
| 270 resource)); | |
| 271 } | |
| 272 | |
void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}
| 277 | |
// Called on a worker thread. Rasterizes |raster_source| into the staging
// buffer held by |write_lock|/|src|, queues a copy to |dst| and returns the
// copy's sequence number. Blocks while the in-flight copy limit is reached.
CopySequenceNumber
OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock,
    scoped_ptr<ScopedResource> src,
    const Resource* dst,
    const RasterSource* raster_source,
    const gfx::Rect& rect,
    float scale) {
  base::AutoLock lock(lock_);

  // Throttle while the total number of in-flight copies (pending + issued)
  // is at the limit.
  int failed_attempts = 0;
  while ((scheduled_copy_operation_count_ + issued_copy_operation_count_) >=
         kMaxCopyOperations) {
    // Ignore limit when shutdown is set.
    if (shutdown_)
      break;

    ++failed_attempts;

    // Schedule a check that will also wait for operations to complete
    // after too many failed attempts.
    bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;

    // Schedule a check for completed copy operations if too many operations
    // are currently in-flight.
    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);

    {
      TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");

      // Wait for in-flight copy operations to drop below limit.
      copy_operation_count_cv_.Wait();
    }
  }

  // Increment |scheduled_copy_operation_count_| before releasing |lock_|.
  ++scheduled_copy_operation_count_;

  // There may be more work available, so wake up another worker thread.
  copy_operation_count_cv_.Signal();

  {
    // Drop |lock_| for the duration of playback; rasterization is the
    // expensive part and needs no shared state.
    base::AutoUnlock unlock(lock_);

    // Playback is skipped when no GPU memory buffer is available.
    gfx::GpuMemoryBuffer* gpu_memory_buffer = write_lock->GetGpuMemoryBuffer();
    if (gpu_memory_buffer) {
      TileTaskWorkerPool::PlaybackToMemory(
          gpu_memory_buffer->Map(), src->format(), src->size(),
          gpu_memory_buffer->GetStride(), raster_source, rect, scale);
      gpu_memory_buffer->Unmap();
    }
  }

  // |lock_| is held again here; queue the copy operation.
  pending_copy_operations_.push_back(
      make_scoped_ptr(new CopyOperation(write_lock.Pass(), src.Pass(), dst)));

  // Acquire a sequence number for this copy operation.
  CopySequenceNumber sequence = next_copy_operation_sequence_++;

  // Post task that will advance last flushed copy operation to |sequence|
  // if we have reached the flush period.
  if ((sequence % kCopyFlushPeriod) == 0) {
    task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo,
                   weak_ptr_factory_.GetWeakPtr(), sequence));
  }

  return sequence;
}
| 348 | |
| 349 void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo( | |
| 350 CopySequenceNumber sequence) { | |
| 351 if (last_issued_copy_operation_ >= sequence) | |
| 352 return; | |
| 353 | |
| 354 IssueCopyOperations(sequence - last_issued_copy_operation_); | |
| 355 last_issued_copy_operation_ = sequence; | |
| 356 } | |
| 357 | |
| 358 void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo( | |
| 359 CopySequenceNumber sequence) { | |
| 360 if (last_flushed_copy_operation_ >= sequence) | |
| 361 return; | |
| 362 | |
| 363 AdvanceLastIssuedCopyTo(sequence); | |
| 364 | |
| 365 // Flush all issued copy operations. | |
| 366 context_provider_->ContextGL()->ShallowFlushCHROMIUM(); | |
| 367 last_flushed_copy_operation_ = last_issued_copy_operation_; | |
| 368 } | |
| 369 | |
| 370 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { | |
| 371 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", | |
| 372 task_set); | |
| 373 | |
| 374 DCHECK(tasks_pending_[task_set]); | |
| 375 tasks_pending_[task_set] = false; | |
| 376 if (tasks_pending_.any()) { | |
| 377 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", | |
| 378 "state", StateAsValue()); | |
| 379 } else { | |
| 380 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); | |
| 381 } | |
| 382 client_->DidFinishRunningTileTasks(task_set); | |
| 383 } | |
| 384 | |
// Moves the first |count| pending copy operations to "issued" state and
// performs them on the GL context. Runs with |lock_| held only while
// mutating the shared queues/counters.
void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) {
  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count",
               count);

  CopyOperation::Deque copy_operations;

  {
    base::AutoLock lock(lock_);

    // Take the first |count| pending operations while holding |lock_|.
    for (int64 i = 0; i < count; ++i) {
      DCHECK(!pending_copy_operations_.empty());
      copy_operations.push_back(pending_copy_operations_.take_front());
    }

    // Decrement |scheduled_copy_operation_count_| and increment
    // |issued_copy_operation_count_| to reflect the transition of copy
    // operations from "pending" to "issued" state.
    DCHECK_GE(scheduled_copy_operation_count_, copy_operations.size());
    scheduled_copy_operation_count_ -= copy_operations.size();
    issued_copy_operation_count_ += copy_operations.size();
  }

  // Perform the copies without holding |lock_|.
  while (!copy_operations.empty()) {
    scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();

    // Remove the write lock.
    copy_operation->write_lock.reset();

    // Copy contents of source resource to destination resource.
    resource_provider_->CopyResource(copy_operation->src->id(),
                                     copy_operation->dst->id());

    // Return source resource to pool where it can be reused once copy
    // operation has completed and resource is no longer busy.
    resource_pool_->ReleaseResource(copy_operation->src.Pass());
  }
}
| 422 | |
// Posts a rate-limited CheckForCompletedCopyOperations() to |task_runner_|.
// Caller must hold |lock_|; at most one check is pending at a time.
void OneCopyTileTaskWorkerPool::
    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
        bool wait_if_needed) {
  lock_.AssertAcquired();

  if (check_for_completed_copy_operations_pending_)
    return;

  base::TimeTicks now = base::TimeTicks::Now();

  // Schedule a check for completed copy operations as soon as possible but
  // don't allow two consecutive checks to be scheduled to run less than the
  // tick rate apart.
  base::TimeTicks next_check_for_completed_copy_operations_time =
      std::max(last_check_for_completed_copy_operations_time_ +
                   base::TimeDelta::FromMilliseconds(
                       kCheckForCompletedCopyOperationsTickRateMs),
               now);

  task_runner_->PostDelayedTask(
      FROM_HERE,
      base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations,
                 weak_ptr_factory_.GetWeakPtr(), wait_if_needed),
      next_check_for_completed_copy_operations_time - now);

  last_check_for_completed_copy_operations_time_ =
      next_check_for_completed_copy_operations_time;
  check_for_completed_copy_operations_pending_ = true;
}
| 452 | |
// Runs on |task_runner_|. Refreshes the issued-copy count from the resource
// pool and wakes a worker that may be blocked on the in-flight limit.
void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations(
    bool wait_if_needed) {
  TRACE_EVENT1("cc",
               "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations",
               "wait_if_needed", wait_if_needed);

  resource_pool_->CheckBusyResources(wait_if_needed);

  {
    base::AutoLock lock(lock_);

    DCHECK(check_for_completed_copy_operations_pending_);
    check_for_completed_copy_operations_pending_ = false;

    // The number of busy resources in the pool reflects the number of issued
    // copy operations that have not yet completed.
    issued_copy_operation_count_ = resource_pool_->busy_resource_count();

    // There may be work blocked on too many in-flight copy operations, so wake
    // up a worker thread.
    copy_operation_count_cv_.Signal();
  }
}
| 476 | |
| 477 scoped_refptr<base::trace_event::ConvertableToTraceFormat> | |
| 478 OneCopyTileTaskWorkerPool::StateAsValue() const { | |
| 479 scoped_refptr<base::trace_event::TracedValue> state = | |
| 480 new base::trace_event::TracedValue(); | |
| 481 | |
| 482 state->BeginArray("tasks_pending"); | |
| 483 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) | |
| 484 state->AppendBoolean(tasks_pending_[task_set]); | |
| 485 state->EndArray(); | |
| 486 state->BeginDictionary("staging_state"); | |
| 487 StagingStateAsValueInto(state.get()); | |
| 488 state->EndDictionary(); | |
| 489 | |
| 490 return state; | |
| 491 } | |
| 492 | |
| 493 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto( | |
| 494 base::trace_event::TracedValue* staging_state) const { | |
| 495 staging_state->SetInteger("staging_resource_count", | |
| 496 resource_pool_->total_resource_count()); | |
| 497 staging_state->SetInteger("bytes_used_for_staging_resources", | |
| 498 resource_pool_->total_memory_usage_bytes()); | |
| 499 staging_state->SetInteger("pending_copy_count", | |
| 500 resource_pool_->total_resource_count() - | |
| 501 resource_pool_->acquired_resource_count()); | |
| 502 staging_state->SetInteger("bytes_pending_copy", | |
| 503 resource_pool_->total_memory_usage_bytes() - | |
| 504 resource_pool_->acquired_memory_usage_bytes()); | |
| 505 } | |
| 506 | |
| 507 } // namespace cc | |
| OLD | NEW |