OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "cc/resources/one_copy_raster_worker_pool.h" | 5 #include "cc/resources/one_copy_raster_worker_pool.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <limits> | 8 #include <limits> |
9 | 9 |
10 #include "base/debug/trace_event.h" | 10 #include "base/debug/trace_event.h" |
(...skipping 29 matching lines...) |
40 // Release write lock in case a copy was never scheduled. | 40 // Release write lock in case a copy was never scheduled. |
41 lock_.reset(); | 41 lock_.reset(); |
42 | 42 |
43 // Make sure any scheduled copy operations are issued before we release the | 43 // Make sure any scheduled copy operations are issued before we release the |
44 // raster resource. | 44 // raster resource. |
45 if (sequence_) | 45 if (sequence_) |
46 worker_pool_->AdvanceLastIssuedCopyTo(sequence_); | 46 worker_pool_->AdvanceLastIssuedCopyTo(sequence_); |
47 | 47 |
48 // Return raster resource to pool so it can be used by another RasterBuffer | 48 // Return raster resource to pool so it can be used by another RasterBuffer |
49 // instance. | 49 // instance. |
50 resource_pool_->ReleaseResource(raster_resource_.Pass()); | 50 if (raster_resource_) |
| 51 resource_pool_->ReleaseResource(raster_resource_.Pass()); |
51 } | 52 } |
52 | 53 |
53 // Overridden from RasterBuffer: | 54 // Overridden from RasterBuffer: |
54 void Playback(const RasterSource* raster_source, | 55 void Playback(const RasterSource* raster_source, |
55 const gfx::Rect& rect, | 56 const gfx::Rect& rect, |
56 float scale, | 57 float scale, |
57 RenderingStatsInstrumentation* stats) override { | 58 RenderingStatsInstrumentation* stats) override { |
58 gfx::GpuMemoryBuffer* gpu_memory_buffer = lock_->GetGpuMemoryBuffer(); | 59 sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread( |
59 if (!gpu_memory_buffer) | 60 lock_.Pass(), |
60 return; | 61 raster_resource_.Pass(), |
61 | 62 resource_, |
62 RasterWorkerPool::PlaybackToMemory(gpu_memory_buffer->Map(), | 63 raster_source, |
63 raster_resource_->format(), | 64 rect, |
64 raster_resource_->size(), | 65 scale, |
65 gpu_memory_buffer->GetStride(), | 66 stats); |
66 raster_source, | |
67 rect, | |
68 scale, | |
69 stats); | |
70 gpu_memory_buffer->Unmap(); | |
71 | |
72 sequence_ = worker_pool_->ScheduleCopyOnWorkerThread( | |
73 lock_.Pass(), raster_resource_.get(), resource_); | |
74 } | 67 } |
75 | 68 |
76 private: | 69 private: |
77 OneCopyRasterWorkerPool* worker_pool_; | 70 OneCopyRasterWorkerPool* worker_pool_; |
78 ResourceProvider* resource_provider_; | 71 ResourceProvider* resource_provider_; |
79 ResourcePool* resource_pool_; | 72 ResourcePool* resource_pool_; |
80 const Resource* resource_; | 73 const Resource* resource_; |
81 scoped_ptr<ScopedResource> raster_resource_; | 74 scoped_ptr<ScopedResource> raster_resource_; |
82 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_; | 75 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_; |
83 CopySequenceNumber sequence_; | 76 CopySequenceNumber sequence_; |
84 | 77 |
85 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); | 78 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); |
86 }; | 79 }; |
87 | 80 |
88 // Flush interval when performing copy operations. | 81 // Flush interval when performing copy operations. |
89 const int kCopyFlushPeriod = 4; | 82 const int kCopyFlushPeriod = 4; |
90 | 83 |
| 84 // Number of in-flight copy operations to allow. |
| 85 const int kMaxCopyOperations = 16; |
| 86 |
| 87 // Delay between checks for copy operations to complete. |
| 88 const int kCheckForCompletedCopyOperationsTickRateMs = 1; |
| 89 |
| 90 // Number of failed attempts to allow before we perform a check that will |
| 91 // wait for copy operations to complete if needed. |
| 92 const int kFailedAttemptsBeforeWaitIfNeeded = 256; |
| 93 |
91 } // namespace | 94 } // namespace |
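For a rough sense of what kMaxCopyOperations means for staging memory, a back-of-the-envelope figure helps. The tile size and format below are assumptions made purely for illustration (they are not taken from this change): with 256x256 RGBA_8888 staging buffers, 16 in-flight copies pin roughly 4 MiB.

// Illustrative arithmetic only; tile dimensions and bytes-per-pixel are
// assumed values, not constants from this change.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kAssumedTileWidth = 256;
  const size_t kAssumedTileHeight = 256;
  const size_t kAssumedBytesPerPixel = 4;  // RGBA_8888.
  const size_t kMaxCopyOperations = 16;    // Mirrors the constant above.

  const size_t bytes_per_staging_buffer =
      kAssumedTileWidth * kAssumedTileHeight * kAssumedBytesPerPixel;
  const size_t worst_case_staging_bytes =
      bytes_per_staging_buffer * kMaxCopyOperations;

  // 256 * 256 * 4 = 262144 bytes per buffer; 16 buffers is 4194304 bytes.
  printf("worst-case staging memory: %zu bytes\n", worst_case_staging_bytes);
  return 0;
}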
92 | 95 |
93 OneCopyRasterWorkerPool::CopyOperation::CopyOperation( | 96 OneCopyRasterWorkerPool::CopyOperation::CopyOperation( |
94 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock, | 97 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock, |
95 ResourceProvider::ResourceId src, | 98 scoped_ptr<ScopedResource> src, |
96 ResourceProvider::ResourceId dst) | 99 const Resource* dst) |
97 : write_lock(write_lock.Pass()), src(src), dst(dst) { | 100 : write_lock(write_lock.Pass()), src(src.Pass()), dst(dst) { |
98 } | 101 } |
99 | 102 |
100 OneCopyRasterWorkerPool::CopyOperation::~CopyOperation() { | 103 OneCopyRasterWorkerPool::CopyOperation::~CopyOperation() { |
101 } | 104 } |
102 | 105 |
103 // static | 106 // static |
104 scoped_ptr<RasterWorkerPool> OneCopyRasterWorkerPool::Create( | 107 scoped_ptr<RasterWorkerPool> OneCopyRasterWorkerPool::Create( |
105 base::SequencedTaskRunner* task_runner, | 108 base::SequencedTaskRunner* task_runner, |
106 TaskGraphRunner* task_graph_runner, | 109 TaskGraphRunner* task_graph_runner, |
107 ContextProvider* context_provider, | 110 ContextProvider* context_provider, |
(...skipping 14 matching lines...) |
122 ResourceProvider* resource_provider, | 125 ResourceProvider* resource_provider, |
123 ResourcePool* resource_pool) | 126 ResourcePool* resource_pool) |
124 : task_runner_(task_runner), | 127 : task_runner_(task_runner), |
125 task_graph_runner_(task_graph_runner), | 128 task_graph_runner_(task_graph_runner), |
126 namespace_token_(task_graph_runner->GetNamespaceToken()), | 129 namespace_token_(task_graph_runner->GetNamespaceToken()), |
127 context_provider_(context_provider), | 130 context_provider_(context_provider), |
128 resource_provider_(resource_provider), | 131 resource_provider_(resource_provider), |
129 resource_pool_(resource_pool), | 132 resource_pool_(resource_pool), |
130 last_issued_copy_operation_(0), | 133 last_issued_copy_operation_(0), |
131 last_flushed_copy_operation_(0), | 134 last_flushed_copy_operation_(0), |
| 135 lock_(), |
| 136 copy_operation_count_cv_(&lock_), |
| 137 scheduled_copy_operation_count_(0), |
| 138 issued_copy_operation_count_(0), |
132 next_copy_operation_sequence_(1), | 139 next_copy_operation_sequence_(1), |
| 140 check_for_completed_copy_operations_pending_(false), |
| 141 shutdown_(false), |
133 weak_ptr_factory_(this), | 142 weak_ptr_factory_(this), |
134 raster_finished_weak_ptr_factory_(this) { | 143 raster_finished_weak_ptr_factory_(this) { |
135 DCHECK(context_provider_); | 144 DCHECK(context_provider_); |
136 } | 145 } |
137 | 146 |
138 OneCopyRasterWorkerPool::~OneCopyRasterWorkerPool() { | 147 OneCopyRasterWorkerPool::~OneCopyRasterWorkerPool() { |
| 148 DCHECK_EQ(scheduled_copy_operation_count_, 0u); |
139 } | 149 } |
140 | 150 |
141 Rasterizer* OneCopyRasterWorkerPool::AsRasterizer() { | 151 Rasterizer* OneCopyRasterWorkerPool::AsRasterizer() { |
142 return this; | 152 return this; |
143 } | 153 } |
144 | 154 |
145 void OneCopyRasterWorkerPool::SetClient(RasterizerClient* client) { | 155 void OneCopyRasterWorkerPool::SetClient(RasterizerClient* client) { |
146 client_ = client; | 156 client_ = client; |
147 } | 157 } |
148 | 158 |
149 void OneCopyRasterWorkerPool::Shutdown() { | 159 void OneCopyRasterWorkerPool::Shutdown() { |
150 TRACE_EVENT0("cc", "OneCopyRasterWorkerPool::Shutdown"); | 160 TRACE_EVENT0("cc", "OneCopyRasterWorkerPool::Shutdown"); |
151 | 161 |
| 162 { |
| 163 base::AutoLock lock(lock_); |
| 164 |
| 165 shutdown_ = true; |
| 166 copy_operation_count_cv_.Signal(); |
| 167 } |
| 168 |
152 TaskGraph empty; | 169 TaskGraph empty; |
153 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); | 170 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); |
154 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); | 171 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); |
155 } | 172 } |
156 | 173 |
157 void OneCopyRasterWorkerPool::ScheduleTasks(RasterTaskQueue* queue) { | 174 void OneCopyRasterWorkerPool::ScheduleTasks(RasterTaskQueue* queue) { |
158 TRACE_EVENT0("cc", "OneCopyRasterWorkerPool::ScheduleTasks"); | 175 TRACE_EVENT0("cc", "OneCopyRasterWorkerPool::ScheduleTasks"); |
159 | 176 |
160 if (raster_pending_.none()) | 177 if (raster_pending_.none()) |
161 TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this); | 178 TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this); |
(...skipping 13 matching lines...) |
175 size_t task_count[kNumberOfTaskSets] = {0}; | 192 size_t task_count[kNumberOfTaskSets] = {0}; |
176 | 193 |
177 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 194 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
178 new_raster_finished_tasks[task_set] = CreateRasterFinishedTask( | 195 new_raster_finished_tasks[task_set] = CreateRasterFinishedTask( |
179 task_runner_.get(), | 196 task_runner_.get(), |
180 base::Bind(&OneCopyRasterWorkerPool::OnRasterFinished, | 197 base::Bind(&OneCopyRasterWorkerPool::OnRasterFinished, |
181 raster_finished_weak_ptr_factory_.GetWeakPtr(), | 198 raster_finished_weak_ptr_factory_.GetWeakPtr(), |
182 task_set)); | 199 task_set)); |
183 } | 200 } |
184 | 201 |
185 resource_pool_->CheckBusyResources(); | 202 resource_pool_->CheckBusyResources(false); |
186 | 203 |
187 for (RasterTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); | 204 for (RasterTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); |
188 it != queue->items.end(); | 205 it != queue->items.end(); |
189 ++it) { | 206 ++it) { |
190 const RasterTaskQueue::Item& item = *it; | 207 const RasterTaskQueue::Item& item = *it; |
191 RasterTask* task = item.task; | 208 RasterTask* task = item.task; |
192 DCHECK(!task->HasCompleted()); | 209 DCHECK(!task->HasCompleted()); |
193 | 210 |
194 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 211 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
195 if (!item.task_sets[task_set]) | 212 if (!item.task_sets[task_set]) |
(...skipping 53 matching lines...) |
249 DCHECK_EQ(resource->format(), resource_pool_->resource_format()); | 266 DCHECK_EQ(resource->format(), resource_pool_->resource_format()); |
250 return make_scoped_ptr<RasterBuffer>( | 267 return make_scoped_ptr<RasterBuffer>( |
251 new RasterBufferImpl(this, resource_provider_, resource_pool_, resource)); | 268 new RasterBufferImpl(this, resource_provider_, resource_pool_, resource)); |
252 } | 269 } |
253 | 270 |
254 void OneCopyRasterWorkerPool::ReleaseBufferForRaster( | 271 void OneCopyRasterWorkerPool::ReleaseBufferForRaster( |
255 scoped_ptr<RasterBuffer> buffer) { | 272 scoped_ptr<RasterBuffer> buffer) { |
256 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. | 273 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. |
257 } | 274 } |
258 | 275 |
259 CopySequenceNumber OneCopyRasterWorkerPool::ScheduleCopyOnWorkerThread( | 276 CopySequenceNumber |
| 277 OneCopyRasterWorkerPool::PlaybackAndScheduleCopyOnWorkerThread( |
260 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock, | 278 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock, |
261 const Resource* src, | 279 scoped_ptr<ScopedResource> src, |
262 const Resource* dst) { | 280 const Resource* dst, |
| 281 const RasterSource* raster_source, |
| 282 const gfx::Rect& rect, |
| 283 float scale, |
| 284 RenderingStatsInstrumentation* stats) { |
263 CopySequenceNumber sequence; | 285 CopySequenceNumber sequence; |
264 | 286 |
265 { | 287 { |
266 base::AutoLock lock(lock_); | 288 base::AutoLock lock(lock_); |
267 | 289 |
| 290 int failed_attempts = 0; |
| 291 while ((scheduled_copy_operation_count_ + issued_copy_operation_count_) >= |
| 292 kMaxCopyOperations) { |
| 293 // Ignore limit when shutdown is set. |
| 294 if (shutdown_) |
| 295 break; |
| 296 |
| 297 ++failed_attempts; |
| 298 |
| 299 // Schedule a check that will also wait for operations to complete |
| 300 // after too many failed attempts. |
| 301 bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded; |
| 302 |
| 303 // Schedule a check for completed copy operations if too many operations |
| 304 // are currently in-flight. |
| 305 ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed); |
| 306 |
| 307 { |
| 308 TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete"); |
| 309 |
| 310 // Wait for in-flight copy operations to drop below limit. |
| 311 copy_operation_count_cv_.Wait(); |
| 312 } |
| 313 } |
| 314 |
| 315 // Increment |scheduled_copy_operation_count_| before releasing |lock_|. |
| 316 ++scheduled_copy_operation_count_; |
| 317 |
| 318 // There may be more work available, so wake up another worker thread. |
| 319 copy_operation_count_cv_.Signal(); |
| 320 |
| 321 { |
| 322 base::AutoUnlock unlock(lock_); |
| 323 |
| 324 gfx::GpuMemoryBuffer* gpu_memory_buffer = |
| 325 write_lock->GetGpuMemoryBuffer(); |
| 326 if (gpu_memory_buffer) { |
| 327 RasterWorkerPool::PlaybackToMemory(gpu_memory_buffer->Map(), |
| 328 src->format(), |
| 329 src->size(), |
| 330 gpu_memory_buffer->GetStride(), |
| 331 raster_source, |
| 332 rect, |
| 333 scale, |
| 334 stats); |
| 335 gpu_memory_buffer->Unmap(); |
| 336 } |
| 337 } |
| 338 |
| 339 // Acquire a sequence number for this copy operation. |
268 sequence = next_copy_operation_sequence_++; | 340 sequence = next_copy_operation_sequence_++; |
269 | 341 |
270 pending_copy_operations_.push_back(make_scoped_ptr( | 342 pending_copy_operations_.push_back( |
271 new CopyOperation(write_lock.Pass(), src->id(), dst->id()))); | 343 make_scoped_ptr(new CopyOperation(write_lock.Pass(), src.Pass(), dst))); |
272 } | 344 } |
273 | 345 |
274 // Post task that will advance last flushed copy operation to |sequence| | 346 // Post task that will advance last flushed copy operation to |sequence| |
275 // if we have reached the flush period. | 347 // if we have reached the flush period. |
276 if ((sequence % kCopyFlushPeriod) == 0) { | 348 if ((sequence % kCopyFlushPeriod) == 0) { |
277 task_runner_->PostTask( | 349 task_runner_->PostTask( |
278 FROM_HERE, | 350 FROM_HERE, |
279 base::Bind(&OneCopyRasterWorkerPool::AdvanceLastFlushedCopyTo, | 351 base::Bind(&OneCopyRasterWorkerPool::AdvanceLastFlushedCopyTo, |
280 weak_ptr_factory_.GetWeakPtr(), | 352 weak_ptr_factory_.GetWeakPtr(), |
281 sequence)); | 353 sequence)); |
(...skipping 44 matching lines...) |
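The wait loop in PlaybackAndScheduleCopyOnWorkerThread above is the back-pressure mechanism at the heart of this change: a worker blocks while scheduled plus issued copy operations are at kMaxCopyOperations, reserves a slot before dropping the lock, and runs the (slow) playback with the lock released via base::AutoUnlock so other workers are not serialized behind it. A minimal standalone sketch of the same pattern follows, with std::mutex and std::condition_variable standing in for base::Lock and base::ConditionVariable and with playback reduced to a caller-supplied callable; all names in the sketch are illustrative, not part of this change.

#include <condition_variable>
#include <cstddef>
#include <mutex>

// Illustrative sketch of the bounded in-flight pattern used above; not
// Chromium code.
class CopyThrottle {
 public:
  explicit CopyThrottle(size_t max_in_flight)
      : max_in_flight_(max_in_flight) {}

  // Runs on a worker thread. Blocks while too many copies are in flight,
  // then runs |playback| with the lock released (cf. base::AutoUnlock).
  template <typename Playback>
  void PlaybackAndSchedule(Playback playback) {
    std::unique_lock<std::mutex> lock(mutex_);
    while (!shutdown_ && scheduled_ + issued_ >= max_in_flight_)
      have_capacity_.wait(lock);

    ++scheduled_;                 // Reserve a slot before unlocking.
    have_capacity_.notify_one();  // Another worker may still fit.

    lock.unlock();  // Long-running raster work happens unlocked.
    playback();
    lock.lock();
    // The real code records a pending copy operation and takes a sequence
    // number here.
  }

  // Mirrors IssueCopyOperations(): copies move from "scheduled" to "issued".
  void MarkIssued(size_t count) {
    std::lock_guard<std::mutex> lock(mutex_);
    scheduled_ -= count;
    issued_ += count;
  }

  // Mirrors CheckForCompletedCopyOperations(): the number of still-busy
  // staging resources becomes the new issued count, and a blocked worker
  // is woken up.
  void UpdateIssuedFromBusyCount(size_t busy_count) {
    std::lock_guard<std::mutex> lock(mutex_);
    issued_ = busy_count;
    have_capacity_.notify_one();
  }

  // Mirrors Shutdown(): waiters are released even if the limit is reached.
  void Shutdown() {
    std::lock_guard<std::mutex> lock(mutex_);
    shutdown_ = true;
    have_capacity_.notify_all();
  }

 private:
  std::mutex mutex_;
  std::condition_variable have_capacity_;
  const size_t max_in_flight_;
  size_t scheduled_ = 0;
  size_t issued_ = 0;
  bool shutdown_ = false;
};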
326 | 398 |
327 CopyOperation::Deque copy_operations; | 399 CopyOperation::Deque copy_operations; |
328 | 400 |
329 { | 401 { |
330 base::AutoLock lock(lock_); | 402 base::AutoLock lock(lock_); |
331 | 403 |
332 for (int64 i = 0; i < count; ++i) { | 404 for (int64 i = 0; i < count; ++i) { |
333 DCHECK(!pending_copy_operations_.empty()); | 405 DCHECK(!pending_copy_operations_.empty()); |
334 copy_operations.push_back(pending_copy_operations_.take_front()); | 406 copy_operations.push_back(pending_copy_operations_.take_front()); |
335 } | 407 } |
| 408 |
| 409 // Decrement |scheduled_copy_operation_count_| and increment |
| 410 // |issued_copy_operation_count_| to reflect the transition of copy |
| 411 // operations from "pending" to "issued" state. |
| 412 DCHECK_GE(scheduled_copy_operation_count_, copy_operations.size()); |
| 413 scheduled_copy_operation_count_ -= copy_operations.size(); |
| 414 issued_copy_operation_count_ += copy_operations.size(); |
336 } | 415 } |
337 | 416 |
338 while (!copy_operations.empty()) { | 417 while (!copy_operations.empty()) { |
339 scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front(); | 418 scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front(); |
340 | 419 |
341 // Remove the write lock. | 420 // Remove the write lock. |
342 copy_operation->write_lock.reset(); | 421 copy_operation->write_lock.reset(); |
343 | 422 |
344 // Copy contents of source resource to destination resource. | 423 // Copy contents of source resource to destination resource. |
345 resource_provider_->CopyResource(copy_operation->src, copy_operation->dst); | 424 resource_provider_->CopyResource(copy_operation->src->id(), |
| 425 copy_operation->dst->id()); |
| 426 |
| 427 // Return source resource to pool where it can be reused once copy |
| 428 // operation has completed and resource is no longer busy. |
| 429 resource_pool_->ReleaseResource(copy_operation->src.Pass()); |
346 } | 430 } |
347 } | 431 } |
348 | 432 |
| 433 void OneCopyRasterWorkerPool:: |
| 434 ScheduleCheckForCompletedCopyOperationsWithLockAcquired( |
| 435 bool wait_if_needed) { |
| 436 lock_.AssertAcquired(); |
| 437 |
| 438 if (check_for_completed_copy_operations_pending_) |
| 439 return; |
| 440 |
| 441 base::TimeTicks now = base::TimeTicks::Now(); |
| 442 |
| 443 // Schedule a check for completed copy operations as soon as possible but |
| 444 // don't allow two consecutive checks to be scheduled to run less than the |
| 445 // tick rate apart. |
| 446 base::TimeTicks next_check_for_completed_copy_operations_time = |
| 447 std::max(last_check_for_completed_copy_operations_time_ + |
| 448 base::TimeDelta::FromMilliseconds( |
| 449 kCheckForCompletedCopyOperationsTickRateMs), |
| 450 now); |
| 451 |
| 452 task_runner_->PostDelayedTask( |
| 453 FROM_HERE, |
| 454 base::Bind(&OneCopyRasterWorkerPool::CheckForCompletedCopyOperations, |
| 455 weak_ptr_factory_.GetWeakPtr(), |
| 456 wait_if_needed), |
| 457 next_check_for_completed_copy_operations_time - now); |
| 458 |
| 459 last_check_for_completed_copy_operations_time_ = |
| 460 next_check_for_completed_copy_operations_time; |
| 461 check_for_completed_copy_operations_pending_ = true; |
| 462 } |
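The clamping above keeps completion checks from piling up: the next check is scheduled for max(last check time + tick rate, now), so back-to-back requests collapse into checks spaced at least kCheckForCompletedCopyOperationsTickRateMs apart, while an overdue check still runs immediately. A minimal standalone version of that scheduling policy is sketched below, with base::TimeTicks and PostDelayedTask replaced by std::chrono and a caller-supplied callback; the names are illustrative, not from this change.

#include <algorithm>
#include <chrono>
#include <functional>

// Illustrative sketch of tick-rate-limited scheduling; not Chromium code.
class RateLimitedScheduler {
 public:
  using Clock = std::chrono::steady_clock;

  RateLimitedScheduler(Clock::duration tick_rate,
                       std::function<void(Clock::duration delay)> post_delayed)
      : tick_rate_(tick_rate), post_delayed_(std::move(post_delayed)) {}

  // Schedules one pending check, at least |tick_rate_| after the previous one.
  void ScheduleCheck() {
    if (check_pending_)
      return;

    Clock::time_point now = Clock::now();
    Clock::time_point next_check = std::max(last_check_ + tick_rate_, now);

    post_delayed_(next_check - now);  // Delay is zero if enough time passed.

    last_check_ = next_check;
    check_pending_ = true;
  }

  // Called by the posted task when the check actually runs.
  void OnCheckRan() { check_pending_ = false; }

 private:
  const Clock::duration tick_rate_;
  std::function<void(Clock::duration delay)> post_delayed_;
  Clock::time_point last_check_;  // Defaults to the clock's epoch.
  bool check_pending_ = false;
};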
| 463 |
| 464 void OneCopyRasterWorkerPool::CheckForCompletedCopyOperations( |
| 465 bool wait_if_needed) { |
| 466 TRACE_EVENT1("cc", |
| 467 "OneCopyRasterWorkerPool::CheckForCompletedCopyOperations", |
| 468 "wait_if_needed", |
| 469 wait_if_needed); |
| 470 |
| 471 resource_pool_->CheckBusyResources(wait_if_needed); |
| 472 |
| 473 { |
| 474 base::AutoLock lock(lock_); |
| 475 |
| 476 DCHECK(check_for_completed_copy_operations_pending_); |
| 477 check_for_completed_copy_operations_pending_ = false; |
| 478 |
| 479 // The number of busy resources in the pool reflects the number of issued |
| 480 // copy operations that have not yet completed. |
| 481 issued_copy_operation_count_ = resource_pool_->busy_resource_count(); |
| 482 |
| 483 // There may be work blocked on too many in-flight copy operations, so wake |
| 484 // up a worker thread. |
| 485 copy_operation_count_cv_.Signal(); |
| 486 } |
| 487 } |
| 488 |
349 scoped_refptr<base::debug::ConvertableToTraceFormat> | 489 scoped_refptr<base::debug::ConvertableToTraceFormat> |
350 OneCopyRasterWorkerPool::StateAsValue() const { | 490 OneCopyRasterWorkerPool::StateAsValue() const { |
351 scoped_refptr<base::debug::TracedValue> state = | 491 scoped_refptr<base::debug::TracedValue> state = |
352 new base::debug::TracedValue(); | 492 new base::debug::TracedValue(); |
353 | 493 |
354 state->BeginArray("tasks_pending"); | 494 state->BeginArray("tasks_pending"); |
355 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) | 495 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) |
356 state->AppendBoolean(raster_pending_[task_set]); | 496 state->AppendBoolean(raster_pending_[task_set]); |
357 state->EndArray(); | 497 state->EndArray(); |
358 state->BeginDictionary("staging_state"); | 498 state->BeginDictionary("staging_state"); |
359 StagingStateAsValueInto(state.get()); | 499 StagingStateAsValueInto(state.get()); |
360 state->EndDictionary(); | 500 state->EndDictionary(); |
361 | 501 |
362 return state; | 502 return state; |
363 } | 503 } |
| 504 |
364 void OneCopyRasterWorkerPool::StagingStateAsValueInto( | 505 void OneCopyRasterWorkerPool::StagingStateAsValueInto( |
365 base::debug::TracedValue* staging_state) const { | 506 base::debug::TracedValue* staging_state) const { |
366 staging_state->SetInteger("staging_resource_count", | 507 staging_state->SetInteger("staging_resource_count", |
367 resource_pool_->total_resource_count()); | 508 resource_pool_->total_resource_count()); |
368 staging_state->SetInteger("bytes_used_for_staging_resources", | 509 staging_state->SetInteger("bytes_used_for_staging_resources", |
369 resource_pool_->total_memory_usage_bytes()); | 510 resource_pool_->total_memory_usage_bytes()); |
370 staging_state->SetInteger("pending_copy_count", | 511 staging_state->SetInteger("pending_copy_count", |
371 resource_pool_->total_resource_count() - | 512 resource_pool_->total_resource_count() - |
372 resource_pool_->acquired_resource_count()); | 513 resource_pool_->acquired_resource_count()); |
373 staging_state->SetInteger("bytes_pending_copy", | 514 staging_state->SetInteger("bytes_pending_copy", |
374 resource_pool_->total_memory_usage_bytes() - | 515 resource_pool_->total_memory_usage_bytes() - |
375 resource_pool_->acquired_memory_usage_bytes()); | 516 resource_pool_->acquired_memory_usage_bytes()); |
376 } | 517 } |
377 | 518 |
378 } // namespace cc | 519 } // namespace cc |