Chromium Code Reviews

Side by Side Diff: cc/raster/one_copy_tile_task_worker_pool.cc

Issue 1861623003: cc: Refactor OneCopyTileTaskWorkerPool. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@dependency_task
Patch Set: rebase Created 4 years, 8 months ago
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "cc/raster/one_copy_tile_task_worker_pool.h" 5 #include "cc/raster/one_copy_tile_task_worker_pool.h"
6 6
7 #include <stdint.h> 7 #include <stdint.h>
8 8
9 #include <algorithm> 9 #include <algorithm>
10 #include <limits> 10 #include <limits>
11 #include <utility> 11 #include <utility>
12 12
13 #include "base/macros.h" 13 #include "base/macros.h"
14 #include "base/strings/stringprintf.h"
15 #include "base/thread_task_runner_handle.h"
16 #include "base/trace_event/memory_dump_manager.h"
17 #include "base/trace_event/trace_event.h"
18 #include "base/trace_event/trace_event_argument.h"
19 #include "cc/base/container_util.h"
20 #include "cc/base/math_util.h" 14 #include "cc/base/math_util.h"
21 #include "cc/debug/traced_value.h"
22 #include "cc/raster/raster_buffer.h" 15 #include "cc/raster/raster_buffer.h"
16 #include "cc/raster/staging_buffer_pool.h"
23 #include "cc/resources/platform_color.h" 17 #include "cc/resources/platform_color.h"
24 #include "cc/resources/resource_format.h" 18 #include "cc/resources/resource_format.h"
25 #include "cc/resources/resource_util.h" 19 #include "cc/resources/resource_util.h"
26 #include "cc/resources/scoped_resource.h" 20 #include "cc/resources/scoped_resource.h"
27 #include "gpu/GLES2/gl2extchromium.h" 21 #include "gpu/GLES2/gl2extchromium.h"
28 #include "gpu/command_buffer/client/gles2_interface.h" 22 #include "gpu/command_buffer/client/gles2_interface.h"
29 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" 23 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
30 #include "ui/gfx/buffer_format_util.h" 24 #include "ui/gfx/buffer_format_util.h"
31 25
32 namespace cc { 26 namespace cc {
(...skipping 28 matching lines...)
61 55
62 private: 56 private:
63 OneCopyTileTaskWorkerPool* worker_pool_; 57 OneCopyTileTaskWorkerPool* worker_pool_;
64 const Resource* resource_; 58 const Resource* resource_;
65 ResourceProvider::ScopedWriteLockGL lock_; 59 ResourceProvider::ScopedWriteLockGL lock_;
66 uint64_t previous_content_id_; 60 uint64_t previous_content_id_;
67 61
68 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); 62 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
69 }; 63 };
70 64
71 // Delay between checks for the query result to become available.
72 const int kCheckForQueryResultAvailableTickRateMs = 1;
73
74 // Number of attempts to allow before we perform a check that will wait for
75 // the query to complete.
76 const int kMaxCheckForQueryResultAvailableAttempts = 256;
77
78 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good 65 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good
79 // default batch size for copy operations. 66 // default batch size for copy operations.
80 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; 67 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
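
(The 4 MiB figure assumes a 4-bytes-per-pixel format such as RGBA_8888: 512 × 512 px × 4 B = 1 MiB per tile, and 4 tiles × 1 MiB = 4 MiB, matching 1024 * 1024 * 4 above.)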
81 68
82 // Delay before a staging buffer might be released.
83 const int kStagingBufferExpirationDelayMs = 1000;
84
85 bool CheckForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
86 unsigned complete = 1;
87 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete);
88 return !!complete;
89 }
90
91 void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
92 TRACE_EVENT0("cc", "WaitForQueryResult");
93
94 int attempts_left = kMaxCheckForQueryResultAvailableAttempts;
95 while (attempts_left--) {
96 if (CheckForQueryResult(gl, query_id))
97 break;
98
99 // We have to flush the context to be guaranteed that a query result will
100 // be available in a finite amount of time.
101 gl->ShallowFlushCHROMIUM();
102
103 base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
104 kCheckForQueryResultAvailableTickRateMs));
105 }
106
107 unsigned result = 0;
108 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result);
109 }
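
CheckForQueryResult and WaitForQueryResult above poll a CHROMIUM sync query that is issued around the copy commands; the actual issue site is in a portion of this file elided below. A minimal sketch of that pattern, assuming the GL_COMMANDS_COMPLETED_CHROMIUM query target and a hypothetical IssueCopyWithQuery helper:

    // Sketch: issue a completion query around the copy so the helpers
    // above have something to poll. IssueCopyWithQuery is hypothetical;
    // the real issue site is in the elided part of this file.
    void IssueCopyWithQuery(gpu::gles2::GLES2Interface* gl,
                            unsigned* query_id) {
      if (!*query_id)
        gl->GenQueriesEXT(1, query_id);
      gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, *query_id);
      // ... CopySubTextureCHROMIUM calls for this staging buffer ...
      gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
      // Once the GPU has executed the enclosed commands,
      // GL_QUERY_RESULT_AVAILABLE_EXT flips to true and
      // CheckForQueryResult() starts returning true.
    }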
110
111 } // namespace 69 } // namespace
112 70
113 OneCopyTileTaskWorkerPool::StagingBuffer::StagingBuffer(const gfx::Size& size,
114 ResourceFormat format)
115 : size(size),
116 format(format),
117 texture_id(0),
118 image_id(0),
119 query_id(0),
120 content_id(0) {}
121
122 OneCopyTileTaskWorkerPool::StagingBuffer::~StagingBuffer() {
123 DCHECK_EQ(texture_id, 0u);
124 DCHECK_EQ(image_id, 0u);
125 DCHECK_EQ(query_id, 0u);
126 }
127
128 void OneCopyTileTaskWorkerPool::StagingBuffer::DestroyGLResources(
129 gpu::gles2::GLES2Interface* gl) {
130 if (query_id) {
131 gl->DeleteQueriesEXT(1, &query_id);
132 query_id = 0;
133 }
134 if (image_id) {
135 gl->DestroyImageCHROMIUM(image_id);
136 image_id = 0;
137 }
138 if (texture_id) {
139 gl->DeleteTextures(1, &texture_id);
140 texture_id = 0;
141 }
142 }
143
144 void OneCopyTileTaskWorkerPool::StagingBuffer::OnMemoryDump(
145 base::trace_event::ProcessMemoryDump* pmd,
146 ResourceFormat format,
147 bool in_free_list) const {
148 if (!gpu_memory_buffer)
149 return;
150
151 gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId();
152 std::string buffer_dump_name =
153 base::StringPrintf("cc/one_copy/staging_memory/buffer_%d", buffer_id.id);
154 base::trace_event::MemoryAllocatorDump* buffer_dump =
155 pmd->CreateAllocatorDump(buffer_dump_name);
156
157 uint64_t buffer_size_in_bytes =
158 ResourceUtil::UncheckedSizeInBytes<uint64_t>(size, format);
159 buffer_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
160 base::trace_event::MemoryAllocatorDump::kUnitsBytes,
161 buffer_size_in_bytes);
162 buffer_dump->AddScalar("free_size",
163 base::trace_event::MemoryAllocatorDump::kUnitsBytes,
164 in_free_list ? buffer_size_in_bytes : 0);
165
166 // Emit an ownership edge towards a global allocator dump node.
167 const uint64_t tracing_process_id =
168 base::trace_event::MemoryDumpManager::GetInstance()
169 ->GetTracingProcessId();
170 base::trace_event::MemoryAllocatorDumpGuid shared_buffer_guid =
171 gfx::GetGpuMemoryBufferGUIDForTracing(tracing_process_id, buffer_id);
172 pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid);
173
174 // By creating an edge with a higher |importance| (w.r.t. browser-side dumps)
175 // the tracing UI will account the effective size of the buffer to the child.
176 const int kImportance = 2;
177 pmd->AddOwnershipEdge(buffer_dump->guid(), shared_buffer_guid, kImportance);
178 }
179
180 // static 71 // static
181 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( 72 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
182 base::SequencedTaskRunner* task_runner, 73 base::SequencedTaskRunner* task_runner,
183 TaskGraphRunner* task_graph_runner, 74 TaskGraphRunner* task_graph_runner,
184 ContextProvider* context_provider, 75 ContextProvider* context_provider,
185 ResourceProvider* resource_provider, 76 ResourceProvider* resource_provider,
186 int max_copy_texture_chromium_size, 77 int max_copy_texture_chromium_size,
187 bool use_partial_raster, 78 bool use_partial_raster,
188 int max_staging_buffer_usage_in_bytes, 79 int max_staging_buffer_usage_in_bytes,
189 ResourceFormat preferred_tile_format) { 80 ResourceFormat preferred_tile_format) {
190 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( 81 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
191 task_runner, task_graph_runner, resource_provider, 82 task_runner, task_graph_runner, resource_provider,
192 max_copy_texture_chromium_size, use_partial_raster, 83 max_copy_texture_chromium_size, use_partial_raster,
193 max_staging_buffer_usage_in_bytes, preferred_tile_format)); 84 max_staging_buffer_usage_in_bytes, preferred_tile_format));
194 } 85 }
195 86
196 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( 87 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
197 base::SequencedTaskRunner* task_runner, 88 base::SequencedTaskRunner* task_runner,
198 TaskGraphRunner* task_graph_runner, 89 TaskGraphRunner* task_graph_runner,
199 ResourceProvider* resource_provider, 90 ResourceProvider* resource_provider,
200 int max_copy_texture_chromium_size, 91 int max_copy_texture_chromium_size,
201 bool use_partial_raster, 92 bool use_partial_raster,
202 int max_staging_buffer_usage_in_bytes, 93 int max_staging_buffer_usage_in_bytes,
203 ResourceFormat preferred_tile_format) 94 ResourceFormat preferred_tile_format)
204 : task_runner_(task_runner), 95 : task_graph_runner_(task_graph_runner),
205 task_graph_runner_(task_graph_runner),
206 namespace_token_(task_graph_runner->GetNamespaceToken()), 96 namespace_token_(task_graph_runner->GetNamespaceToken()),
207 resource_provider_(resource_provider), 97 resource_provider_(resource_provider),
208 max_bytes_per_copy_operation_( 98 max_bytes_per_copy_operation_(
209 max_copy_texture_chromium_size 99 max_copy_texture_chromium_size
210 ? std::min(kMaxBytesPerCopyOperation, 100 ? std::min(kMaxBytesPerCopyOperation,
211 max_copy_texture_chromium_size) 101 max_copy_texture_chromium_size)
212 : kMaxBytesPerCopyOperation), 102 : kMaxBytesPerCopyOperation),
213 use_partial_raster_(use_partial_raster), 103 use_partial_raster_(use_partial_raster),
214 bytes_scheduled_since_last_flush_(0), 104 bytes_scheduled_since_last_flush_(0),
215 max_staging_buffer_usage_in_bytes_(max_staging_buffer_usage_in_bytes), 105 preferred_tile_format_(preferred_tile_format) {
216 preferred_tile_format_(preferred_tile_format), 106 staging_pool_ = StagingBufferPool::Create(task_runner, resource_provider,
217 staging_buffer_usage_in_bytes_(0), 107 use_partial_raster,
218 free_staging_buffer_usage_in_bytes_(0), 108 max_staging_buffer_usage_in_bytes);
219 staging_buffer_expiration_delay_(
220 base::TimeDelta::FromMilliseconds(kStagingBufferExpirationDelayMs)),
221 reduce_memory_usage_pending_(false),
222 weak_ptr_factory_(this) {
223 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
224 this, "OneCopyTileTaskWorkerPool", base::ThreadTaskRunnerHandle::Get());
225 reduce_memory_usage_callback_ =
226 base::Bind(&OneCopyTileTaskWorkerPool::ReduceMemoryUsage,
227 weak_ptr_factory_.GetWeakPtr());
228 } 109 }
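
The constructor above shows the point of the refactor: staging-buffer bookkeeping (memory dumps, expiration, the free/busy lists) moves behind the new StagingBufferPool. From its usage in this file, the pool's interface is roughly the following sketch; see cc/raster/staging_buffer_pool.h for the actual declaration:

    // Interface sketch inferred from the call sites in this file; the
    // authoritative declaration lives in cc/raster/staging_buffer_pool.h.
    class StagingBufferPool {
     public:
      static scoped_ptr<StagingBufferPool> Create(
          base::SequencedTaskRunner* task_runner,
          ResourceProvider* resource_provider,
          bool use_partial_raster,
          int max_staging_buffer_usage_in_bytes);

      // Blocks until a suitable buffer is available under the memory limit.
      scoped_ptr<StagingBuffer> AcquireStagingBuffer(
          const Resource* resource,
          uint64_t previous_content_id);
      void ReleaseStagingBuffer(scoped_ptr<StagingBuffer> staging_buffer);

      void Shutdown();
    };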
229 110
230 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { 111 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
231 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
232 this);
233 } 112 }
234 113
235 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { 114 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
236 return this; 115 return this;
237 } 116 }
238 117
239 void OneCopyTileTaskWorkerPool::Shutdown() { 118 void OneCopyTileTaskWorkerPool::Shutdown() {
240 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); 119 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");
241 120
242 TaskGraph empty; 121 TaskGraph empty;
243 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); 122 task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
244 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); 123 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
245 124
246 base::AutoLock lock(lock_); 125 staging_pool_->Shutdown();
247
248 if (buffers_.empty())
249 return;
250
251 ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max());
252 DCHECK_EQ(staging_buffer_usage_in_bytes_, 0);
253 DCHECK_EQ(free_staging_buffer_usage_in_bytes_, 0);
254 } 126 }
255 127
256 void OneCopyTileTaskWorkerPool::ScheduleTasks(TaskGraph* graph) { 128 void OneCopyTileTaskWorkerPool::ScheduleTasks(TaskGraph* graph) {
257 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); 129 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");
258 130
259 ScheduleTasksOnOriginThread(this, graph); 131 ScheduleTasksOnOriginThread(this, graph);
260 132
261 // Barrier to sync any new resources to the worker context. 133 // Barrier to sync any new resources to the worker context.
262 resource_provider_->output_surface() 134 resource_provider_->output_surface()
263 ->context_provider() 135 ->context_provider()
(...skipping 55 matching lines...)
319 void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread( 191 void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread(
320 const Resource* resource, 192 const Resource* resource,
321 ResourceProvider::ScopedWriteLockGL* resource_lock, 193 ResourceProvider::ScopedWriteLockGL* resource_lock,
322 const RasterSource* raster_source, 194 const RasterSource* raster_source,
323 const gfx::Rect& raster_full_rect, 195 const gfx::Rect& raster_full_rect,
324 const gfx::Rect& raster_dirty_rect, 196 const gfx::Rect& raster_dirty_rect,
325 float scale, 197 float scale,
326 const RasterSource::PlaybackSettings& playback_settings, 198 const RasterSource::PlaybackSettings& playback_settings,
327 uint64_t previous_content_id, 199 uint64_t previous_content_id,
328 uint64_t new_content_id) { 200 uint64_t new_content_id) {
329 base::AutoLock lock(lock_); 201 scoped_ptr<StagingBuffer> staging_buffer =
202 staging_pool_->AcquireStagingBuffer(resource, previous_content_id);
330 203
331 scoped_ptr<StagingBuffer> staging_buffer = 204 PlaybackToStagingBuffer(staging_buffer.get(), resource, raster_source,
332 AcquireStagingBuffer(resource, previous_content_id); 205 raster_full_rect, raster_dirty_rect, scale,
333 DCHECK(staging_buffer); 206 playback_settings, previous_content_id,
207 new_content_id);
334 208
335 { 209 CopyOnWorkerThread(staging_buffer.get(), resource, resource_lock,
336 base::AutoUnlock unlock(lock_); 210 raster_source, previous_content_id, new_content_id);
337 211
338 // Allocate GpuMemoryBuffer if necessary. If using partial raster, we 212 staging_pool_->ReleaseStagingBuffer(std::move(staging_buffer));
339 // must allocate a buffer with BufferUsage GPU_READ_CPU_READ_WRITE_PERSISTENT. 213 }
340 if (!staging_buffer->gpu_memory_buffer) {
341 staging_buffer->gpu_memory_buffer =
342 resource_provider_->gpu_memory_buffer_manager()
343 ->AllocateGpuMemoryBuffer(
344 staging_buffer->size, BufferFormat(resource->format()),
345 use_partial_raster_
346 ? gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT
347 : gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
348 0 /* surface_id */);
349 }
350 214
351 gfx::Rect playback_rect = raster_full_rect; 215 void OneCopyTileTaskWorkerPool::PlaybackToStagingBuffer(
352 if (use_partial_raster_ && previous_content_id) { 216 StagingBuffer* staging_buffer,
353 // Reduce playback rect to dirty region if the content id of the staging 217 const Resource* resource,
354 // buffer matches the previous content id. 218 const RasterSource* raster_source,
355 if (previous_content_id == staging_buffer->content_id) 219 const gfx::Rect& raster_full_rect,
356 playback_rect.Intersect(raster_dirty_rect); 220 const gfx::Rect& raster_dirty_rect,
357 } 221 float scale,
358 222 const RasterSource::PlaybackSettings& playback_settings,
359 if (staging_buffer->gpu_memory_buffer) { 223 uint64_t previous_content_id,
360 gfx::GpuMemoryBuffer* buffer = staging_buffer->gpu_memory_buffer.get(); 224 uint64_t new_content_id) {
361 DCHECK_EQ(1u, gfx::NumberOfPlanesForBufferFormat(buffer->GetFormat())); 225 // Allocate GpuMemoryBuffer if necessary. If using partial raster, we
362 bool rv = buffer->Map(); 226 // must allocate a buffer with BufferUsage GPU_READ_CPU_READ_WRITE_PERSISTENT.
363 DCHECK(rv); 227 if (!staging_buffer->gpu_memory_buffer) {
364 DCHECK(buffer->memory(0)); 228 staging_buffer->gpu_memory_buffer =
365 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. 229 resource_provider_->gpu_memory_buffer_manager()
366 DCHECK_GE(buffer->stride(0), 0); 230 ->AllocateGpuMemoryBuffer(
367 231 staging_buffer->size, BufferFormat(resource->format()),
368 DCHECK(!playback_rect.IsEmpty()) 232 use_partial_raster_
369 << "Why are we rastering a tile that's not dirty?"; 233 ? gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT
370 TileTaskWorkerPool::PlaybackToMemory( 234 : gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
371 buffer->memory(0), resource->format(), staging_buffer->size, 235 0 /* surface_id */);
372 buffer->stride(0), raster_source, raster_full_rect, playback_rect,
373 scale, playback_settings);
374 buffer->Unmap();
375 staging_buffer->content_id = new_content_id;
376 }
377 } 236 }
378 237
238 gfx::Rect playback_rect = raster_full_rect;
239 if (use_partial_raster_ && previous_content_id) {
240 // Reduce playback rect to dirty region if the content id of the staging
241 // buffer matches the previous content id.
242 if (previous_content_id == staging_buffer->content_id)
243 playback_rect.Intersect(raster_dirty_rect);
244 }
245
246 if (staging_buffer->gpu_memory_buffer) {
247 gfx::GpuMemoryBuffer* buffer = staging_buffer->gpu_memory_buffer.get();
248 DCHECK_EQ(1u, gfx::NumberOfPlanesForBufferFormat(buffer->GetFormat()));
249 bool rv = buffer->Map();
250 DCHECK(rv);
251 DCHECK(buffer->memory(0));
252 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
253 DCHECK_GE(buffer->stride(0), 0);
254
255 DCHECK(!playback_rect.IsEmpty())
256 << "Why are we rastering a tile that's not dirty?";
257 TileTaskWorkerPool::PlaybackToMemory(
258 buffer->memory(0), resource->format(), staging_buffer->size,
259 buffer->stride(0), raster_source, raster_full_rect, playback_rect,
260 scale, playback_settings);
261 buffer->Unmap();
262 staging_buffer->content_id = new_content_id;
263 }
264 }
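
A worked example of the playback-rect reduction in PlaybackToStagingBuffer above, with hypothetical numbers: when the staging buffer still holds the content identified by previous_content_id, only the invalidated region has to be re-rastered.

    // Hypothetical numbers illustrating the partial-raster path above.
    gfx::Rect raster_full_rect(0, 0, 512, 512);    // the whole tile
    gfx::Rect raster_dirty_rect(64, 64, 128, 128); // invalidated region
    gfx::Rect playback_rect = raster_full_rect;
    // staging_buffer->content_id == previous_content_id, so:
    playback_rect.Intersect(raster_dirty_rect);
    // playback_rect is now (64,64) 128x128; the remaining pixels in the
    // persistent GpuMemoryBuffer are reused from the previous raster.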
265
266 void OneCopyTileTaskWorkerPool::CopyOnWorkerThread(
267 StagingBuffer* staging_buffer,
268 const Resource* resource,
269 ResourceProvider::ScopedWriteLockGL* resource_lock,
270 const RasterSource* raster_source,
271 uint64_t previous_content_id,
272 uint64_t new_content_id) {
379 ContextProvider* context_provider = 273 ContextProvider* context_provider =
380 resource_provider_->output_surface()->worker_context_provider(); 274 resource_provider_->output_surface()->worker_context_provider();
381 DCHECK(context_provider); 275 DCHECK(context_provider);
382 276
383 { 277 {
384 ContextProvider::ScopedContextLock scoped_context(context_provider); 278 ContextProvider::ScopedContextLock scoped_context(context_provider);
385 279
386 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); 280 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
387 DCHECK(gl); 281 DCHECK(gl);
388 282
(...skipping 90 matching lines...)
479 const uint64_t fence_sync = gl->InsertFenceSyncCHROMIUM(); 373 const uint64_t fence_sync = gl->InsertFenceSyncCHROMIUM();
480 374
481 // Barrier to sync worker context output to cc context. 375 // Barrier to sync worker context output to cc context.
482 gl->OrderingBarrierCHROMIUM(); 376 gl->OrderingBarrierCHROMIUM();
483 377
484 // Generate sync token after the barrier for cross context synchronization. 378 // Generate sync token after the barrier for cross context synchronization.
485 gpu::SyncToken sync_token; 379 gpu::SyncToken sync_token;
486 gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, sync_token.GetData()); 380 gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, sync_token.GetData());
487 resource_lock->UpdateResourceSyncToken(sync_token); 381 resource_lock->UpdateResourceSyncToken(sync_token);
488 } 382 }
489
490 staging_buffer->last_usage = base::TimeTicks::Now();
491 busy_buffers_.push_back(std::move(staging_buffer));
492
493 ScheduleReduceMemoryUsage();
494 }
495
496 bool OneCopyTileTaskWorkerPool::OnMemoryDump(
497 const base::trace_event::MemoryDumpArgs& args,
498 base::trace_event::ProcessMemoryDump* pmd) {
499 base::AutoLock lock(lock_);
500
501 for (const auto* buffer : buffers_) {
502 auto in_free_buffers =
503 std::find_if(free_buffers_.begin(), free_buffers_.end(),
504 [buffer](const scoped_ptr<StagingBuffer>& b) {
505 return b.get() == buffer;
506 });
507 buffer->OnMemoryDump(pmd, buffer->format,
508 in_free_buffers != free_buffers_.end());
509 }
510
511 return true;
512 }
513
514 void OneCopyTileTaskWorkerPool::AddStagingBuffer(
515 const StagingBuffer* staging_buffer,
516 ResourceFormat format) {
517 lock_.AssertAcquired();
518
519 DCHECK(buffers_.find(staging_buffer) == buffers_.end());
520 buffers_.insert(staging_buffer);
521 int buffer_usage_in_bytes =
522 ResourceUtil::UncheckedSizeInBytes<int>(staging_buffer->size, format);
523 staging_buffer_usage_in_bytes_ += buffer_usage_in_bytes;
524 }
525
526 void OneCopyTileTaskWorkerPool::RemoveStagingBuffer(
527 const StagingBuffer* staging_buffer) {
528 lock_.AssertAcquired();
529
530 DCHECK(buffers_.find(staging_buffer) != buffers_.end());
531 buffers_.erase(staging_buffer);
532 int buffer_usage_in_bytes = ResourceUtil::UncheckedSizeInBytes<int>(
533 staging_buffer->size, staging_buffer->format);
534 DCHECK_GE(staging_buffer_usage_in_bytes_, buffer_usage_in_bytes);
535 staging_buffer_usage_in_bytes_ -= buffer_usage_in_bytes;
536 }
537
538 void OneCopyTileTaskWorkerPool::MarkStagingBufferAsFree(
539 const StagingBuffer* staging_buffer) {
540 lock_.AssertAcquired();
541
542 int buffer_usage_in_bytes = ResourceUtil::UncheckedSizeInBytes<int>(
543 staging_buffer->size, staging_buffer->format);
544 free_staging_buffer_usage_in_bytes_ += buffer_usage_in_bytes;
545 }
546
547 void OneCopyTileTaskWorkerPool::MarkStagingBufferAsBusy(
548 const StagingBuffer* staging_buffer) {
549 lock_.AssertAcquired();
550
551 int buffer_usage_in_bytes = ResourceUtil::UncheckedSizeInBytes<int>(
552 staging_buffer->size, staging_buffer->format);
553 DCHECK_GE(free_staging_buffer_usage_in_bytes_, buffer_usage_in_bytes);
554 free_staging_buffer_usage_in_bytes_ -= buffer_usage_in_bytes;
555 }
556
557 scoped_ptr<OneCopyTileTaskWorkerPool::StagingBuffer>
558 OneCopyTileTaskWorkerPool::AcquireStagingBuffer(const Resource* resource,
559 uint64_t previous_content_id) {
560 lock_.AssertAcquired();
561
562 scoped_ptr<StagingBuffer> staging_buffer;
563
564 ContextProvider* context_provider =
565 resource_provider_->output_surface()->worker_context_provider();
566 DCHECK(context_provider);
567
568 ContextProvider::ScopedContextLock scoped_context(context_provider);
569
570 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
571 DCHECK(gl);
572
573 // Check if any busy buffers have become available.
574 if (resource_provider_->use_sync_query()) {
575 while (!busy_buffers_.empty()) {
576 if (!CheckForQueryResult(gl, busy_buffers_.front()->query_id))
577 break;
578
579 MarkStagingBufferAsFree(busy_buffers_.front().get());
580 free_buffers_.push_back(PopFront(&busy_buffers_));
581 }
582 }
583
584 // Wait for memory usage of non-free buffers to become less than the limit.
585 while (
586 (staging_buffer_usage_in_bytes_ - free_staging_buffer_usage_in_bytes_) >=
587 max_staging_buffer_usage_in_bytes_) {
588 // Stop when there are no more busy buffers to wait for.
589 if (busy_buffers_.empty())
590 break;
591
592 if (resource_provider_->use_sync_query()) {
593 WaitForQueryResult(gl, busy_buffers_.front()->query_id);
594 MarkStagingBufferAsFree(busy_buffers_.front().get());
595 free_buffers_.push_back(PopFront(&busy_buffers_));
596 } else {
597 // Fall-back to glFinish if CHROMIUM_sync_query is not available.
598 gl->Finish();
599 while (!busy_buffers_.empty()) {
600 MarkStagingBufferAsFree(busy_buffers_.front().get());
601 free_buffers_.push_back(PopFront(&busy_buffers_));
602 }
603 }
604 }
605
606 // Find a staging buffer that allows us to perform partial raster when
607 // using persistent GpuMemoryBuffers.
608 if (use_partial_raster_ && previous_content_id) {
609 StagingBufferDeque::iterator it = std::find_if(
610 free_buffers_.begin(), free_buffers_.end(),
611 [previous_content_id](const scoped_ptr<StagingBuffer>& buffer) {
612 return buffer->content_id == previous_content_id;
613 });
614 if (it != free_buffers_.end()) {
615 staging_buffer = std::move(*it);
616 free_buffers_.erase(it);
617 MarkStagingBufferAsBusy(staging_buffer.get());
618 }
619 }
620
621 // Find staging buffer of correct size and format.
622 if (!staging_buffer) {
623 StagingBufferDeque::iterator it =
624 std::find_if(free_buffers_.begin(), free_buffers_.end(),
625 [resource](const scoped_ptr<StagingBuffer>& buffer) {
626 return buffer->size == resource->size() &&
627 buffer->format == resource->format();
628 });
629 if (it != free_buffers_.end()) {
630 staging_buffer = std::move(*it);
631 free_buffers_.erase(it);
632 MarkStagingBufferAsBusy(staging_buffer.get());
633 }
634 }
635
636 // Create new staging buffer if necessary.
637 if (!staging_buffer) {
638 staging_buffer = make_scoped_ptr(
639 new StagingBuffer(resource->size(), resource->format()));
640 AddStagingBuffer(staging_buffer.get(), resource->format());
641 }
642
643 // Release enough free buffers to stay within the limit.
644 while (staging_buffer_usage_in_bytes_ > max_staging_buffer_usage_in_bytes_) {
645 if (free_buffers_.empty())
646 break;
647
648 free_buffers_.front()->DestroyGLResources(gl);
649 MarkStagingBufferAsBusy(free_buffers_.front().get());
650 RemoveStagingBuffer(free_buffers_.front().get());
651 free_buffers_.pop_front();
652 }
653
654 return staging_buffer;
655 }
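
To summarize the acquisition policy implemented above (all of which this CL moves into StagingBufferPool), condensed as comments:

    // 1. Reclaim busy buffers whose copy queries have already completed.
    // 2. While non-free usage is at the limit, wait on the oldest busy
    //    buffer's query (or fall back to glFinish() when
    //    CHROMIUM_sync_query is unavailable).
    // 3. Pick a free buffer: prefer one whose content_id matches
    //    previous_content_id (enables partial raster), else one with a
    //    matching size and format, else allocate a new StagingBuffer.
    // 4. Destroy free buffers until total usage is within the limit.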
656
657 base::TimeTicks OneCopyTileTaskWorkerPool::GetUsageTimeForLRUBuffer() {
658 lock_.AssertAcquired();
659
660 if (!free_buffers_.empty())
661 return free_buffers_.front()->last_usage;
662
663 if (!busy_buffers_.empty())
664 return busy_buffers_.front()->last_usage;
665
666 return base::TimeTicks();
667 }
668
669 void OneCopyTileTaskWorkerPool::ScheduleReduceMemoryUsage() {
670 lock_.AssertAcquired();
671
672 if (reduce_memory_usage_pending_)
673 return;
674
675 reduce_memory_usage_pending_ = true;
676
677 // Schedule a call to ReduceMemoryUsage at the time when the LRU buffer
678 // should be released.
679 base::TimeTicks reduce_memory_usage_time =
680 GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
681 task_runner_->PostDelayedTask(
682 FROM_HERE, reduce_memory_usage_callback_,
683 reduce_memory_usage_time - base::TimeTicks::Now());
684 }
685
686 void OneCopyTileTaskWorkerPool::ReduceMemoryUsage() {
687 base::AutoLock lock(lock_);
688
689 reduce_memory_usage_pending_ = false;
690
691 if (free_buffers_.empty() && busy_buffers_.empty())
692 return;
693
694 base::TimeTicks current_time = base::TimeTicks::Now();
695 ReleaseBuffersNotUsedSince(current_time - staging_buffer_expiration_delay_);
696
697 if (free_buffers_.empty() && busy_buffers_.empty())
698 return;
699
700 reduce_memory_usage_pending_ = true;
701
702 // Schedule another call to ReduceMemoryUsage at the time when the next
703 // buffer should be released.
704 base::TimeTicks reduce_memory_usage_time =
705 GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
706 task_runner_->PostDelayedTask(FROM_HERE, reduce_memory_usage_callback_,
707 reduce_memory_usage_time - current_time);
708 }
709
710 void OneCopyTileTaskWorkerPool::ReleaseBuffersNotUsedSince(
711 base::TimeTicks time) {
712 lock_.AssertAcquired();
713
714 ContextProvider* context_provider =
715 resource_provider_->output_surface()->worker_context_provider();
716 DCHECK(context_provider);
717
718 {
719 ContextProvider::ScopedContextLock scoped_context(context_provider);
720
721 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
722 DCHECK(gl);
723
724 // Note: Front buffer is guaranteed to be LRU so we can stop releasing
725 // buffers as soon as we find a buffer that has been used since |time|.
726 while (!free_buffers_.empty()) {
727 if (free_buffers_.front()->last_usage > time)
728 return;
729
730 free_buffers_.front()->DestroyGLResources(gl);
731 MarkStagingBufferAsBusy(free_buffers_.front().get());
732 RemoveStagingBuffer(free_buffers_.front().get());
733 free_buffers_.pop_front();
734 }
735
736 while (!busy_buffers_.empty()) {
737 if (busy_buffers_.front()->last_usage > time)
738 return;
739
740 busy_buffers_.front()->DestroyGLResources(gl);
741 RemoveStagingBuffer(busy_buffers_.front().get());
742 busy_buffers_.pop_front();
743 }
744 }
745 } 383 }
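
The fence-sync / ordering-barrier / sync-token sequence near the end of CopyOnWorkerThread is the standard worker-to-compositor handoff; restated here with brief notes on why each step exists (the code is taken verbatim from the diff above, only the comments are added):

    // The cross-context handoff used above, annotated:
    const uint64_t fence_sync = gl->InsertFenceSyncCHROMIUM();
    // Order the worker context's commands ahead of subsequent cc-context
    // commands without forcing a full flush.
    gl->OrderingBarrierCHROMIUM();
    // An unverified token avoids a round trip to the GPU process; it can
    // be verified later if it ever needs to cross a process boundary.
    gpu::SyncToken sync_token;
    gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, sync_token.GetData());
    // Consumers of the resource wait on this token before sampling it.
    resource_lock->UpdateResourceSyncToken(sync_token);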
746 384
747 } // namespace cc 385 } // namespace cc