Chromium Code Reviews

Side by Side Diff: cc/raster/one_copy_tile_task_worker_pool.cc

Issue 1861623003: cc: Refactor OneCopyTileTaskWorkerPool. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@dependency_task
Patch Set: refactor one copy. Created 4 years, 8 months ago
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "cc/raster/one_copy_tile_task_worker_pool.h" 5 #include "cc/raster/one_copy_tile_task_worker_pool.h"
6 6
7 #include <stdint.h> 7 #include <stdint.h>
8 8
9 #include <algorithm> 9 #include <algorithm>
10 #include <limits> 10 #include <limits>
11 #include <utility> 11 #include <utility>
12 12
13 #include "base/macros.h" 13 #include "base/macros.h"
14 #include "base/strings/stringprintf.h"
15 #include "base/thread_task_runner_handle.h"
16 #include "base/trace_event/memory_dump_manager.h"
17 #include "base/trace_event/trace_event.h"
18 #include "base/trace_event/trace_event_argument.h"
19 #include "cc/base/container_util.h"
20 #include "cc/base/math_util.h" 14 #include "cc/base/math_util.h"
21 #include "cc/debug/traced_value.h" 15 #include "cc/debug/traced_value.h"
16 #include "cc/raster/staging_buffer_pool.h"
22 #include "cc/resources/platform_color.h" 17 #include "cc/resources/platform_color.h"
23 #include "cc/resources/resource_format.h" 18 #include "cc/resources/resource_format.h"
24 #include "cc/resources/resource_util.h" 19 #include "cc/resources/resource_util.h"
25 #include "cc/resources/scoped_resource.h" 20 #include "cc/resources/scoped_resource.h"
26 #include "gpu/GLES2/gl2extchromium.h" 21 #include "gpu/GLES2/gl2extchromium.h"
27 #include "gpu/command_buffer/client/gles2_interface.h" 22 #include "gpu/command_buffer/client/gles2_interface.h"
28 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" 23 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
29 #include "ui/gfx/buffer_format_util.h" 24 #include "ui/gfx/buffer_format_util.h"
30 25
31 namespace cc { 26 namespace cc {
27
32 namespace { 28 namespace {
33 29
34 class RasterBufferImpl : public RasterBuffer { 30 class RasterBufferImpl : public RasterBuffer {
35 public: 31 public:
36 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, 32 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
37 ResourceProvider* resource_provider, 33 ResourceProvider* resource_provider,
38 ResourceFormat resource_format, 34 ResourceFormat resource_format,
39 const Resource* resource, 35 const Resource* resource,
40 uint64_t previous_content_id) 36 uint64_t previous_content_id)
41 : worker_pool_(worker_pool), 37 : worker_pool_(worker_pool),
(...skipping 18 matching lines...)
60 56
61 private: 57 private:
62 OneCopyTileTaskWorkerPool* worker_pool_; 58 OneCopyTileTaskWorkerPool* worker_pool_;
63 const Resource* resource_; 59 const Resource* resource_;
64 ResourceProvider::ScopedWriteLockGL lock_; 60 ResourceProvider::ScopedWriteLockGL lock_;
65 uint64_t previous_content_id_; 61 uint64_t previous_content_id_;
66 62
67 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); 63 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
68 }; 64 };
69 65
70 // Delay between checks for the query result to become available.
71 const int kCheckForQueryResultAvailableTickRateMs = 1;
72
73 // Number of attempts to allow before we perform a check that will wait for
74 // the query to complete.
75 const int kMaxCheckForQueryResultAvailableAttempts = 256;
76
77 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good 66 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good
78 // default batch size for copy operations. 67 // default batch size for copy operations.
79 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; 68 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
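(For the arithmetic behind the comment: one 512x512 tile at 4 bytes per pixel is 512 * 512 * 4 = 1 MiB, so four such tiles come to 4 MiB, assuming a 32-bit format such as RGBA_8888.)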
80 69
81 // Delay before a staging buffer might be released.
82 const int kStagingBufferExpirationDelayMs = 1000;
83
84 bool CheckForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
85 unsigned complete = 1;
86 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete);
87 return !!complete;
88 }
89
90 void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
91 TRACE_EVENT0("cc", "WaitForQueryResult");
92
93 int attempts_left = kMaxCheckForQueryResultAvailableAttempts;
94 while (attempts_left--) {
95 if (CheckForQueryResult(gl, query_id))
96 break;
97
98 // We have to flush the context to be guaranteed that a query result will
99 // be available in a finite amount of time.
100 gl->ShallowFlushCHROMIUM();
101
102 base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
103 kCheckForQueryResultAvailableTickRateMs));
104 }
105
106 unsigned result = 0;
107 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result);
108 }
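(The final GetQueryObjectuivEXT call with GL_QUERY_RESULT_EXT is the backstop: under standard GL query semantics it blocks until the result is available, so WaitForQueryResult returns even when the availability poll above exhausts its attempts.)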
109
110 } // namespace 70 } // namespace
111 71
112 OneCopyTileTaskWorkerPool::StagingBuffer::StagingBuffer(const gfx::Size& size,
113 ResourceFormat format)
114 : size(size),
115 format(format),
116 texture_id(0),
117 image_id(0),
118 query_id(0),
119 content_id(0) {}
120
121 OneCopyTileTaskWorkerPool::StagingBuffer::~StagingBuffer() {
122 DCHECK_EQ(texture_id, 0u);
123 DCHECK_EQ(image_id, 0u);
124 DCHECK_EQ(query_id, 0u);
125 }
126
127 void OneCopyTileTaskWorkerPool::StagingBuffer::DestroyGLResources(
128 gpu::gles2::GLES2Interface* gl) {
129 if (query_id) {
130 gl->DeleteQueriesEXT(1, &query_id);
131 query_id = 0;
132 }
133 if (image_id) {
134 gl->DestroyImageCHROMIUM(image_id);
135 image_id = 0;
136 }
137 if (texture_id) {
138 gl->DeleteTextures(1, &texture_id);
139 texture_id = 0;
140 }
141 }
142
143 void OneCopyTileTaskWorkerPool::StagingBuffer::OnMemoryDump(
144 base::trace_event::ProcessMemoryDump* pmd,
145 ResourceFormat format,
146 bool in_free_list) const {
147 if (!gpu_memory_buffer)
148 return;
149
150 gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId();
151 std::string buffer_dump_name =
152 base::StringPrintf("cc/one_copy/staging_memory/buffer_%d", buffer_id.id);
153 base::trace_event::MemoryAllocatorDump* buffer_dump =
154 pmd->CreateAllocatorDump(buffer_dump_name);
155
156 uint64_t buffer_size_in_bytes =
157 ResourceUtil::UncheckedSizeInBytes<uint64_t>(size, format);
158 buffer_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
159 base::trace_event::MemoryAllocatorDump::kUnitsBytes,
160 buffer_size_in_bytes);
161 buffer_dump->AddScalar("free_size",
162 base::trace_event::MemoryAllocatorDump::kUnitsBytes,
163 in_free_list ? buffer_size_in_bytes : 0);
164
165 // Emit an ownership edge towards a global allocator dump node.
166 const uint64_t tracing_process_id =
167 base::trace_event::MemoryDumpManager::GetInstance()
168 ->GetTracingProcessId();
169 base::trace_event::MemoryAllocatorDumpGuid shared_buffer_guid =
170 gfx::GetGpuMemoryBufferGUIDForTracing(tracing_process_id, buffer_id);
171 pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid);
172
173 // By creating an edge with a higher |importance| (w.r.t. browser-side dumps)
174 // the tracing UI will account the effective size of the buffer to the child.
175 const int kImportance = 2;
176 pmd->AddOwnershipEdge(buffer_dump->guid(), shared_buffer_guid, kImportance);
177 }
178
179 // static 72 // static
180 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( 73 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
181 base::SequencedTaskRunner* task_runner, 74 base::SequencedTaskRunner* task_runner,
182 TaskGraphRunner* task_graph_runner, 75 TaskGraphRunner* task_graph_runner,
183 ContextProvider* context_provider, 76 ContextProvider* context_provider,
184 ResourceProvider* resource_provider, 77 ResourceProvider* resource_provider,
185 int max_copy_texture_chromium_size, 78 int max_copy_texture_chromium_size,
186 bool use_partial_raster, 79 bool use_partial_raster,
187 int max_staging_buffer_usage_in_bytes, 80 int max_staging_buffer_usage_in_bytes,
188 ResourceFormat preferred_tile_format) { 81 ResourceFormat preferred_tile_format) {
189 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( 82 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
190 task_runner, task_graph_runner, resource_provider, 83 task_runner, task_graph_runner, resource_provider,
191 max_copy_texture_chromium_size, use_partial_raster, 84 max_copy_texture_chromium_size, use_partial_raster,
192 max_staging_buffer_usage_in_bytes, preferred_tile_format)); 85 max_staging_buffer_usage_in_bytes, preferred_tile_format));
193 } 86 }
194 87
195 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( 88 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
196 base::SequencedTaskRunner* task_runner, 89 base::SequencedTaskRunner* task_runner,
197 TaskGraphRunner* task_graph_runner, 90 TaskGraphRunner* task_graph_runner,
198 ResourceProvider* resource_provider, 91 ResourceProvider* resource_provider,
199 int max_copy_texture_chromium_size, 92 int max_copy_texture_chromium_size,
200 bool use_partial_raster, 93 bool use_partial_raster,
201 int max_staging_buffer_usage_in_bytes, 94 int max_staging_buffer_usage_in_bytes,
202 ResourceFormat preferred_tile_format) 95 ResourceFormat preferred_tile_format)
203 : task_runner_(task_runner), 96 : task_graph_runner_(task_graph_runner),
204 task_graph_runner_(task_graph_runner),
205 namespace_token_(task_graph_runner->GetNamespaceToken()), 97 namespace_token_(task_graph_runner->GetNamespaceToken()),
206 resource_provider_(resource_provider), 98 resource_provider_(resource_provider),
207 max_bytes_per_copy_operation_( 99 max_bytes_per_copy_operation_(
208 max_copy_texture_chromium_size 100 max_copy_texture_chromium_size
209 ? std::min(kMaxBytesPerCopyOperation, 101 ? std::min(kMaxBytesPerCopyOperation,
210 max_copy_texture_chromium_size) 102 max_copy_texture_chromium_size)
211 : kMaxBytesPerCopyOperation), 103 : kMaxBytesPerCopyOperation),
212 use_partial_raster_(use_partial_raster), 104 use_partial_raster_(use_partial_raster),
213 bytes_scheduled_since_last_flush_(0), 105 bytes_scheduled_since_last_flush_(0),
214 max_staging_buffer_usage_in_bytes_(max_staging_buffer_usage_in_bytes), 106 preferred_tile_format_(preferred_tile_format) {
215 preferred_tile_format_(preferred_tile_format), 107 staging_pool_ = StagingBufferPool::Create(task_runner, resource_provider,
216 staging_buffer_usage_in_bytes_(0), 108 use_partial_raster,
217 free_staging_buffer_usage_in_bytes_(0), 109 max_staging_buffer_usage_in_bytes);
218 staging_buffer_expiration_delay_(
219 base::TimeDelta::FromMilliseconds(kStagingBufferExpirationDelayMs)),
220 reduce_memory_usage_pending_(false),
221 weak_ptr_factory_(this) {
222 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
223 this, "OneCopyTileTaskWorkerPool", base::ThreadTaskRunnerHandle::Get());
224 reduce_memory_usage_callback_ =
225 base::Bind(&OneCopyTileTaskWorkerPool::ReduceMemoryUsage,
226 weak_ptr_factory_.GetWeakPtr());
227 } 110 }
228 111
229 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { 112 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
230 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
231 this);
232 } 113 }
233 114
234 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { 115 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
235 return this; 116 return this;
236 } 117 }
237 118
238 void OneCopyTileTaskWorkerPool::Shutdown() { 119 void OneCopyTileTaskWorkerPool::Shutdown() {
239 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); 120 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");
240 121
241 TaskGraph empty; 122 TaskGraph empty;
242 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); 123 task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
243 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); 124 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
244 125
245 base::AutoLock lock(lock_); 126 staging_pool_->Shutdown();
246
247 if (buffers_.empty())
248 return;
249
250 ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max());
251 DCHECK_EQ(staging_buffer_usage_in_bytes_, 0);
252 DCHECK_EQ(free_staging_buffer_usage_in_bytes_, 0);
253 } 127 }
254 128
255 void OneCopyTileTaskWorkerPool::ScheduleTasks(TaskGraph* graph) { 129 void OneCopyTileTaskWorkerPool::ScheduleTasks(TaskGraph* graph) {
256 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); 130 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");
257 131
258 ScheduleTasksOnOriginThread(graph); 132 ScheduleTasksOnOriginThread(graph);
259 133
260 // Barrier to sync any new resources to the worker context. 134 // Barrier to sync any new resources to the worker context.
261 resource_provider_->output_surface() 135 resource_provider_->output_surface()
262 ->context_provider() 136 ->context_provider()
(...skipping 55 matching lines...)
318 void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread( 192 void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread(
319 const Resource* resource, 193 const Resource* resource,
320 ResourceProvider::ScopedWriteLockGL* resource_lock, 194 ResourceProvider::ScopedWriteLockGL* resource_lock,
321 const RasterSource* raster_source, 195 const RasterSource* raster_source,
322 const gfx::Rect& raster_full_rect, 196 const gfx::Rect& raster_full_rect,
323 const gfx::Rect& raster_dirty_rect, 197 const gfx::Rect& raster_dirty_rect,
324 float scale, 198 float scale,
325 const RasterSource::PlaybackSettings& playback_settings, 199 const RasterSource::PlaybackSettings& playback_settings,
326 uint64_t previous_content_id, 200 uint64_t previous_content_id,
327 uint64_t new_content_id) { 201 uint64_t new_content_id) {
328 base::AutoLock lock(lock_); 202 scoped_ptr<StagingBuffer> staging_buffer =
203 staging_pool_->AcquireStagingBuffer(resource, previous_content_id);
329 204
330 scoped_ptr<StagingBuffer> staging_buffer = 205 PlaybackToStagingBuffer(staging_buffer.get(), resource, raster_source,
331 AcquireStagingBuffer(resource, previous_content_id); 206 raster_full_rect, raster_dirty_rect, scale,
332 DCHECK(staging_buffer); 207 playback_settings, previous_content_id,
208 new_content_id);
333 209
334 { 210 CopyOnWorkerThread(staging_buffer.get(), resource, resource_lock,
335 base::AutoUnlock unlock(lock_); 211 raster_source, previous_content_id, new_content_id);
336 212
337 // Allocate GpuMemoryBuffer if necessary. If using partial raster, we 213 staging_pool_->ReleaseStagingBuffer(std::move(staging_buffer));
338 // must allocate a buffer with BufferUsage GPU_READ_CPU_READ_WRITE_PERSISTENT. 214 }
339 if (!staging_buffer->gpu_memory_buffer) {
340 staging_buffer->gpu_memory_buffer =
341 resource_provider_->gpu_memory_buffer_manager()
342 ->AllocateGpuMemoryBuffer(
343 staging_buffer->size, BufferFormat(resource->format()),
344 use_partial_raster_
345 ? gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT
346 : gfx::BufferUsage::GPU_READ_CPU_READ_WRITE);
347 }
348 215
349 gfx::Rect playback_rect = raster_full_rect; 216 void OneCopyTileTaskWorkerPool::PlaybackToStagingBuffer(
350 if (use_partial_raster_ && previous_content_id) { 217 StagingBuffer* staging_buffer,
351 // Reduce playback rect to dirty region if the content id of the staging 218 const Resource* resource,
352 // buffer matches the previous content id. 219 const RasterSource* raster_source,
353 if (previous_content_id == staging_buffer->content_id) 220 const gfx::Rect& raster_full_rect,
354 playback_rect.Intersect(raster_dirty_rect); 221 const gfx::Rect& raster_dirty_rect,
355 } 222 float scale,
356 223 const RasterSource::PlaybackSettings& playback_settings,
357 if (staging_buffer->gpu_memory_buffer) { 224 uint64_t previous_content_id,
358 gfx::GpuMemoryBuffer* buffer = staging_buffer->gpu_memory_buffer.get(); 225 uint64_t new_content_id) {
359 DCHECK_EQ(1u, gfx::NumberOfPlanesForBufferFormat(buffer->GetFormat())); 226 // Allocate GpuMemoryBuffer if necessary. If using partial raster, we
360 bool rv = buffer->Map(); 227 // must allocate a buffer with BufferUsage GPU_READ_CPU_READ_WRITE_PERSISTENT.
361 DCHECK(rv); 228 if (!staging_buffer->gpu_memory_buffer) {
362 DCHECK(buffer->memory(0)); 229 staging_buffer->gpu_memory_buffer =
363 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. 230 resource_provider_->gpu_memory_buffer_manager()
364 DCHECK_GE(buffer->stride(0), 0); 231 ->AllocateGpuMemoryBuffer(
365 232 staging_buffer->size, BufferFormat(resource->format()),
366 DCHECK(!playback_rect.IsEmpty()) 233 use_partial_raster_
367 << "Why are we rastering a tile that's not dirty?"; 234 ? gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT
368 TileTaskWorkerPool::PlaybackToMemory( 235 : gfx::BufferUsage::GPU_READ_CPU_READ_WRITE);
369 buffer->memory(0), resource->format(), staging_buffer->size,
370 buffer->stride(0), raster_source, raster_full_rect, playback_rect,
371 scale, playback_settings);
372 buffer->Unmap();
373 staging_buffer->content_id = new_content_id;
374 }
375 } 236 }
376 237
238 gfx::Rect playback_rect = raster_full_rect;
239 if (use_partial_raster_ && previous_content_id) {
240 // Reduce playback rect to dirty region if the content id of the staging
241 // buffer matches the previous content id.
242 if (previous_content_id == staging_buffer->content_id)
243 playback_rect.Intersect(raster_dirty_rect);
244 }
245
246 if (staging_buffer->gpu_memory_buffer) {
247 gfx::GpuMemoryBuffer* buffer = staging_buffer->gpu_memory_buffer.get();
248 DCHECK_EQ(1u, gfx::NumberOfPlanesForBufferFormat(buffer->GetFormat()));
249 bool rv = buffer->Map();
250 DCHECK(rv);
251 DCHECK(buffer->memory(0));
252 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
253 DCHECK_GE(buffer->stride(0), 0);
254
255 DCHECK(!playback_rect.IsEmpty())
256 << "Why are we rastering a tile that's not dirty?";
257 TileTaskWorkerPool::PlaybackToMemory(
258 buffer->memory(0), resource->format(), staging_buffer->size,
259 buffer->stride(0), raster_source, raster_full_rect, playback_rect,
260 scale, playback_settings);
261 buffer->Unmap();
262 staging_buffer->content_id = new_content_id;
263 }
264 }
265
266 void OneCopyTileTaskWorkerPool::CopyOnWorkerThread(
267 StagingBuffer* staging_buffer,
268 const Resource* resource,
269 ResourceProvider::ScopedWriteLockGL* resource_lock,
270 const RasterSource* raster_source,
271 uint64_t previous_content_id,
272 uint64_t new_content_id) {
377 ContextProvider* context_provider = 273 ContextProvider* context_provider =
378 resource_provider_->output_surface()->worker_context_provider(); 274 resource_provider_->output_surface()->worker_context_provider();
379 DCHECK(context_provider); 275 DCHECK(context_provider);
380 276
381 { 277 {
382 ContextProvider::ScopedContextLock scoped_context(context_provider); 278 ContextProvider::ScopedContextLock scoped_context(context_provider);
383 279
384 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); 280 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
385 DCHECK(gl); 281 DCHECK(gl);
386 282
(...skipping 60 matching lines...)
447 while (y < height) { 343 while (y < height) {
448 // Copy at most |chunk_size_in_rows|. 344 // Copy at most |chunk_size_in_rows|.
449 int rows_to_copy = std::min(chunk_size_in_rows, height - y); 345 int rows_to_copy = std::min(chunk_size_in_rows, height - y);
450 DCHECK_GT(rows_to_copy, 0); 346 DCHECK_GT(rows_to_copy, 0);
451 347
452 gl->CopySubTextureCHROMIUM( 348 gl->CopySubTextureCHROMIUM(
453 staging_buffer->texture_id, resource_lock->texture_id(), 0, y, 0, y, 349 staging_buffer->texture_id, resource_lock->texture_id(), 0, y, 0, y,
454 resource->size().width(), rows_to_copy, false, false, false); 350 resource->size().width(), rows_to_copy, false, false, false);
455 y += rows_to_copy; 351 y += rows_to_copy;
456 352
457 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory 353 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
ericrk 2016/04/07 19:53:47 This used to be guarded by lock_ - the usage is st
prashant.n 2016/04/08 16:33:16 Due to this I removed dependency of lock. but thi
458 // used for this copy operation. 354 // used for this copy operation.
459 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; 355 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
460 356
461 if (bytes_scheduled_since_last_flush_ >= 357 if (bytes_scheduled_since_last_flush_ >=
462 max_bytes_per_copy_operation_) { 358 max_bytes_per_copy_operation_) {
463 gl->ShallowFlushCHROMIUM(); 359 gl->ShallowFlushCHROMIUM();
464 bytes_scheduled_since_last_flush_ = 0; 360 bytes_scheduled_since_last_flush_ = 0;
465 } 361 }
466 } 362 }
467 } 363 }
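(Worked example of the flush batching above, assuming a 512-px-wide tile in a 4-byte-per-pixel format: bytes_per_row is 512 * 4 = 2048, so |bytes_scheduled_since_last_flush_| reaches the default 4 MiB cap after 2048 copied rows, at which point the worker issues a ShallowFlushCHROMIUM and resets the counter.)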
468 364
469 if (resource_provider_->use_sync_query()) { 365 if (resource_provider_->use_sync_query()) {
470 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY) 366 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
471 gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM); 367 gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
472 #else 368 #else
473 gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM); 369 gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
474 #endif 370 #endif
475 } 371 }
476 372
477 const uint64_t fence_sync = gl->InsertFenceSyncCHROMIUM(); 373 const uint64_t fence_sync = gl->InsertFenceSyncCHROMIUM();
478 374
479 // Barrier to sync worker context output to cc context. 375 // Barrier to sync worker context output to cc context.
480 gl->OrderingBarrierCHROMIUM(); 376 gl->OrderingBarrierCHROMIUM();
481 377
482 // Generate sync token after the barrier for cross context synchronization. 378 // Generate sync token after the barrier for cross context synchronization.
483 gpu::SyncToken sync_token; 379 gpu::SyncToken sync_token;
484 gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, sync_token.GetData()); 380 gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, sync_token.GetData());
485 resource_lock->UpdateResourceSyncToken(sync_token); 381 resource_lock->UpdateResourceSyncToken(sync_token);
486 } 382 }
487
488 staging_buffer->last_usage = base::TimeTicks::Now();
489 busy_buffers_.push_back(std::move(staging_buffer));
490
491 ScheduleReduceMemoryUsage();
492 }
493
494 bool OneCopyTileTaskWorkerPool::OnMemoryDump(
495 const base::trace_event::MemoryDumpArgs& args,
496 base::trace_event::ProcessMemoryDump* pmd) {
497 base::AutoLock lock(lock_);
498
499 for (const auto* buffer : buffers_) {
500 auto in_free_buffers =
501 std::find_if(free_buffers_.begin(), free_buffers_.end(),
502 [buffer](const scoped_ptr<StagingBuffer>& b) {
503 return b.get() == buffer;
504 });
505 buffer->OnMemoryDump(pmd, buffer->format,
506 in_free_buffers != free_buffers_.end());
507 }
508
509 return true;
510 }
511
512 void OneCopyTileTaskWorkerPool::AddStagingBuffer(
513 const StagingBuffer* staging_buffer,
514 ResourceFormat format) {
515 lock_.AssertAcquired();
516
517 DCHECK(buffers_.find(staging_buffer) == buffers_.end());
518 buffers_.insert(staging_buffer);
519 int buffer_usage_in_bytes =
520 ResourceUtil::UncheckedSizeInBytes<int>(staging_buffer->size, format);
521 staging_buffer_usage_in_bytes_ += buffer_usage_in_bytes;
522 }
523
524 void OneCopyTileTaskWorkerPool::RemoveStagingBuffer(
525 const StagingBuffer* staging_buffer) {
526 lock_.AssertAcquired();
527
528 DCHECK(buffers_.find(staging_buffer) != buffers_.end());
529 buffers_.erase(staging_buffer);
530 int buffer_usage_in_bytes = ResourceUtil::UncheckedSizeInBytes<int>(
531 staging_buffer->size, staging_buffer->format);
532 DCHECK_GE(staging_buffer_usage_in_bytes_, buffer_usage_in_bytes);
533 staging_buffer_usage_in_bytes_ -= buffer_usage_in_bytes;
534 }
535
536 void OneCopyTileTaskWorkerPool::MarkStagingBufferAsFree(
537 const StagingBuffer* staging_buffer) {
538 lock_.AssertAcquired();
539
540 int buffer_usage_in_bytes = ResourceUtil::UncheckedSizeInBytes<int>(
541 staging_buffer->size, staging_buffer->format);
542 free_staging_buffer_usage_in_bytes_ += buffer_usage_in_bytes;
543 }
544
545 void OneCopyTileTaskWorkerPool::MarkStagingBufferAsBusy(
546 const StagingBuffer* staging_buffer) {
547 lock_.AssertAcquired();
548
549 int buffer_usage_in_bytes = ResourceUtil::UncheckedSizeInBytes<int>(
550 staging_buffer->size, staging_buffer->format);
551 DCHECK_GE(free_staging_buffer_usage_in_bytes_, buffer_usage_in_bytes);
552 free_staging_buffer_usage_in_bytes_ -= buffer_usage_in_bytes;
553 }
554
555 scoped_ptr<OneCopyTileTaskWorkerPool::StagingBuffer>
556 OneCopyTileTaskWorkerPool::AcquireStagingBuffer(const Resource* resource,
557 uint64_t previous_content_id) {
558 lock_.AssertAcquired();
559
560 scoped_ptr<StagingBuffer> staging_buffer;
561
562 ContextProvider* context_provider =
563 resource_provider_->output_surface()->worker_context_provider();
564 DCHECK(context_provider);
565
566 ContextProvider::ScopedContextLock scoped_context(context_provider);
567
568 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
569 DCHECK(gl);
570
571 // Check if any busy buffers have become available.
572 if (resource_provider_->use_sync_query()) {
573 while (!busy_buffers_.empty()) {
574 if (!CheckForQueryResult(gl, busy_buffers_.front()->query_id))
575 break;
576
577 MarkStagingBufferAsFree(busy_buffers_.front().get());
578 free_buffers_.push_back(PopFront(&busy_buffers_));
579 }
580 }
581
582 // Wait for memory usage of non-free buffers to become less than the limit.
583 while (
584 (staging_buffer_usage_in_bytes_ - free_staging_buffer_usage_in_bytes_) >=
585 max_staging_buffer_usage_in_bytes_) {
586 // Stop when there are no more busy buffers to wait for.
587 if (busy_buffers_.empty())
588 break;
589
590 if (resource_provider_->use_sync_query()) {
591 WaitForQueryResult(gl, busy_buffers_.front()->query_id);
592 MarkStagingBufferAsFree(busy_buffers_.front().get());
593 free_buffers_.push_back(PopFront(&busy_buffers_));
594 } else {
595 // Fall-back to glFinish if CHROMIUM_sync_query is not available.
596 gl->Finish();
597 while (!busy_buffers_.empty()) {
598 MarkStagingBufferAsFree(busy_buffers_.front().get());
599 free_buffers_.push_back(PopFront(&busy_buffers_));
600 }
601 }
602 }
603
604 // Find a staging buffer that allows us to perform partial raster when
605 // using persistent GpuMemoryBuffers.
606 if (use_partial_raster_ && previous_content_id) {
607 StagingBufferDeque::iterator it = std::find_if(
608 free_buffers_.begin(), free_buffers_.end(),
609 [previous_content_id](const scoped_ptr<StagingBuffer>& buffer) {
610 return buffer->content_id == previous_content_id;
611 });
612 if (it != free_buffers_.end()) {
613 staging_buffer = std::move(*it);
614 free_buffers_.erase(it);
615 MarkStagingBufferAsBusy(staging_buffer.get());
616 }
617 }
618
619 // Find staging buffer of correct size and format.
620 if (!staging_buffer) {
621 StagingBufferDeque::iterator it =
622 std::find_if(free_buffers_.begin(), free_buffers_.end(),
623 [resource](const scoped_ptr<StagingBuffer>& buffer) {
624 return buffer->size == resource->size() &&
625 buffer->format == resource->format();
626 });
627 if (it != free_buffers_.end()) {
628 staging_buffer = std::move(*it);
629 free_buffers_.erase(it);
630 MarkStagingBufferAsBusy(staging_buffer.get());
631 }
632 }
633
634 // Create new staging buffer if necessary.
635 if (!staging_buffer) {
636 staging_buffer = make_scoped_ptr(
637 new StagingBuffer(resource->size(), resource->format()));
638 AddStagingBuffer(staging_buffer.get(), resource->format());
639 }
640
641 // Release enough free buffers to stay within the limit.
642 while (staging_buffer_usage_in_bytes_ > max_staging_buffer_usage_in_bytes_) {
643 if (free_buffers_.empty())
644 break;
645
646 free_buffers_.front()->DestroyGLResources(gl);
647 MarkStagingBufferAsBusy(free_buffers_.front().get());
648 RemoveStagingBuffer(free_buffers_.front().get());
649 free_buffers_.pop_front();
650 }
651
652 return staging_buffer;
653 }
654
655 base::TimeTicks OneCopyTileTaskWorkerPool::GetUsageTimeForLRUBuffer() {
656 lock_.AssertAcquired();
657
658 if (!free_buffers_.empty())
659 return free_buffers_.front()->last_usage;
660
661 if (!busy_buffers_.empty())
662 return busy_buffers_.front()->last_usage;
663
664 return base::TimeTicks();
665 }
666
667 void OneCopyTileTaskWorkerPool::ScheduleReduceMemoryUsage() {
668 lock_.AssertAcquired();
669
670 if (reduce_memory_usage_pending_)
671 return;
672
673 reduce_memory_usage_pending_ = true;
674
675 // Schedule a call to ReduceMemoryUsage at the time when the LRU buffer
676 // should be released.
677 base::TimeTicks reduce_memory_usage_time =
678 GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
679 task_runner_->PostDelayedTask(
680 FROM_HERE, reduce_memory_usage_callback_,
681 reduce_memory_usage_time - base::TimeTicks::Now());
682 }
683
684 void OneCopyTileTaskWorkerPool::ReduceMemoryUsage() {
685 base::AutoLock lock(lock_);
686
687 reduce_memory_usage_pending_ = false;
688
689 if (free_buffers_.empty() && busy_buffers_.empty())
690 return;
691
692 base::TimeTicks current_time = base::TimeTicks::Now();
693 ReleaseBuffersNotUsedSince(current_time - staging_buffer_expiration_delay_);
694
695 if (free_buffers_.empty() && busy_buffers_.empty())
696 return;
697
698 reduce_memory_usage_pending_ = true;
699
700 // Schedule another call to ReduceMemoryUsage at the time when the next
701 // buffer should be released.
702 base::TimeTicks reduce_memory_usage_time =
703 GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
704 task_runner_->PostDelayedTask(FROM_HERE, reduce_memory_usage_callback_,
705 reduce_memory_usage_time - current_time);
706 }
707
708 void OneCopyTileTaskWorkerPool::ReleaseBuffersNotUsedSince(
709 base::TimeTicks time) {
710 lock_.AssertAcquired();
711
712 ContextProvider* context_provider =
713 resource_provider_->output_surface()->worker_context_provider();
714 DCHECK(context_provider);
715
716 {
717 ContextProvider::ScopedContextLock scoped_context(context_provider);
718
719 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
720 DCHECK(gl);
721
722 // Note: Front buffer is guaranteed to be LRU so we can stop releasing
723 // buffers as soon as we find a buffer that has been used since |time|.
724 while (!free_buffers_.empty()) {
725 if (free_buffers_.front()->last_usage > time)
726 return;
727
728 free_buffers_.front()->DestroyGLResources(gl);
729 MarkStagingBufferAsBusy(free_buffers_.front().get());
730 RemoveStagingBuffer(free_buffers_.front().get());
731 free_buffers_.pop_front();
732 }
733
734 while (!busy_buffers_.empty()) {
735 if (busy_buffers_.front()->last_usage > time)
736 return;
737
738 busy_buffers_.front()->DestroyGLResources(gl);
739 RemoveStagingBuffer(busy_buffers_.front().get());
740 busy_buffers_.pop_front();
741 }
742 }
743 } 383 }
744 384
745 } // namespace cc 385 } // namespace cc
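Note for reviewers: the staging-buffer bookkeeping removed above (memory dumps, LRU expiration, acquire/release accounting) now lives behind StagingBufferPool. A minimal sketch of that interface, inferred from the call sites in this file; the authoritative declaration is cc/raster/staging_buffer_pool.h, added elsewhere in this issue:

// Sketch only; inferred from usage in this file, not the real declaration.
class StagingBufferPool {
 public:
  // Assumed to return a scoped_ptr, matching the staging_pool_ member usage.
  static scoped_ptr<StagingBufferPool> Create(
      base::SequencedTaskRunner* task_runner,
      ResourceProvider* resource_provider,
      bool use_partial_raster,
      int max_staging_buffer_usage_in_bytes);

  // Drains and releases all staging buffers (called from Shutdown above).
  void Shutdown();

  // Returns a buffer suitable for |resource|, reusing a free buffer with a
  // matching size/format (or |previous_content_id| for partial raster).
  scoped_ptr<StagingBuffer> AcquireStagingBuffer(const Resource* resource,
                                                 uint64_t previous_content_id);

  // Presumably marks the buffer busy until its GL query completes, then free.
  void ReleaseStagingBuffer(scoped_ptr<StagingBuffer> staging_buffer);
};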