Chromium Code Reviews

Unified Diff: cc/raster/one_copy_tile_task_worker_pool.cc

Issue 1230203007: Re-land: cc: Use worker context for one-copy tile initialization. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: v2 Created 5 years, 5 months ago
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "cc/raster/one_copy_tile_task_worker_pool.h"
 
 #include <algorithm>
 #include <limits>
 
 #include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/thread_task_runner_handle.h"
+#include "base/trace_event/memory_dump_manager.h"
 #include "base/trace_event/trace_event.h"
 #include "base/trace_event/trace_event_argument.h"
 #include "cc/base/math_util.h"
 #include "cc/debug/traced_value.h"
 #include "cc/raster/raster_buffer.h"
 #include "cc/resources/platform_color.h"
-#include "cc/resources/resource_pool.h"
 #include "cc/resources/scoped_resource.h"
+#include "gpu/GLES2/gl2extchromium.h"
 #include "gpu/command_buffer/client/gles2_interface.h"
-#include "ui/gfx/gpu_memory_buffer.h"
+#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
 
 namespace cc {
 namespace {
 
 class RasterBufferImpl : public RasterBuffer {
  public:
   RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
                    ResourceProvider* resource_provider,
-                   ResourcePool* resource_pool,
                    ResourceFormat resource_format,
-                   const Resource* output_resource,
+                   const Resource* resource,
                    uint64_t previous_content_id)
       : worker_pool_(worker_pool),
-        resource_provider_(resource_provider),
-        resource_pool_(resource_pool),
-        output_resource_(output_resource),
-        raster_content_id_(0),
-        sequence_(0) {
-    if (worker_pool->have_persistent_gpu_memory_buffers() &&
-        previous_content_id) {
-      raster_resource_ =
-          resource_pool->TryAcquireResourceWithContentId(previous_content_id);
-    }
-    if (raster_resource_) {
-      raster_content_id_ = previous_content_id;
-      DCHECK_EQ(resource_format, raster_resource_->format());
-      DCHECK_EQ(output_resource->size().ToString(),
-                raster_resource_->size().ToString());
-    } else {
-      raster_resource_ = resource_pool->AcquireResource(output_resource->size(),
-                                                        resource_format);
-    }
-
-    lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer(
-        resource_provider_, raster_resource_->id()));
-  }
-
-  ~RasterBufferImpl() override {
-    // Release write lock in case a copy was never scheduled.
-    lock_.reset();
-
-    // Make sure any scheduled copy operations are issued before we release the
-    // raster resource.
-    if (sequence_)
-      worker_pool_->AdvanceLastIssuedCopyTo(sequence_);
-
-    // Return resources to pool so they can be used by another RasterBuffer
-    // instance.
-    resource_pool_->ReleaseResource(raster_resource_.Pass(),
-                                    raster_content_id_);
-  }
+        resource_(resource),
+        lock_(resource_provider, resource->id()),
+        previous_content_id_(previous_content_id) {}
+
+  ~RasterBufferImpl() override {}
 
   // Overridden from RasterBuffer:
   void Playback(const RasterSource* raster_source,
                 const gfx::Rect& raster_full_rect,
                 const gfx::Rect& raster_dirty_rect,
                 uint64_t new_content_id,
                 float scale) override {
-    // If there's a raster_content_id_, we are reusing a resource with that
-    // content id.
-    bool reusing_raster_resource = raster_content_id_ != 0;
-    sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
-        reusing_raster_resource, lock_.Pass(), raster_resource_.get(),
-        output_resource_, raster_source, raster_full_rect, raster_dirty_rect,
-        scale);
-    // Store the content id of the resource to return to the pool.
-    raster_content_id_ = new_content_id;
+    worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
+        resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect,
+        scale, previous_content_id_, new_content_id);
   }
 
  private:
   OneCopyTileTaskWorkerPool* worker_pool_;
-  ResourceProvider* resource_provider_;
-  ResourcePool* resource_pool_;
-  const Resource* output_resource_;
-  uint64_t raster_content_id_;
-  scoped_ptr<ScopedResource> raster_resource_;
-  scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_;
-  CopySequenceNumber sequence_;
+  const Resource* resource_;
+  ResourceProvider::ScopedWriteLockGL lock_;
+  uint64_t previous_content_id_;
 
   DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
 };
 
-// Number of in-flight copy operations to allow.
-const int kMaxCopyOperations = 32;
+// Default limit for number of staging buffers.
+const size_t kMaxDefaultStagingBuffers = 32;
 
-// Delay been checking for copy operations to complete.
-const int kCheckForCompletedCopyOperationsTickRateMs = 1;
+// Delay between checking for query result to be available.
+const int kCheckForQueryResultAvailableTickRateMs = 1;
 
-// Number of failed attempts to allow before we perform a check that will
-// wait for copy operations to complete if needed.
-const int kFailedAttemptsBeforeWaitIfNeeded = 256;
+// Number of attempts to allow before we perform a check that will wait for
+// query to complete.
+const int kMaxCheckForQueryResultAvailableAttempts = 256;
 
 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good
 // default batch size for copy operations.
 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
 
+// Delay before a staging buffer might be released.
+const int kStagingBufferExpirationDelayMs = 1000;
+
+bool CheckForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
+  GLuint complete = 1;
+  gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete);
+  return complete;
+}
+
+void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {

vmpstr 2015/07/22 23:38:52: This should probably return a bool?
reveman 2015/07/23 05:35:05: Not sure what that would be for. This function can…

+  TRACE_EVENT0("cc", "WaitForQueryResult");
+
+  int attempts_left = kMaxCheckForQueryResultAvailableAttempts;
+  while (attempts_left--) {
+    if (CheckForQueryResult(gl, query_id))
+      break;
+
+    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
+        kCheckForQueryResultAvailableTickRateMs));
+  }
+
+  unsigned result = 0;
+  gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result);

vmpstr 2015/07/22 23:38:52: Is this safe to call if we run out of attempts? If…
reveman 2015/07/23 05:35:05: Yes, it's always safe to query the result but ther…

+}
+
 }  // namespace
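
For reference, the two helpers above form a bounded-poll-then-block pattern: in the worst case WaitForQueryResult() sleeps for kMaxCheckForQueryResultAvailableAttempts * kCheckForQueryResultAvailableTickRateMs = 256 * 1 ms of polling, and the final GL_QUERY_RESULT_EXT read blocks until the query completes, which is why (per reveman's reply above) a return value would have nothing to report. A minimal caller-side sketch, assuming a valid gpu::gles2::GLES2Interface* gl and a query_id that has been begun and ended:

    // Cheap non-blocking poll first; only fall into the bounded wait if the
    // result is not yet available.
    if (!CheckForQueryResult(gl, query_id))
      WaitForQueryResult(gl, query_id);
    // For a GL_COMMANDS_COMPLETED_CHROMIUM query the guarded commands have now
    // completed; a GL_COMMANDS_ISSUED_CHROMIUM query only guarantees issue.
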
-OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation(
-    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock,
-    const Resource* src,
-    const Resource* dst,
-    const gfx::Rect& rect)
-    : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) {
-}
-
-OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() {
-}
+OneCopyTileTaskWorkerPool::StagingBuffer::StagingBuffer(const gfx::Size& size)
+    : size(size), texture_id(0), image_id(0), query_id(0), content_id(0) {}
+
+OneCopyTileTaskWorkerPool::StagingBuffer::~StagingBuffer() {
+  DCHECK_EQ(texture_id, 0u);
+  DCHECK_EQ(image_id, 0u);
+  DCHECK_EQ(query_id, 0u);
+}
+
+void OneCopyTileTaskWorkerPool::StagingBuffer::DestroyGLResources(
+    gpu::gles2::GLES2Interface* gl) {
+  if (query_id) {
+    gl->DeleteQueriesEXT(1, &query_id);
+    query_id = 0;
+  }
+  if (image_id) {
+    gl->DestroyImageCHROMIUM(image_id);
+    image_id = 0;
+  }
+  if (texture_id) {
+    gl->DeleteTextures(1, &texture_id);
+    texture_id = 0;
+  }
+}
+
+void OneCopyTileTaskWorkerPool::StagingBuffer::OnMemoryDump(
+    base::trace_event::ProcessMemoryDump* pmd,
+    ResourceFormat format) const {
+  if (!gpu_memory_buffer)
+    return;
+
+  gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId();
+  std::string buffer_dump_name =
+      base::StringPrintf("gpumemorybuffer/buffer_%d", buffer_id);
+  base::trace_event::MemoryAllocatorDump* buffer_dump =
+      pmd->CreateAllocatorDump(buffer_dump_name);
+
+  size_t buffer_size_in_bytes =
+      Resource::UncheckedMemorySizeBytes(size, format);
+  buffer_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
+                         base::trace_event::MemoryAllocatorDump::kUnitsBytes,
+                         static_cast<uint64_t>(buffer_size_in_bytes));
+
+  // Emit an ownership edge towards a global allocator dump node.
+  const uint64 tracing_process_id =
+      base::trace_event::MemoryDumpManager::GetInstance()->tracing_process_id();
+  base::trace_event::MemoryAllocatorDumpGuid shared_buffer_guid =
+      gfx::GetGpuMemoryBufferGUIDForTracing(tracing_process_id, buffer_id);
+  pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid);
+
+  // By creating an edge with a higher |importance| (w.r.t. browser-side dumps)
+  // the tracing UI will account the effective size of the buffer to the child.
+  const int kImportance = 2;
+  pmd->AddOwnershipEdge(buffer_dump->guid(), shared_buffer_guid, kImportance);
+}
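
The DCHECKs in ~StagingBuffer() encode a teardown contract: the GL objects must be deleted, with the worker context current, before the struct itself is destroyed. A sketch of the release order the pool code later in this file follows (control flow simplified):

    // With the worker context's GLES2Interface* gl current:
    buffer->DestroyGLResources(gl);  // deletes query/image/texture, zeroes ids
    buffers_.erase(buffer);          // drop the bookkeeping entry
    // The StagingBuffer destructor now runs with all ids at 0, so the
    // DCHECK_EQ(..., 0u) checks above hold.
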
 
 // static
 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
     base::SequencedTaskRunner* task_runner,
     TaskGraphRunner* task_graph_runner,
     ContextProvider* context_provider,
     ResourceProvider* resource_provider,
-    ResourcePool* resource_pool,
     int max_copy_texture_chromium_size,
-    bool have_persistent_gpu_memory_buffers) {
+    bool use_persistent_gpu_memory_buffers,
+    unsigned image_target) {
   return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
-      task_runner, task_graph_runner, context_provider, resource_provider,
-      resource_pool, max_copy_texture_chromium_size,
-      have_persistent_gpu_memory_buffers));
+      task_runner, task_graph_runner, resource_provider,
+      max_copy_texture_chromium_size, use_persistent_gpu_memory_buffers,
+      image_target));
 }
 
 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
     base::SequencedTaskRunner* task_runner,
     TaskGraphRunner* task_graph_runner,
-    ContextProvider* context_provider,
     ResourceProvider* resource_provider,
-    ResourcePool* resource_pool,
     int max_copy_texture_chromium_size,
-    bool have_persistent_gpu_memory_buffers)
+    bool use_persistent_gpu_memory_buffers,
+    unsigned image_target)
     : task_runner_(task_runner),
       task_graph_runner_(task_graph_runner),
       namespace_token_(task_graph_runner->GetNamespaceToken()),
-      context_provider_(context_provider),
       resource_provider_(resource_provider),
-      resource_pool_(resource_pool),
       max_bytes_per_copy_operation_(
           max_copy_texture_chromium_size
               ? std::min(kMaxBytesPerCopyOperation,
                          max_copy_texture_chromium_size)
               : kMaxBytesPerCopyOperation),
-      have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers),
-      last_issued_copy_operation_(0),
-      last_flushed_copy_operation_(0),
-      lock_(),
-      copy_operation_count_cv_(&lock_),
+      use_persistent_gpu_memory_buffers_(use_persistent_gpu_memory_buffers),
+      image_target_(image_target),
       bytes_scheduled_since_last_flush_(0),
-      issued_copy_operation_count_(0),
-      next_copy_operation_sequence_(1),
-      check_for_completed_copy_operations_pending_(false),
-      shutdown_(false),
+      max_staging_buffers_(
+          base::SysInfo::IsLowEndDevice()
+              ?
+              // Use 1/4th of staging buffers on low-end devices.
+              kMaxDefaultStagingBuffers / 4
+              : kMaxDefaultStagingBuffers),

reveman 2015/07/22 22:39:15: Note: this reduction in staging buffer usage is su…
vmpstr 2015/07/22 23:38:52: This should probably be a setting in layer_tree_se…
reveman 2015/07/23 05:35:05: Done.

+      staging_buffer_expiration_delay_(
+          base::TimeDelta::FromMilliseconds(kStagingBufferExpirationDelayMs)),
+      reduce_memory_usage_pending_(false),
       weak_ptr_factory_(this),
       task_set_finished_weak_ptr_factory_(this) {
-  DCHECK(context_provider_);
+  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+      this, base::ThreadTaskRunnerHandle::Get());
+  reduce_memory_usage_callback_ =
+      base::Bind(&OneCopyTileTaskWorkerPool::ReduceMemoryUsage,
+                 weak_ptr_factory_.GetWeakPtr());
 }
 
 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
-  DCHECK_EQ(pending_copy_operations_.size(), 0u);
+  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
+      this);
 }
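
Worked numbers for the low-end branch above: kMaxDefaultStagingBuffers / 4 = 8 staging buffers, so at 512x512 RGBA_8888 (1 MiB each) the staging cap drops from 32 MiB to 8 MiB. vmpstr's request to move this into layer_tree_settings is marked Done, so later patch sets presumably take the limit from settings rather than from base::SysInfo directly. A standalone sketch of the arithmetic (tile size and format are assumptions, this is not pool code):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t kMaxDefaultStagingBuffers = 32;
      bool low_end = true;  // stand-in for base::SysInfo::IsLowEndDevice()
      size_t max_staging_buffers =
          low_end ? kMaxDefaultStagingBuffers / 4  // 8 buffers
                  : kMaxDefaultStagingBuffers;     // 32 buffers
      size_t bytes_per_buffer = 512 * 512 * 4;     // 512x512 RGBA_8888, 1 MiB
      std::printf("%zu buffers, %zu MiB cap\n", max_staging_buffers,
                  max_staging_buffers * bytes_per_buffer / (1024 * 1024));
      return 0;
    }
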
 
 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
   return this;
 }
 
 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
   client_ = client;
 }
 
 void OneCopyTileTaskWorkerPool::Shutdown() {
   TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");
 
-  {
-    base::AutoLock lock(lock_);
-
-    shutdown_ = true;
-    copy_operation_count_cv_.Signal();
-  }
-
   TaskGraph empty;
   task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
   task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
+
+  base::AutoLock lock(lock_);
+
+  if (buffers_.empty())
+    return;
+
+  ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max());
 }
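
A note on the sentinel in the new Shutdown(): ReleaseBuffersNotUsedSince(time), defined later in this file, stops as soon as it sees a buffer with last_usage > time, and no real timestamp can exceed base::TimeTicks() + base::TimeDelta::Max(). Sketch of the reasoning behind the ordering, as comments:

    // 1. Schedule an empty graph + WaitForTasksToFinishRunning(): no worker
    //    can still be inside PlaybackAndScheduleCopyOnWorkerThread().
    // 2. Take lock_: nothing contends for staging buffers anymore.
    // 3. ReleaseBuffersNotUsedSince(TimeTicks() + TimeDelta::Max()): a
    //    last_usage later than this sentinel is impossible, so both the free
    //    and busy deques drain and all GL resources are destroyed.
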
 
 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
   TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");
 
-#if DCHECK_IS_ON()
-  {
-    base::AutoLock lock(lock_);
-    DCHECK(!shutdown_);
-  }
-#endif
-
   if (tasks_pending_.none())
     TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);
 
   // Mark all task sets as pending.
   tasks_pending_.set();
 
   size_t priority = kTileTaskPriorityBase;
 
   graph_.Reset();
 
   // Cancel existing OnTaskSetFinished callbacks.
   task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();
 
   scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];
 
   size_t task_count[kNumberOfTaskSets] = {0};
 
   for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
     new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
         task_runner_.get(),
         base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished,
                    task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
   }
 
-  resource_pool_->CheckBusyResources(false);
-
   for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
        it != queue->items.end(); ++it) {
     const TileTaskQueue::Item& item = *it;
     RasterTask* task = item.task;
     DCHECK(!task->HasCompleted());
 
     for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
       if (!item.task_sets[task_set])
         continue;
 
       ++task_count[task_set];
 
       graph_.edges.push_back(
           TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
     }
 
     InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
   }
 
   for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
     InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                       kTaskSetFinishedTaskPriorityBase + task_set,
                       task_count[task_set]);
   }
 
   ScheduleTasksOnOriginThread(this, &graph_);
+
+  // Barrier to sync any new resources to the worker context.
+  resource_provider_->output_surface()
+      ->context_provider()
+      ->ContextGL()
+      ->OrderingBarrierCHROMIUM();
+
   task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);
 
   std::copy(new_task_set_finished_tasks,
             new_task_set_finished_tasks + kNumberOfTaskSets,
             task_set_finished_tasks_);
 
-  resource_pool_->ReduceResourceUsage();
-
   TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
                                StateAsValue());
 }
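
The OrderingBarrierCHROMIUM() call added above is one half of a pair; the other half appears at the end of PlaybackAndScheduleCopyOnWorkerThread() below. My understanding of the pattern (a sketch, not patch code): an ordering barrier makes commands already recorded on one context execute before commands later submitted on another context sharing the same GPU channel, without the cost of a full flush:

    // cc (compositor) context: resources created for the scheduled tasks.
    cc_gl->OrderingBarrierCHROMIUM();      // ...made visible to worker context
    // worker context: raster into staging, CopySubTextureCHROMIUM, then:
    worker_gl->OrderingBarrierCHROMIUM();  // ...results visible back to cc
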
 
 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() {
   TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks");
 
   task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                             &completed_tasks_);
 
   for (Task::Vector::const_iterator it = completed_tasks_.begin();
        it != completed_tasks_.end(); ++it) {
     TileTask* task = static_cast<TileTask*>(it->get());
 
     task->WillComplete();
     task->CompleteOnOriginThread(this);
     task->DidComplete();
 
     task->RunReplyOnOriginThread();
   }
   completed_tasks_.clear();
 }
 
 ResourceFormat OneCopyTileTaskWorkerPool::GetResourceFormat() const {
-  return resource_provider_->best_texture_format();
+  return resource_provider_->memory_efficient_texture_format();

vmpstr 2015/07/22 23:38:52: Why the change here?
reveman 2015/07/23 05:35:05: This was preventing us from using RGBA_4444 on som…

 }
 
 bool OneCopyTileTaskWorkerPool::GetResourceRequiresSwizzle() const {
   return !PlatformColor::SameComponentOrder(GetResourceFormat());
 }
 
 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster(
     const Resource* resource,
     uint64_t resource_content_id,
     uint64_t previous_content_id) {
   // TODO(danakj): If resource_content_id != 0, we only need to copy/upload
   // the dirty rect.
-  DCHECK_EQ(resource->format(), resource_provider_->best_texture_format());
-  return make_scoped_ptr<RasterBuffer>(
-      new RasterBufferImpl(this, resource_provider_, resource_pool_,
-                           resource_provider_->best_texture_format(), resource,
-                           previous_content_id));
+  DCHECK_EQ(resource->format(),
+            resource_provider_->memory_efficient_texture_format());
+  return make_scoped_ptr<RasterBuffer>(new RasterBufferImpl(
+      this, resource_provider_,
+      resource_provider_->memory_efficient_texture_format(), resource,
+      previous_content_id));
 }
 
 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
     scoped_ptr<RasterBuffer> buffer) {
   // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
 }
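
Taken together, the entry points above give the tile manager a simple lifecycle. A usage sketch under this patch's API (caller-side names are hypothetical):

    // Origin thread:
    scoped_ptr<RasterBuffer> buffer = pool->AcquireBufferForRaster(
        resource, resource_content_id, previous_content_id);
    // Worker thread: rasters into a staging buffer and schedules the copy on
    // the worker context.
    buffer->Playback(raster_source, full_rect, dirty_rect, new_content_id,
                     scale);
    // Origin thread again; unlike the old code, the RasterBufferImpl
    // destructor has nothing left to do since the copy was already issued.
    pool->ReleaseBufferForRaster(buffer.Pass());
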
 
-CopySequenceNumber
-OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
-    bool reusing_raster_resource,
-    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer>
-        raster_resource_write_lock,
-    const Resource* raster_resource,
-    const Resource* output_resource,
+void OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
+    const Resource* resource,
+    const ResourceProvider::ScopedWriteLockGL* resource_lock,
     const RasterSource* raster_source,
     const gfx::Rect& raster_full_rect,
     const gfx::Rect& raster_dirty_rect,
-    float scale) {
-  gfx::GpuMemoryBuffer* gpu_memory_buffer =
-      raster_resource_write_lock->GetGpuMemoryBuffer();
-  if (gpu_memory_buffer) {
-    void* data = NULL;
-    bool rv = gpu_memory_buffer->Map(&data);
-    DCHECK(rv);
-    int stride;
-    gpu_memory_buffer->GetStride(&stride);
-    // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
-    DCHECK_GE(stride, 0);
+    float scale,
+    uint64_t previous_content_id,
+    uint64_t new_content_id) {
+  base::AutoLock lock(lock_);
+
+  scoped_ptr<StagingBuffer> staging_buffer;
+
+  ContextProvider* context_provider =
+      resource_provider_->output_surface()->worker_context_provider();
+  DCHECK(context_provider);
+
+  {

vmpstr 2015/07/22 23:38:52: I think each of these { } can easily be a separate…

+    ContextProvider::ScopedContextGL scoped_context(context_provider);
+
+    gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
+    DCHECK(gl);
+
+    // Check if any busy buffers have become available.
+    if (resource_provider_->use_sync_query()) {
+      while (!busy_buffers_.empty()) {
+        if (!CheckForQueryResult(gl, busy_buffers_.front()->query_id))
+          break;
+
+        free_buffers_.push_back(busy_buffers_.take_front());
+      }
+    }
+
+    // Wait for number of non-free buffers to become less than the limit.

vmpstr 2015/07/22 23:38:52: what's the relationship between busy_buffers_ buff…
reveman 2015/07/23 05:35:05: A buffer can be in one of three states. 1) free, 2…
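
Filling out reveman's truncated reply from the surrounding code: a staging buffer is in exactly one of three states, only two of which live in deques. A sketch of the accounting the wait loop below depends on (hypothetical helper, not in the patch):

    // |buffers_|      : every StagingBuffer owned by the pool.
    // |free_buffers_| : state 1, idle and reusable.
    // |busy_buffers_| : state 2, copy issued, completion query maybe pending.
    // State 3, "in use": checked out by this function, in neither deque.
    size_t non_free_count = buffers_.size() - free_buffers_.size();
    // non_free_count == busy + in-use; the loop below waits while
    // non_free_count >= max_staging_buffers_ and busy buffers remain to drain.
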
+    while ((buffers_.size() - free_buffers_.size()) >= max_staging_buffers_) {
+      // Stop when there are no more busy buffers to wait for.
+      if (busy_buffers_.empty())
+        break;
+
+      if (resource_provider_->use_sync_query()) {
+        WaitForQueryResult(gl, busy_buffers_.front()->query_id);

vmpstr 2015/07/22 23:38:52: If we ran out of attempts, can we still consider t…
reveman 2015/07/23 05:35:05: The attempts are just an internal detail to avoid…

+        free_buffers_.push_back(busy_buffers_.take_front());
+      } else {
+        // Fall-back to glFinish if CHROMIUM_sync_query is not available.
+        gl->Finish();
+        while (!busy_buffers_.empty())
+          free_buffers_.push_back(busy_buffers_.take_front());
+      }
+    }
+
+    // Find a staging buffer that allows us to perform partial raster when
+    // using persistent GpuMemoryBuffers.
+    if (use_persistent_gpu_memory_buffers_ && previous_content_id) {
+      StagingBufferDeque::iterator it =
+          std::find_if(free_buffers_.begin(), free_buffers_.end(),
+                       [previous_content_id](const StagingBuffer* buffer) {
+                         return buffer->content_id == previous_content_id;
+                       });
+      if (it != free_buffers_.end())
+        staging_buffer = free_buffers_.take(it);
+    }
+
+    // Find staging buffer of correct size.
+    if (!staging_buffer) {
+      StagingBufferDeque::iterator it =
+          std::find_if(free_buffers_.begin(), free_buffers_.end(),
+                       [resource](const StagingBuffer* buffer) {
+                         return buffer->size == resource->size();
+                       });
+      if (it != free_buffers_.end())
+        staging_buffer = free_buffers_.take(it);
+    }
+
+    // Create new staging buffer if necessary.
+    if (!staging_buffer) {
+      staging_buffer = make_scoped_ptr(new StagingBuffer(resource->size()));
+      buffers_.insert(staging_buffer.get());
+    }
+
+    // Release enough free buffers to stay within the limit.
+    while (buffers_.size() > max_staging_buffers_) {
+      if (free_buffers_.empty())
+        break;
+
+      free_buffers_.front()->DestroyGLResources(gl);
+      buffers_.erase(free_buffers_.front());
+      free_buffers_.take_front();
+    }
+  }
+
+  {
+    base::AutoUnlock unlock(lock_);
+
+    // Allocate GpuMemoryBuffer if necessary.
+    DCHECK(staging_buffer);
+    if (!staging_buffer->gpu_memory_buffer) {
+      staging_buffer->gpu_memory_buffer =
+          resource_provider_->gpu_memory_buffer_manager()
+              ->AllocateGpuMemoryBuffer(
+                  staging_buffer->size,
+                  ToGpuMemoryBufferFormat(
+                      resource_provider_->memory_efficient_texture_format()),
+                  use_persistent_gpu_memory_buffers_
+                      ? gfx::GpuMemoryBuffer::PERSISTENT_MAP
+                      : gfx::GpuMemoryBuffer::MAP);
+    }
 
     gfx::Rect playback_rect = raster_full_rect;
-    if (reusing_raster_resource) {
-      playback_rect.Intersect(raster_dirty_rect);
-    }
-    DCHECK(!playback_rect.IsEmpty())
-        << "Why are we rastering a tile that's not dirty?";
-    TileTaskWorkerPool::PlaybackToMemory(
-        data, raster_resource->format(), raster_resource->size(),
-        static_cast<size_t>(stride), raster_source, raster_full_rect,
-        playback_rect, scale);
-    gpu_memory_buffer->Unmap();
-  }
+    if (use_persistent_gpu_memory_buffers_ && previous_content_id) {
+      // Reduce playback rect to dirty region if the content id of the staging
+      // buffer matches the prevous content id.
+      if (previous_content_id == staging_buffer->content_id)
+        playback_rect.Intersect(raster_dirty_rect);
+    }
 
+    if (staging_buffer->gpu_memory_buffer) {
+      void* data = NULL;

vmpstr 2015/07/22 23:38:52: nullptr
reveman 2015/07/23 05:35:05: Done.

+      bool rv = staging_buffer->gpu_memory_buffer->Map(&data);
+      DCHECK(rv);
+      int stride;
+      staging_buffer->gpu_memory_buffer->GetStride(&stride);
+      // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
+      DCHECK_GE(stride, 0);
+
+      DCHECK(!playback_rect.IsEmpty())
+          << "Why are we rastering a tile that's not dirty?";
+      TileTaskWorkerPool::PlaybackToMemory(
+          data, resource_provider_->memory_efficient_texture_format(),
+          staging_buffer->size, static_cast<size_t>(stride), raster_source,
+          raster_full_rect, playback_rect, scale);
+      staging_buffer->gpu_memory_buffer->Unmap();
+      staging_buffer->content_id = new_content_id;
+    }
+  }
+
+  {
+    ContextProvider::ScopedContextGL scoped_context(context_provider);
+
+    gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
+    DCHECK(gl);
+
+    if (!staging_buffer->texture_id) {
+      gl->GenTextures(1, &staging_buffer->texture_id);
+      gl->BindTexture(image_target_, staging_buffer->texture_id);
+      gl->TexParameteri(image_target_, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+      gl->TexParameteri(image_target_, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+      gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+      gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+      if (staging_buffer->gpu_memory_buffer) {
+        DCHECK(!staging_buffer->image_id);
+        staging_buffer->image_id = gl->CreateImageCHROMIUM(
+            staging_buffer->gpu_memory_buffer->AsClientBuffer(),
+            staging_buffer->size.width(), staging_buffer->size.height(),
+            GLInternalFormat(
+                resource_provider_->memory_efficient_texture_format()));
+        gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id);
+      } else {
+        gl->BindTexture(image_target_, staging_buffer->texture_id);
+        gl->ReleaseTexImage2DCHROMIUM(image_target_, staging_buffer->image_id);
+        gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id);
+      }
+    }
+
+    if (resource_provider_->use_sync_query()) {
+      if (!staging_buffer->query_id)
+        gl->GenQueriesEXT(1, &staging_buffer->query_id);
+
+#if defined(OS_CHROMEOS)
+      // TODO(reveman): This avoids a performance problem on some ChromeOS
+      // devices. This needs to be removed to support native GpuMemoryBuffer
+      // implementations. crbug.com/436314
+      gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id);
+#else
+      gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM,
+                        staging_buffer->query_id);
+#endif
+    }
+
+    int bytes_per_row =
+        (BitsPerPixel(resource_provider_->memory_efficient_texture_format()) *
+         resource->size().width()) /
+        8;
+    int chunk_size_in_rows =
+        std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
+    // Align chunk size to 4. Required to support compressed texture formats.
+    chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4);
+    int y = 0;
+    int height = resource->size().height();
+    while (y < height) {
+      // Copy at most |chunk_size_in_rows|.
+      int rows_to_copy = std::min(chunk_size_in_rows, height - y);
+      DCHECK_GT(rows_to_copy, 0);
+
+      gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D, staging_buffer->texture_id,
+                                 resource_lock->texture_id(), 0, y, 0, y,
+                                 resource->size().width(), rows_to_copy, false,
+                                 false, false);
+      y += rows_to_copy;
+
+      // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
+      // used for this copy operation.
+      bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
+
+      if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
+        gl->ShallowFlushCHROMIUM();
+        bytes_scheduled_since_last_flush_ = 0;
+      }
+    }
+
+    if (resource_provider_->use_sync_query()) {
+#if defined(OS_CHROMEOS)
+      gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
+#else
+      gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
+#endif
+    }
+
+    // Barrier to sync worker context output to cc context.
+    gl->OrderingBarrierCHROMIUM();
+  }
+
+  staging_buffer->last_usage = base::TimeTicks::Now();
+  busy_buffers_.push_back(staging_buffer.Pass());
+
+  ScheduleReduceMemoryUsage();
+}
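
Worked numbers for the batching loop above, tying back to the comment on kMaxBytesPerCopyOperation: for RGBA_8888 at 512 px wide, bytes_per_row = 32 * 512 / 8 = 2048, so chunk_size_in_rows = 4 MiB / 2048 = 2048 rows and a whole 512x512 tile is a single CopySubTextureCHROMIUM, with the ShallowFlushCHROMIUM firing every 4 such tiles (4 * 1 MiB). With RGBA_4444 (the point of memory_efficient_texture_format, per the earlier comment thread) the bytes halve and the rows double. A standalone check of the arithmetic (format and width are assumed values):

    #include <algorithm>
    #include <cstdio>

    int main() {
      const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
      const int kBitsPerPixel[] = {32 /* RGBA_8888 */, 16 /* RGBA_4444 */};
      for (int bits_per_pixel : kBitsPerPixel) {
        int bytes_per_row = bits_per_pixel * 512 / 8;
        int chunk_size_in_rows =
            std::max(1, kMaxBytesPerCopyOperation / bytes_per_row);
        std::printf("%2d bpp: %4d bytes/row, %4d rows/chunk\n",
                    bits_per_pixel, bytes_per_row, chunk_size_in_rows);
      }
      return 0;  // 32 bpp: 2048 bytes/row, 2048 rows; 16 bpp: 1024, 4096.
    }
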
 
-  base::AutoLock lock(lock_);
-
-  CopySequenceNumber sequence = 0;
-  int bytes_per_row = (BitsPerPixel(raster_resource->format()) *
-                       raster_resource->size().width()) /
-                      8;
-  int chunk_size_in_rows =
-      std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
-  // Align chunk size to 4. Required to support compressed texture formats.
-  chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4);
-  int y = 0;
-  int height = raster_resource->size().height();
-  while (y < height) {
-    int failed_attempts = 0;
-    while ((pending_copy_operations_.size() + issued_copy_operation_count_) >=
-           kMaxCopyOperations) {
-      // Ignore limit when shutdown is set.
-      if (shutdown_)
-        break;
-
-      ++failed_attempts;
-
-      // Schedule a check that will also wait for operations to complete
-      // after too many failed attempts.
-      bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;
-
-      // Schedule a check for completed copy operations if too many operations
-      // are currently in-flight.
-      ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);
-
-      {
-        TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");
-
-        // Wait for in-flight copy operations to drop below limit.
-        copy_operation_count_cv_.Wait();
-      }
-    }
-
-    // There may be more work available, so wake up another worker thread.
-    copy_operation_count_cv_.Signal();
-
-    // Copy at most |chunk_size_in_rows|.
-    int rows_to_copy = std::min(chunk_size_in_rows, height - y);
-    DCHECK_GT(rows_to_copy, 0);
-
-    // |raster_resource_write_lock| is passed to the first copy operation as it
-    // needs to be released before we can issue a copy.
-    pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation(
-        raster_resource_write_lock.Pass(), raster_resource, output_resource,
-        gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy))));
-    y += rows_to_copy;
-
-    // Acquire a sequence number for this copy operation.
-    sequence = next_copy_operation_sequence_++;
-
-    // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
-    // used for this copy operation.
-    bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
-
-    // Post task that will advance last flushed copy operation to |sequence|
-    // when |bytes_scheduled_since_last_flush_| has reached
-    // |max_bytes_per_copy_operation_|.
-    if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
-      task_runner_->PostTask(
-          FROM_HERE,
-          base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo,
-                     weak_ptr_factory_.GetWeakPtr(), sequence));
-      bytes_scheduled_since_last_flush_ = 0;
-    }
-  }
-
-  return sequence;
-}
-
-void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo(
-    CopySequenceNumber sequence) {
-  if (last_issued_copy_operation_ >= sequence)
-    return;
-
-  IssueCopyOperations(sequence - last_issued_copy_operation_);
-  last_issued_copy_operation_ = sequence;
-}
-
-void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo(
-    CopySequenceNumber sequence) {
-  if (last_flushed_copy_operation_ >= sequence)
-    return;
-
-  AdvanceLastIssuedCopyTo(sequence);
-
-  // Flush all issued copy operations.
-  context_provider_->ContextGL()->ShallowFlushCHROMIUM();
-  last_flushed_copy_operation_ = last_issued_copy_operation_;
-}
-
+bool OneCopyTileTaskWorkerPool::OnMemoryDump(
+    base::trace_event::ProcessMemoryDump* pmd) {
+  base::AutoLock lock(lock_);
+
+  ResourceFormat format = resource_provider_->memory_efficient_texture_format();
+  std::for_each(buffers_.begin(), buffers_.end(),
+                [pmd, format](const StagingBuffer* buffer) {
+                  buffer->OnMemoryDump(pmd, format);
+                });
+
+  return true;
+}
+
+base::TimeTicks OneCopyTileTaskWorkerPool::GetUsageTimeForLRUBuffer() {
+  lock_.AssertAcquired();
+
+  if (!free_buffers_.empty())
+    return free_buffers_.front()->last_usage;
+
+  if (!busy_buffers_.empty())
+    return busy_buffers_.front()->last_usage;
+
+  return base::TimeTicks();
+}
+
+void OneCopyTileTaskWorkerPool::ScheduleReduceMemoryUsage() {
+  lock_.AssertAcquired();
+
+  if (reduce_memory_usage_pending_)
+    return;
+
+  reduce_memory_usage_pending_ = true;
+
+  base::TimeTicks reduce_memory_usage_time =
+      GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
+  task_runner_->PostDelayedTask(
+      FROM_HERE, reduce_memory_usage_callback_,
+      reduce_memory_usage_time - base::TimeTicks::Now());
+}
+
+void OneCopyTileTaskWorkerPool::ReduceMemoryUsage() {
+  base::AutoLock lock(lock_);
+
+  reduce_memory_usage_pending_ = false;
+
+  if (free_buffers_.empty() && busy_buffers_.empty())
+    return;
+
+  base::TimeTicks current_time = base::TimeTicks::Now();
+  ReleaseBuffersNotUsedSince(current_time - staging_buffer_expiration_delay_);
+
+  if (free_buffers_.empty() && busy_buffers_.empty())
+    return;
+
+  reduce_memory_usage_pending_ = true;
+
+  base::TimeTicks reduce_memory_usage_time =
+      GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;

vmpstr 2015/07/22 23:38:52: Can you make a comment to what this time is... Us…
reveman 2015/07/23 05:35:05: Yes, lru buffer usage time + expiration delay is n…

+  task_runner_->PostDelayedTask(FROM_HERE, reduce_memory_usage_callback_,
+                                reduce_memory_usage_time - current_time);
+}
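
Expanding reveman's truncated reply: the LRU buffer's last_usage plus the expiration delay is the earliest moment any buffer can expire, so that is exactly when the next cleanup is scheduled. A worked timeline, assuming kStagingBufferExpirationDelayMs = 1000:

    // t = 0 ms    : LRU buffer last used (front of free_buffers_/busy_buffers_).
    // t = 400 ms  : some other buffer last used.
    // t = 1000 ms : posted task runs ReduceMemoryUsage(); the t=0 buffer's GL
    //               resources are destroyed (last_usage <= t - 1000 ms).
    // t = 1400 ms : the re-armed task releases the t=400 buffer, and so on,
    //               each buffer living roughly one delay past its last use.
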
+
+void OneCopyTileTaskWorkerPool::ReleaseBuffersNotUsedSince(
+    base::TimeTicks time) {
+  lock_.AssertAcquired();
+
+  ContextProvider* context_provider =
+      resource_provider_->output_surface()->worker_context_provider();
+  DCHECK(context_provider);
+
+  {
+    ContextProvider::ScopedContextGL scoped_context(context_provider);
+
+    gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
+    DCHECK(gl);
+
+    // Note: Front buffer is guaranteed to be LRU so we can stop releasing
+    // buffers as soon as we find a buffer that has been used since |time|.
+    while (!free_buffers_.empty()) {
+      if (free_buffers_.front()->last_usage > time)
+        return;
+
+      free_buffers_.front()->DestroyGLResources(gl);
+      buffers_.erase(free_buffers_.front());
+      free_buffers_.take_front();
+    }
+
+    while (!busy_buffers_.empty()) {
+      if (busy_buffers_.front()->last_usage > time)
+        return;
+
+      busy_buffers_.front()->DestroyGLResources(gl);
+      buffers_.erase(busy_buffers_.front());
+      busy_buffers_.take_front();
+    }
+  }
+}
 
 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
   TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set",
                task_set);
 
   DCHECK(tasks_pending_[task_set]);
   tasks_pending_[task_set] = false;
   if (tasks_pending_.any()) {
     TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
                                  "state", StateAsValue());
   } else {
     TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
   }
   client_->DidFinishRunningTileTasks(task_set);
 }
 
-void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) {
-  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count",
-               count);
-
-  CopyOperation::Deque copy_operations;
-
-  {
-    base::AutoLock lock(lock_);
-
-    for (int64 i = 0; i < count; ++i) {
-      DCHECK(!pending_copy_operations_.empty());
-      copy_operations.push_back(pending_copy_operations_.take_front());
-    }
-
-    // Increment |issued_copy_operation_count_| to reflect the transition of
-    // copy operations from "pending" to "issued" state.
-    issued_copy_operation_count_ += copy_operations.size();
-  }
-
-  while (!copy_operations.empty()) {
-    scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();
-
-    // Remove the write lock.
-    copy_operation->src_write_lock.reset();
-
-    // Copy contents of source resource to destination resource.
-    resource_provider_->CopyResource(copy_operation->src->id(),
-                                     copy_operation->dst->id(),
-                                     copy_operation->rect);
-  }
-}
-
-void OneCopyTileTaskWorkerPool::
-    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
-        bool wait_if_needed) {
-  lock_.AssertAcquired();
-
-  if (check_for_completed_copy_operations_pending_)
-    return;
-
-  base::TimeTicks now = base::TimeTicks::Now();
-
-  // Schedule a check for completed copy operations as soon as possible but
-  // don't allow two consecutive checks to be scheduled to run less than the
-  // tick rate apart.
-  base::TimeTicks next_check_for_completed_copy_operations_time =
-      std::max(last_check_for_completed_copy_operations_time_ +
-                   base::TimeDelta::FromMilliseconds(
-                       kCheckForCompletedCopyOperationsTickRateMs),
-               now);
-
-  task_runner_->PostDelayedTask(
-      FROM_HERE,
-      base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations,
-                 weak_ptr_factory_.GetWeakPtr(), wait_if_needed),
-      next_check_for_completed_copy_operations_time - now);
-
-  last_check_for_completed_copy_operations_time_ =
-      next_check_for_completed_copy_operations_time;
-  check_for_completed_copy_operations_pending_ = true;
-}
-
-void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations(
-    bool wait_if_needed) {
-  TRACE_EVENT1("cc",
-               "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations",
-               "wait_if_needed", wait_if_needed);
-
-  resource_pool_->CheckBusyResources(wait_if_needed);
-
-  {
-    base::AutoLock lock(lock_);
-
-    DCHECK(check_for_completed_copy_operations_pending_);
-    check_for_completed_copy_operations_pending_ = false;
-
-    // The number of busy resources in the pool reflects the number of issued
-    // copy operations that have not yet completed.
-    issued_copy_operation_count_ = resource_pool_->busy_resource_count();
-
-    // There may be work blocked on too many in-flight copy operations, so wake
-    // up a worker thread.
-    copy_operation_count_cv_.Signal();
-  }
-}
-
 scoped_refptr<base::trace_event::ConvertableToTraceFormat>
 OneCopyTileTaskWorkerPool::StateAsValue() const {
   scoped_refptr<base::trace_event::TracedValue> state =
       new base::trace_event::TracedValue();
 
   state->BeginArray("tasks_pending");
   for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
     state->AppendBoolean(tasks_pending_[task_set]);
   state->EndArray();
   state->BeginDictionary("staging_state");
   StagingStateAsValueInto(state.get());
   state->EndDictionary();
 
   return state;
 }
 
 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto(
     base::trace_event::TracedValue* staging_state) const {
-  staging_state->SetInteger(
-      "staging_resource_count",
-      static_cast<int>(resource_pool_->total_resource_count()));
-  staging_state->SetInteger(
-      "bytes_used_for_staging_resources",
-      static_cast<int>(resource_pool_->total_memory_usage_bytes()));
-  staging_state->SetInteger(
-      "pending_copy_count",
-      static_cast<int>(resource_pool_->total_resource_count() -
-                       resource_pool_->acquired_resource_count()));
-  staging_state->SetInteger(
-      "bytes_pending_copy",
-      static_cast<int>(resource_pool_->total_memory_usage_bytes() -
-                       resource_pool_->acquired_memory_usage_bytes()));
+  base::AutoLock lock(lock_);
+
+  staging_state->SetInteger("staging_buffer_count",
+                            static_cast<int>(buffers_.size()));
+  staging_state->SetInteger("busy_count",
+                            static_cast<int>(busy_buffers_.size()));
+  staging_state->SetInteger("free_count",
+                            static_cast<int>(free_buffers_.size()));
 }
 
 }  // namespace cc