OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "cc/raster/one_copy_tile_task_worker_pool.h" | 5 #include "cc/raster/one_copy_tile_task_worker_pool.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <limits> | 8 #include <limits> |
9 | 9 |
10 #include "base/strings/stringprintf.h" | 10 #include "base/strings/stringprintf.h" |
| 11 #include "base/thread_task_runner_handle.h" |
| 12 #include "base/trace_event/memory_dump_manager.h" |
11 #include "base/trace_event/trace_event.h" | 13 #include "base/trace_event/trace_event.h" |
12 #include "base/trace_event/trace_event_argument.h" | 14 #include "base/trace_event/trace_event_argument.h" |
13 #include "cc/base/math_util.h" | 15 #include "cc/base/math_util.h" |
14 #include "cc/debug/traced_value.h" | 16 #include "cc/debug/traced_value.h" |
15 #include "cc/raster/raster_buffer.h" | 17 #include "cc/raster/raster_buffer.h" |
16 #include "cc/resources/platform_color.h" | 18 #include "cc/resources/platform_color.h" |
17 #include "cc/resources/resource_pool.h" | 19 #include "cc/resources/resource_format.h" |
| 20 #include "cc/resources/resource_util.h" |
18 #include "cc/resources/scoped_resource.h" | 21 #include "cc/resources/scoped_resource.h" |
| 22 #include "gpu/GLES2/gl2extchromium.h" |
19 #include "gpu/command_buffer/client/gles2_interface.h" | 23 #include "gpu/command_buffer/client/gles2_interface.h" |
| 24 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" |
20 #include "ui/gfx/buffer_format_util.h" | 25 #include "ui/gfx/buffer_format_util.h" |
21 #include "ui/gfx/gpu_memory_buffer.h" | |
22 | 26 |
23 namespace cc { | 27 namespace cc { |
24 namespace { | 28 namespace { |
25 | 29 |
26 class RasterBufferImpl : public RasterBuffer { | 30 class RasterBufferImpl : public RasterBuffer { |
27 public: | 31 public: |
28 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, | 32 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, |
29 ResourceProvider* resource_provider, | 33 ResourceProvider* resource_provider, |
30 ResourcePool* resource_pool, | |
31 ResourceFormat resource_format, | 34 ResourceFormat resource_format, |
32 const Resource* output_resource, | 35 const Resource* resource, |
33 uint64_t previous_content_id) | 36 uint64_t previous_content_id) |
34 : worker_pool_(worker_pool), | 37 : worker_pool_(worker_pool), |
35 resource_provider_(resource_provider), | 38 resource_(resource), |
36 resource_pool_(resource_pool), | 39 lock_(resource_provider, resource->id()), |
37 output_resource_(output_resource), | 40 previous_content_id_(previous_content_id) {} |
38 raster_content_id_(0), | |
39 raster_resource_(nullptr), | |
40 sequence_(0) { | |
41 if (worker_pool->have_persistent_gpu_memory_buffers() && | |
42 previous_content_id) { | |
43 raster_resource_ = | |
44 resource_pool->TryAcquireResourceWithContentId(previous_content_id); | |
45 } | |
46 if (raster_resource_) { | |
47 raster_content_id_ = previous_content_id; | |
48 DCHECK_EQ(resource_format, raster_resource_->format()); | |
49 DCHECK_EQ(output_resource->size().ToString(), | |
50 raster_resource_->size().ToString()); | |
51 } else { | |
52 raster_resource_ = resource_pool->AcquireResource(output_resource->size(), | |
53 resource_format); | |
54 } | |
55 | 41 |
56 lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer( | 42 ~RasterBufferImpl() override {} |
57 resource_provider_, raster_resource_->id())); | |
58 } | |
59 | |
60 ~RasterBufferImpl() override { | |
61 // Release write lock in case a copy was never scheduled. | |
62 lock_.reset(); | |
63 | |
64 // Make sure any scheduled copy operations are issued before we release the | |
65 // raster resource. | |
66 if (sequence_) | |
67 worker_pool_->AdvanceLastIssuedCopyTo(sequence_); | |
68 | |
69 // Return resources to pool so they can be used by another RasterBuffer | |
70 // instance. | |
71 resource_pool_->ReleaseResource(raster_resource_, raster_content_id_); | |
72 raster_resource_ = nullptr; | |
73 } | |
74 | 43 |
75 // Overridden from RasterBuffer: | 44 // Overridden from RasterBuffer: |
76 void Playback(const RasterSource* raster_source, | 45 void Playback(const RasterSource* raster_source, |
77 const gfx::Rect& raster_full_rect, | 46 const gfx::Rect& raster_full_rect, |
78 const gfx::Rect& raster_dirty_rect, | 47 const gfx::Rect& raster_dirty_rect, |
79 uint64_t new_content_id, | 48 uint64_t new_content_id, |
80 float scale, | 49 float scale, |
81 bool include_images) override { | 50 bool include_images) override { |
82 // If there's a raster_content_id_, we are reusing a resource with that | 51 worker_pool_->PlaybackAndCopyOnWorkerThread( |
83 // content id. | 52 resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect, |
84 bool reusing_raster_resource = raster_content_id_ != 0; | 53 scale, include_images, previous_content_id_, new_content_id); |
85 sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread( | |
86 reusing_raster_resource, lock_.Pass(), raster_resource_, | |
87 output_resource_, raster_source, raster_full_rect, raster_dirty_rect, | |
88 scale, include_images); | |
89 // Store the content id of the resource to return to the pool. | |
90 raster_content_id_ = new_content_id; | |
91 } | 54 } |
92 | 55 |
93 private: | 56 private: |
94 OneCopyTileTaskWorkerPool* worker_pool_; | 57 OneCopyTileTaskWorkerPool* worker_pool_; |
95 ResourceProvider* resource_provider_; | 58 const Resource* resource_; |
96 ResourcePool* resource_pool_; | 59 ResourceProvider::ScopedWriteLockGL lock_; |
97 const Resource* output_resource_; | 60 uint64_t previous_content_id_; |
98 uint64_t raster_content_id_; | |
99 Resource* raster_resource_; | |
100 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_; | |
101 CopySequenceNumber sequence_; | |
102 | 61 |
103 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); | 62 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); |
104 }; | 63 }; |
105 | 64 |
106 // Number of in-flight copy operations to allow. | 65 // Delay between checking for query result to be available. |
107 const int kMaxCopyOperations = 32; | 66 const int kCheckForQueryResultAvailableTickRateMs = 1; |
108 | 67 |
109 // Delay between checking for copy operations to complete. | 68 // Number of attempts to allow before we perform a check that will wait for |
110 const int kCheckForCompletedCopyOperationsTickRateMs = 1; | 69 // query to complete. |
111 | 70 const int kMaxCheckForQueryResultAvailableAttempts = 256; |
112 // Number of failed attempts to allow before we perform a check that will | |
113 // wait for copy operations to complete if needed. | |
114 const int kFailedAttemptsBeforeWaitIfNeeded = 256; | |
115 | 71 |
116 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good | 72 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good |
117 // default batch size for copy operations. | 73 // default batch size for copy operations. |
118 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; | 74 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; |
119 | 75 |
| 76 // Delay before a staging buffer might be released. |
| 77 const int kStagingBufferExpirationDelayMs = 1000; |
| 78 |
| 79 bool CheckForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) { |
| 80 unsigned complete = 1; |
| 81 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete); |
| 82 return !!complete; |
| 83 } |
| 84 |
| 85 void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) { |
| 86 TRACE_EVENT0("cc", "WaitForQueryResult"); |
| 87 |
| 88 int attempts_left = kMaxCheckForQueryResultAvailableAttempts; |
| 89 while (attempts_left--) { |
| 90 if (CheckForQueryResult(gl, query_id)) |
| 91 break; |
| 92 |
| 93 base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds( |
| 94 kCheckForQueryResultAvailableTickRateMs)); |
| 95 } |
| 96 |
| 97 unsigned result = 0; |
| 98 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result); |
| 99 } |
| 100 |
120 } // namespace | 101 } // namespace |
121 | 102 |
122 OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation( | 103 OneCopyTileTaskWorkerPool::StagingBuffer::StagingBuffer(const gfx::Size& size) |
123 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock, | 104 : size(size), texture_id(0), image_id(0), query_id(0), content_id(0) {} |
124 const Resource* src, | 105 |
125 const Resource* dst, | 106 OneCopyTileTaskWorkerPool::StagingBuffer::~StagingBuffer() { |
126 const gfx::Rect& rect) | 107 DCHECK_EQ(texture_id, 0u); |
127 : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) { | 108 DCHECK_EQ(image_id, 0u); |
| 109 DCHECK_EQ(query_id, 0u); |
128 } | 110 } |
129 | 111 |
130 OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() { | 112 void OneCopyTileTaskWorkerPool::StagingBuffer::DestroyGLResources( |
| 113 gpu::gles2::GLES2Interface* gl) { |
| 114 if (query_id) { |
| 115 gl->DeleteQueriesEXT(1, &query_id); |
| 116 query_id = 0; |
| 117 } |
| 118 if (image_id) { |
| 119 gl->DestroyImageCHROMIUM(image_id); |
| 120 image_id = 0; |
| 121 } |
| 122 if (texture_id) { |
| 123 gl->DeleteTextures(1, &texture_id); |
| 124 texture_id = 0; |
| 125 } |
| 126 } |
| 127 |
| 128 void OneCopyTileTaskWorkerPool::StagingBuffer::OnMemoryDump( |
| 129 base::trace_event::ProcessMemoryDump* pmd, |
| 130 ResourceFormat format, |
| 131 bool in_free_list) const { |
| 132 if (!gpu_memory_buffer) |
| 133 return; |
| 134 |
| 135 gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId(); |
| 136 std::string buffer_dump_name = |
| 137 base::StringPrintf("cc/one_copy/staging_memory/buffer_%d", buffer_id.id); |
| 138 base::trace_event::MemoryAllocatorDump* buffer_dump = |
| 139 pmd->CreateAllocatorDump(buffer_dump_name); |
| 140 |
| 141 uint64_t buffer_size_in_bytes = |
| 142 ResourceUtil::UncheckedSizeInBytes<uint64_t>(size, format); |
| 143 buffer_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, |
| 144 base::trace_event::MemoryAllocatorDump::kUnitsBytes, |
| 145 buffer_size_in_bytes); |
| 146 buffer_dump->AddScalar("free_size", |
| 147 base::trace_event::MemoryAllocatorDump::kUnitsBytes, |
| 148 in_free_list ? buffer_size_in_bytes : 0); |
| 149 |
| 150 // Emit an ownership edge towards a global allocator dump node. |
| 151 const uint64 tracing_process_id = |
| 152 base::trace_event::MemoryDumpManager::GetInstance() |
| 153 ->GetTracingProcessId(); |
| 154 base::trace_event::MemoryAllocatorDumpGuid shared_buffer_guid = |
| 155 gfx::GetGpuMemoryBufferGUIDForTracing(tracing_process_id, buffer_id); |
| 156 pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid); |
| 157 |
| 158 // By creating an edge with a higher |importance| (w.r.t. browser-side dumps) |
| 159 // the tracing UI will account the effective size of the buffer to the child. |
| 160 const int kImportance = 2; |
| 161 pmd->AddOwnershipEdge(buffer_dump->guid(), shared_buffer_guid, kImportance); |
131 } | 162 } |
132 | 163 |
133 // static | 164 // static |
134 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( | 165 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( |
135 base::SequencedTaskRunner* task_runner, | 166 base::SequencedTaskRunner* task_runner, |
136 TaskGraphRunner* task_graph_runner, | 167 TaskGraphRunner* task_graph_runner, |
137 ContextProvider* context_provider, | 168 ContextProvider* context_provider, |
138 ResourceProvider* resource_provider, | 169 ResourceProvider* resource_provider, |
139 ResourcePool* resource_pool, | |
140 int max_copy_texture_chromium_size, | 170 int max_copy_texture_chromium_size, |
141 bool have_persistent_gpu_memory_buffers) { | 171 bool use_persistent_gpu_memory_buffers, |
| 172 int max_staging_buffers) { |
142 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( | 173 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( |
143 task_runner, task_graph_runner, context_provider, resource_provider, | 174 task_runner, task_graph_runner, resource_provider, |
144 resource_pool, max_copy_texture_chromium_size, | 175 max_copy_texture_chromium_size, use_persistent_gpu_memory_buffers, |
145 have_persistent_gpu_memory_buffers)); | 176 max_staging_buffers)); |
146 } | 177 } |
147 | 178 |
148 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( | 179 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( |
149 base::SequencedTaskRunner* task_runner, | 180 base::SequencedTaskRunner* task_runner, |
150 TaskGraphRunner* task_graph_runner, | 181 TaskGraphRunner* task_graph_runner, |
151 ContextProvider* context_provider, | |
152 ResourceProvider* resource_provider, | 182 ResourceProvider* resource_provider, |
153 ResourcePool* resource_pool, | |
154 int max_copy_texture_chromium_size, | 183 int max_copy_texture_chromium_size, |
155 bool have_persistent_gpu_memory_buffers) | 184 bool use_persistent_gpu_memory_buffers, |
| 185 int max_staging_buffers) |
156 : task_runner_(task_runner), | 186 : task_runner_(task_runner), |
157 task_graph_runner_(task_graph_runner), | 187 task_graph_runner_(task_graph_runner), |
158 namespace_token_(task_graph_runner->GetNamespaceToken()), | 188 namespace_token_(task_graph_runner->GetNamespaceToken()), |
159 context_provider_(context_provider), | |
160 resource_provider_(resource_provider), | 189 resource_provider_(resource_provider), |
161 resource_pool_(resource_pool), | |
162 max_bytes_per_copy_operation_( | 190 max_bytes_per_copy_operation_( |
163 max_copy_texture_chromium_size | 191 max_copy_texture_chromium_size |
164 ? std::min(kMaxBytesPerCopyOperation, | 192 ? std::min(kMaxBytesPerCopyOperation, |
165 max_copy_texture_chromium_size) | 193 max_copy_texture_chromium_size) |
166 : kMaxBytesPerCopyOperation), | 194 : kMaxBytesPerCopyOperation), |
167 have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers), | 195 use_persistent_gpu_memory_buffers_(use_persistent_gpu_memory_buffers), |
168 last_issued_copy_operation_(0), | |
169 last_flushed_copy_operation_(0), | |
170 lock_(), | |
171 copy_operation_count_cv_(&lock_), | |
172 bytes_scheduled_since_last_flush_(0), | 196 bytes_scheduled_since_last_flush_(0), |
173 issued_copy_operation_count_(0), | 197 max_staging_buffers_(max_staging_buffers), |
174 next_copy_operation_sequence_(1), | 198 staging_buffer_expiration_delay_( |
175 check_for_completed_copy_operations_pending_(false), | 199 base::TimeDelta::FromMilliseconds(kStagingBufferExpirationDelayMs)), |
176 shutdown_(false), | 200 reduce_memory_usage_pending_(false), |
177 weak_ptr_factory_(this), | 201 weak_ptr_factory_(this), |
178 task_set_finished_weak_ptr_factory_(this) { | 202 task_set_finished_weak_ptr_factory_(this) { |
179 DCHECK(context_provider_); | 203 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider( |
| 204 this, base::ThreadTaskRunnerHandle::Get()); |
| 205 reduce_memory_usage_callback_ = |
| 206 base::Bind(&OneCopyTileTaskWorkerPool::ReduceMemoryUsage, |
| 207 weak_ptr_factory_.GetWeakPtr()); |
180 } | 208 } |
181 | 209 |
182 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { | 210 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { |
183 DCHECK_EQ(pending_copy_operations_.size(), 0u); | 211 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider( |
| 212 this); |
184 } | 213 } |
185 | 214 |
186 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { | 215 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { |
187 return this; | 216 return this; |
188 } | 217 } |
189 | 218 |
190 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { | 219 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { |
191 client_ = client; | 220 client_ = client; |
192 } | 221 } |
193 | 222 |
194 void OneCopyTileTaskWorkerPool::Shutdown() { | 223 void OneCopyTileTaskWorkerPool::Shutdown() { |
195 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); | 224 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); |
196 | 225 |
197 { | |
198 base::AutoLock lock(lock_); | |
199 | |
200 shutdown_ = true; | |
201 copy_operation_count_cv_.Signal(); | |
202 } | |
203 | |
204 TaskGraph empty; | 226 TaskGraph empty; |
205 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); | 227 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); |
206 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); | 228 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); |
| 229 |
| 230 base::AutoLock lock(lock_); |
| 231 |
| 232 if (buffers_.empty()) |
| 233 return; |
| 234 |
| 235 ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max()); |
207 } | 236 } |
208 | 237 |
209 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { | 238 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { |
210 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); | 239 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); |
211 | 240 |
212 #if DCHECK_IS_ON() | |
213 { | |
214 base::AutoLock lock(lock_); | |
215 DCHECK(!shutdown_); | |
216 } | |
217 #endif | |
218 | |
219 if (tasks_pending_.none()) | 241 if (tasks_pending_.none()) |
220 TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this); | 242 TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this); |
221 | 243 |
222 // Mark all task sets as pending. | 244 // Mark all task sets as pending. |
223 tasks_pending_.set(); | 245 tasks_pending_.set(); |
224 | 246 |
225 size_t priority = kTileTaskPriorityBase; | 247 size_t priority = kTileTaskPriorityBase; |
226 | 248 |
227 graph_.Reset(); | 249 graph_.Reset(); |
228 | 250 |
229 // Cancel existing OnTaskSetFinished callbacks. | 251 // Cancel existing OnTaskSetFinished callbacks. |
230 task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs(); | 252 task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs(); |
231 | 253 |
232 scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets]; | 254 scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets]; |
233 | 255 |
234 size_t task_count[kNumberOfTaskSets] = {0}; | 256 size_t task_count[kNumberOfTaskSets] = {0}; |
235 | 257 |
236 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 258 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
237 new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask( | 259 new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask( |
238 task_runner_.get(), | 260 task_runner_.get(), |
239 base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished, | 261 base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished, |
240 task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set)); | 262 task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set)); |
241 } | 263 } |
242 | 264 |
243 resource_pool_->CheckBusyResources(false); | |
244 | |
245 for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); | 265 for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); |
246 it != queue->items.end(); ++it) { | 266 it != queue->items.end(); ++it) { |
247 const TileTaskQueue::Item& item = *it; | 267 const TileTaskQueue::Item& item = *it; |
248 RasterTask* task = item.task; | 268 RasterTask* task = item.task; |
249 DCHECK(!task->HasCompleted()); | 269 DCHECK(!task->HasCompleted()); |
250 | 270 |
251 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 271 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
252 if (!item.task_sets[task_set]) | 272 if (!item.task_sets[task_set]) |
253 continue; | 273 continue; |
254 | 274 |
255 ++task_count[task_set]; | 275 ++task_count[task_set]; |
256 | 276 |
257 graph_.edges.push_back( | 277 graph_.edges.push_back( |
258 TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get())); | 278 TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get())); |
259 } | 279 } |
260 | 280 |
261 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++); | 281 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++); |
262 } | 282 } |
263 | 283 |
264 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 284 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
265 InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), | 285 InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), |
266 kTaskSetFinishedTaskPriorityBase + task_set, | 286 kTaskSetFinishedTaskPriorityBase + task_set, |
267 task_count[task_set]); | 287 task_count[task_set]); |
268 } | 288 } |
269 | 289 |
270 ScheduleTasksOnOriginThread(this, &graph_); | 290 ScheduleTasksOnOriginThread(this, &graph_); |
| 291 |
| 292 // Barrier to sync any new resources to the worker context. |
| 293 resource_provider_->output_surface() |
| 294 ->context_provider() |
| 295 ->ContextGL() |
| 296 ->OrderingBarrierCHROMIUM(); |
| 297 |
271 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); | 298 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); |
272 | 299 |
273 std::copy(new_task_set_finished_tasks, | 300 std::copy(new_task_set_finished_tasks, |
274 new_task_set_finished_tasks + kNumberOfTaskSets, | 301 new_task_set_finished_tasks + kNumberOfTaskSets, |
275 task_set_finished_tasks_); | 302 task_set_finished_tasks_); |
276 | 303 |
277 resource_pool_->ReduceResourceUsage(); | |
278 | |
279 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state", | 304 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state", |
280 StateAsValue()); | 305 StateAsValue()); |
281 } | 306 } |
282 | 307 |
283 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() { | 308 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() { |
284 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks"); | 309 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks"); |
285 | 310 |
286 task_graph_runner_->CollectCompletedTasks(namespace_token_, | 311 task_graph_runner_->CollectCompletedTasks(namespace_token_, |
287 &completed_tasks_); | 312 &completed_tasks_); |
288 | 313 |
(...skipping 18 matching lines...)
307 return !PlatformColor::SameComponentOrder(GetResourceFormat()); | 332 return !PlatformColor::SameComponentOrder(GetResourceFormat()); |
308 } | 333 } |
309 | 334 |
310 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster( | 335 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster( |
311 const Resource* resource, | 336 const Resource* resource, |
312 uint64_t resource_content_id, | 337 uint64_t resource_content_id, |
313 uint64_t previous_content_id) { | 338 uint64_t previous_content_id) { |
314 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload | 339 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload |
315 // the dirty rect. | 340 // the dirty rect. |
316 DCHECK_EQ(resource->format(), resource_provider_->best_texture_format()); | 341 DCHECK_EQ(resource->format(), resource_provider_->best_texture_format()); |
317 return make_scoped_ptr<RasterBuffer>( | 342 return make_scoped_ptr<RasterBuffer>(new RasterBufferImpl( |
318 new RasterBufferImpl(this, resource_provider_, resource_pool_, | 343 this, resource_provider_, resource_provider_->best_texture_format(), |
319 resource_provider_->best_texture_format(), resource, | 344 resource, previous_content_id)); |
320 previous_content_id)); | |
321 } | 345 } |
322 | 346 |
323 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( | 347 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( |
324 scoped_ptr<RasterBuffer> buffer) { | 348 scoped_ptr<RasterBuffer> buffer) { |
325 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. | 349 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. |
326 } | 350 } |
327 | 351 |
328 CopySequenceNumber | 352 void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread( |
329 OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread( | 353 const Resource* resource, |
330 bool reusing_raster_resource, | 354 const ResourceProvider::ScopedWriteLockGL* resource_lock, |
331 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> | |
332 raster_resource_write_lock, | |
333 const Resource* raster_resource, | |
334 const Resource* output_resource, | |
335 const RasterSource* raster_source, | 355 const RasterSource* raster_source, |
336 const gfx::Rect& raster_full_rect, | 356 const gfx::Rect& raster_full_rect, |
337 const gfx::Rect& raster_dirty_rect, | 357 const gfx::Rect& raster_dirty_rect, |
338 float scale, | 358 float scale, |
339 bool include_images) { | 359 bool include_images, |
340 gfx::GpuMemoryBuffer* gpu_memory_buffer = | 360 uint64_t previous_content_id, |
341 raster_resource_write_lock->GetGpuMemoryBuffer(); | 361 uint64_t new_content_id) { |
342 if (gpu_memory_buffer) { | 362 base::AutoLock lock(lock_); |
343 DCHECK_EQ( | 363 |
344 1u, gfx::NumberOfPlanesForBufferFormat(gpu_memory_buffer->GetFormat())); | 364 scoped_ptr<StagingBuffer> staging_buffer = |
345 void* data = NULL; | 365 AcquireStagingBuffer(resource, previous_content_id); |
346 bool rv = gpu_memory_buffer->Map(&data); | 366 DCHECK(staging_buffer); |
347 DCHECK(rv); | 367 |
348 int stride; | 368 { |
349 gpu_memory_buffer->GetStride(&stride); | 369 base::AutoUnlock unlock(lock_); |
350 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. | 370 |
351 DCHECK_GE(stride, 0); | 371 // Allocate GpuMemoryBuffer if necessary. |
| 372 if (!staging_buffer->gpu_memory_buffer) { |
| 373 staging_buffer->gpu_memory_buffer = |
| 374 resource_provider_->gpu_memory_buffer_manager() |
| 375 ->AllocateGpuMemoryBuffer( |
| 376 staging_buffer->size, |
| 377 BufferFormat(resource_provider_->best_texture_format()), |
| 378 use_persistent_gpu_memory_buffers_ |
| 379 ? gfx::BufferUsage::PERSISTENT_MAP |
| 380 : gfx::BufferUsage::MAP); |
| 381 DCHECK_EQ(gfx::NumberOfPlanesForBufferFormat( |
| 382 staging_buffer->gpu_memory_buffer->GetFormat()), |
| 383 1u); |
| 384 } |
352 | 385 |
353 gfx::Rect playback_rect = raster_full_rect; | 386 gfx::Rect playback_rect = raster_full_rect; |
354 if (reusing_raster_resource) { | 387 if (use_persistent_gpu_memory_buffers_ && previous_content_id) { |
355 playback_rect.Intersect(raster_dirty_rect); | 388 // Reduce playback rect to dirty region if the content id of the staging |
356 } | 389 // buffer matches the previous content id. |
357 DCHECK(!playback_rect.IsEmpty()) | 390 if (previous_content_id == staging_buffer->content_id) |
358 << "Why are we rastering a tile that's not dirty?"; | 391 playback_rect.Intersect(raster_dirty_rect); |
359 TileTaskWorkerPool::PlaybackToMemory( | 392 } |
360 data, raster_resource->format(), raster_resource->size(), | 393 |
361 static_cast<size_t>(stride), raster_source, raster_full_rect, | 394 if (staging_buffer->gpu_memory_buffer) { |
362 playback_rect, scale, include_images); | 395 void* data = nullptr; |
363 gpu_memory_buffer->Unmap(); | 396 bool rv = staging_buffer->gpu_memory_buffer->Map(&data); |
364 } | 397 DCHECK(rv); |
365 | 398 int stride; |
| 399 staging_buffer->gpu_memory_buffer->GetStride(&stride); |
| 400 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. |
| 401 DCHECK_GE(stride, 0); |
| 402 |
| 403 DCHECK(!playback_rect.IsEmpty()) |
| 404 << "Why are we rastering a tile that's not dirty?"; |
| 405 TileTaskWorkerPool::PlaybackToMemory( |
| 406 data, resource_provider_->best_texture_format(), staging_buffer->size, |
| 407 static_cast<size_t>(stride), raster_source, raster_full_rect, |
| 408 playback_rect, scale, include_images); |
| 409 staging_buffer->gpu_memory_buffer->Unmap(); |
| 410 staging_buffer->content_id = new_content_id; |
| 411 } |
| 412 } |
| 413 |
| 414 ContextProvider* context_provider = |
| 415 resource_provider_->output_surface()->worker_context_provider(); |
| 416 DCHECK(context_provider); |
| 417 |
| 418 { |
| 419 ContextProvider::ScopedContextLock scoped_context(context_provider); |
| 420 |
| 421 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); |
| 422 DCHECK(gl); |
| 423 |
| 424 unsigned image_target = resource_provider_->GetImageTextureTarget( |
| 425 resource_provider_->best_texture_format()); |
| 426 |
| 427 // Create and bind staging texture. |
| 428 if (!staging_buffer->texture_id) { |
| 429 gl->GenTextures(1, &staging_buffer->texture_id); |
| 430 gl->BindTexture(image_target, staging_buffer->texture_id); |
| 431 gl->TexParameteri(image_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST); |
| 432 gl->TexParameteri(image_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST); |
| 433 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); |
| 434 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); |
| 435 } else { |
| 436 gl->BindTexture(image_target, staging_buffer->texture_id); |
| 437 } |
| 438 |
| 439 // Create and bind image. |
| 440 if (!staging_buffer->image_id) { |
| 441 if (staging_buffer->gpu_memory_buffer) { |
| 442 staging_buffer->image_id = gl->CreateImageCHROMIUM( |
| 443 staging_buffer->gpu_memory_buffer->AsClientBuffer(), |
| 444 staging_buffer->size.width(), staging_buffer->size.height(), |
| 445 GLInternalFormat(resource_provider_->best_texture_format())); |
| 446 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); |
| 447 } |
| 448 } else { |
| 449 gl->ReleaseTexImage2DCHROMIUM(image_target, staging_buffer->image_id); |
| 450 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); |
| 451 } |
| 452 |
| 453 // Unbind staging texture. |
| 454 gl->BindTexture(image_target, 0); |
| 455 |
| 456 if (resource_provider_->use_sync_query()) { |
| 457 if (!staging_buffer->query_id) |
| 458 gl->GenQueriesEXT(1, &staging_buffer->query_id); |
| 459 |
| 460 #if defined(OS_CHROMEOS) |
| 461 // TODO(reveman): This avoids a performance problem on some ChromeOS |
| 462 // devices. This needs to be removed to support native GpuMemoryBuffer |
| 463 // implementations. crbug.com/436314 |
| 464 gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id); |
| 465 #else |
| 466 gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, |
| 467 staging_buffer->query_id); |
| 468 #endif |
| 469 } |
| 470 |
| 471 int bytes_per_row = |
| 472 (BitsPerPixel(resource_provider_->best_texture_format()) * |
| 473 resource->size().width()) / |
| 474 8; |
| 475 int chunk_size_in_rows = |
| 476 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); |
| 477 // Align chunk size to 4. Required to support compressed texture formats. |
| 478 chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4); |
| 479 int y = 0; |
| 480 int height = resource->size().height(); |
| 481 while (y < height) { |
| 482 // Copy at most |chunk_size_in_rows|. |
| 483 int rows_to_copy = std::min(chunk_size_in_rows, height - y); |
| 484 DCHECK_GT(rows_to_copy, 0); |
| 485 |
| 486 gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D, staging_buffer->texture_id, |
| 487 resource_lock->texture_id(), 0, y, 0, y, |
| 488 resource->size().width(), rows_to_copy, false, |
| 489 false, false); |
| 490 y += rows_to_copy; |
| 491 |
| 492 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory |
| 493 // used for this copy operation. |
| 494 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; |
| 495 |
| 496 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { |
| 497 gl->ShallowFlushCHROMIUM(); |
| 498 bytes_scheduled_since_last_flush_ = 0; |
| 499 } |
| 500 } |
| 501 |
| 502 if (resource_provider_->use_sync_query()) { |
| 503 #if defined(OS_CHROMEOS) |
| 504 gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM); |
| 505 #else |
| 506 gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM); |
| 507 #endif |
| 508 } |
| 509 |
| 510 // Barrier to sync worker context output to cc context. |
| 511 gl->OrderingBarrierCHROMIUM(); |
| 512 } |
| 513 |
| 514 staging_buffer->last_usage = base::TimeTicks::Now(); |
| 515 busy_buffers_.push_back(staging_buffer.Pass()); |
| 516 |
| 517 ScheduleReduceMemoryUsage(); |
| 518 } |
| 519 |
| 520 bool OneCopyTileTaskWorkerPool::OnMemoryDump( |
| 521 const base::trace_event::MemoryDumpArgs& args, |
| 522 base::trace_event::ProcessMemoryDump* pmd) { |
366 base::AutoLock lock(lock_); | 523 base::AutoLock lock(lock_); |
367 | 524 |
368 CopySequenceNumber sequence = 0; | 525 for (const auto& buffer : buffers_) { |
369 int bytes_per_row = (BitsPerPixel(raster_resource->format()) * | 526 buffer->OnMemoryDump(pmd, resource_provider_->best_texture_format(), |
370 raster_resource->size().width()) / | 527 std::find(free_buffers_.begin(), free_buffers_.end(), |
371 8; | 528 buffer) != free_buffers_.end()); |
372 int chunk_size_in_rows = | 529 } |
373 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); | 530 |
374 // Align chunk size to 4. Required to support compressed texture formats. | 531 return true; |
375 chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4); | 532 } |
376 int y = 0; | 533 |
377 int height = raster_resource->size().height(); | 534 scoped_ptr<OneCopyTileTaskWorkerPool::StagingBuffer> |
378 while (y < height) { | 535 OneCopyTileTaskWorkerPool::AcquireStagingBuffer(const Resource* resource, |
379 int failed_attempts = 0; | 536 uint64_t previous_content_id) { |
380 while ((pending_copy_operations_.size() + issued_copy_operation_count_) >= | 537 lock_.AssertAcquired(); |
381 kMaxCopyOperations) { | 538 |
382 // Ignore limit when shutdown is set. | 539 scoped_ptr<StagingBuffer> staging_buffer; |
383 if (shutdown_) | 540 |
| 541 ContextProvider* context_provider = |
| 542 resource_provider_->output_surface()->worker_context_provider(); |
| 543 DCHECK(context_provider); |
| 544 |
| 545 ContextProvider::ScopedContextLock scoped_context(context_provider); |
| 546 |
| 547 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); |
| 548 DCHECK(gl); |
| 549 |
| 550 // Check if any busy buffers have become available. |
| 551 if (resource_provider_->use_sync_query()) { |
| 552 while (!busy_buffers_.empty()) { |
| 553 if (!CheckForQueryResult(gl, busy_buffers_.front()->query_id)) |
384 break; | 554 break; |
385 | 555 |
386 ++failed_attempts; | 556 free_buffers_.push_back(busy_buffers_.take_front()); |
387 | 557 } |
388 // Schedule a check that will also wait for operations to complete | 558 } |
389 // after too many failed attempts. | 559 |
390 bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded; | 560 // Wait for number of non-free buffers to become less than the limit. |
391 | 561 while ((buffers_.size() - free_buffers_.size()) >= max_staging_buffers_) { |
392 // Schedule a check for completed copy operations if too many operations | 562 // Stop when there are no more busy buffers to wait for. |
393 // are currently in-flight. | 563 if (busy_buffers_.empty()) |
394 ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed); | 564 break; |
395 | 565 |
396 { | 566 if (resource_provider_->use_sync_query()) { |
397 TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete"); | 567 WaitForQueryResult(gl, busy_buffers_.front()->query_id); |
398 | 568 free_buffers_.push_back(busy_buffers_.take_front()); |
399 // Wait for in-flight copy operations to drop below limit. | 569 } else { |
400 copy_operation_count_cv_.Wait(); | 570 // Fall-back to glFinish if CHROMIUM_sync_query is not available. |
401 } | 571 gl->Finish(); |
402 } | 572 while (!busy_buffers_.empty()) |
403 | 573 free_buffers_.push_back(busy_buffers_.take_front()); |
404 // There may be more work available, so wake up another worker thread. | 574 } |
405 copy_operation_count_cv_.Signal(); | 575 } |
406 | 576 |
407 // Copy at most |chunk_size_in_rows|. | 577 // Find a staging buffer that allows us to perform partial raster when |
408 int rows_to_copy = std::min(chunk_size_in_rows, height - y); | 578 // using persistent GpuMemoryBuffers. |
409 DCHECK_GT(rows_to_copy, 0); | 579 if (use_persistent_gpu_memory_buffers_ && previous_content_id) { |
410 | 580 StagingBufferDeque::iterator it = |
411 // |raster_resource_write_lock| is passed to the first copy operation as it | 581 std::find_if(free_buffers_.begin(), free_buffers_.end(), |
412 // needs to be released before we can issue a copy. | 582 [previous_content_id](const StagingBuffer* buffer) { |
413 pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation( | 583 return buffer->content_id == previous_content_id; |
414 raster_resource_write_lock.Pass(), raster_resource, output_resource, | 584 }); |
415 gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy)))); | 585 if (it != free_buffers_.end()) |
416 y += rows_to_copy; | 586 staging_buffer = free_buffers_.take(it); |
417 | 587 } |
418 // Acquire a sequence number for this copy operation. | 588 |
419 sequence = next_copy_operation_sequence_++; | 589 // Find staging buffer of correct size. |
420 | 590 if (!staging_buffer) { |
421 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory | 591 StagingBufferDeque::iterator it = |
422 // used for this copy operation. | 592 std::find_if(free_buffers_.begin(), free_buffers_.end(), |
423 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; | 593 [resource](const StagingBuffer* buffer) { |
424 | 594 return buffer->size == resource->size(); |
425 // Post task that will advance last flushed copy operation to |sequence| | 595 }); |
426 // when |bytes_scheduled_since_last_flush_| has reached | 596 if (it != free_buffers_.end()) |
427 // |max_bytes_per_copy_operation_|. | 597 staging_buffer = free_buffers_.take(it); |
428 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { | 598 } |
429 task_runner_->PostTask( | 599 |
430 FROM_HERE, | 600 // Create new staging buffer if necessary. |
431 base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo, | 601 if (!staging_buffer) { |
432 weak_ptr_factory_.GetWeakPtr(), sequence)); | 602 staging_buffer = make_scoped_ptr(new StagingBuffer(resource->size())); |
433 bytes_scheduled_since_last_flush_ = 0; | 603 buffers_.insert(staging_buffer.get()); |
434 } | 604 } |
435 } | 605 |
436 | 606 // Release enough free buffers to stay within the limit. |
437 return sequence; | 607 while (buffers_.size() > max_staging_buffers_) { |
438 } | 608 if (free_buffers_.empty()) |
439 | 609 break; |
440 void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo( | 610 |
441 CopySequenceNumber sequence) { | 611 free_buffers_.front()->DestroyGLResources(gl); |
442 if (last_issued_copy_operation_ >= sequence) | 612 buffers_.erase(free_buffers_.front()); |
| 613 free_buffers_.take_front(); |
| 614 } |
| 615 |
| 616 return staging_buffer.Pass(); |
| 617 } |
| 618 |
| 619 base::TimeTicks OneCopyTileTaskWorkerPool::GetUsageTimeForLRUBuffer() { |
| 620 lock_.AssertAcquired(); |
| 621 |
| 622 if (!free_buffers_.empty()) |
| 623 return free_buffers_.front()->last_usage; |
| 624 |
| 625 if (!busy_buffers_.empty()) |
| 626 return busy_buffers_.front()->last_usage; |
| 627 |
| 628 return base::TimeTicks(); |
| 629 } |
| 630 |
| 631 void OneCopyTileTaskWorkerPool::ScheduleReduceMemoryUsage() { |
| 632 lock_.AssertAcquired(); |
| 633 |
| 634 if (reduce_memory_usage_pending_) |
443 return; | 635 return; |
444 | 636 |
445 IssueCopyOperations(sequence - last_issued_copy_operation_); | 637 reduce_memory_usage_pending_ = true; |
446 last_issued_copy_operation_ = sequence; | 638 |
447 } | 639 // Schedule a call to ReduceMemoryUsage at the time when the LRU buffer |
448 | 640 // should be released. |
449 void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo( | 641 base::TimeTicks reduce_memory_usage_time = |
450 CopySequenceNumber sequence) { | 642 GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_; |
451 if (last_flushed_copy_operation_ >= sequence) | 643 task_runner_->PostDelayedTask( |
| 644 FROM_HERE, reduce_memory_usage_callback_, |
| 645 reduce_memory_usage_time - base::TimeTicks::Now()); |
| 646 } |
| 647 |
| 648 void OneCopyTileTaskWorkerPool::ReduceMemoryUsage() { |
| 649 base::AutoLock lock(lock_); |
| 650 |
| 651 reduce_memory_usage_pending_ = false; |
| 652 |
| 653 if (free_buffers_.empty() && busy_buffers_.empty()) |
452 return; | 654 return; |
453 | 655 |
454 AdvanceLastIssuedCopyTo(sequence); | 656 base::TimeTicks current_time = base::TimeTicks::Now(); |
455 | 657 ReleaseBuffersNotUsedSince(current_time - staging_buffer_expiration_delay_); |
456 // Flush all issued copy operations. | 658 |
457 context_provider_->ContextGL()->ShallowFlushCHROMIUM(); | 659 if (free_buffers_.empty() && busy_buffers_.empty()) |
458 last_flushed_copy_operation_ = last_issued_copy_operation_; | 660 return; |
| 661 |
| 662 reduce_memory_usage_pending_ = true; |
| 663 |
| 664 // Schedule another call to ReduceMemoryUsage at the time when the next |
| 665 // buffer should be released. |
| 666 base::TimeTicks reduce_memory_usage_time = |
| 667 GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_; |
| 668 task_runner_->PostDelayedTask(FROM_HERE, reduce_memory_usage_callback_, |
| 669 reduce_memory_usage_time - current_time); |
| 670 } |
| 671 |
| 672 void OneCopyTileTaskWorkerPool::ReleaseBuffersNotUsedSince( |
| 673 base::TimeTicks time) { |
| 674 lock_.AssertAcquired(); |
| 675 |
| 676 ContextProvider* context_provider = |
| 677 resource_provider_->output_surface()->worker_context_provider(); |
| 678 DCHECK(context_provider); |
| 679 |
| 680 { |
| 681 ContextProvider::ScopedContextLock scoped_context(context_provider); |
| 682 |
| 683 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); |
| 684 DCHECK(gl); |
| 685 |
| 686 // Note: Front buffer is guaranteed to be LRU so we can stop releasing |
| 687 // buffers as soon as we find a buffer that has been used since |time|. |
| 688 while (!free_buffers_.empty()) { |
| 689 if (free_buffers_.front()->last_usage > time) |
| 690 return; |
| 691 |
| 692 free_buffers_.front()->DestroyGLResources(gl); |
| 693 buffers_.erase(free_buffers_.front()); |
| 694 free_buffers_.take_front(); |
| 695 } |
| 696 |
| 697 while (!busy_buffers_.empty()) { |
| 698 if (busy_buffers_.front()->last_usage > time) |
| 699 return; |
| 700 |
| 701 busy_buffers_.front()->DestroyGLResources(gl); |
| 702 buffers_.erase(busy_buffers_.front()); |
| 703 busy_buffers_.take_front(); |
| 704 } |
| 705 } |
459 } | 706 } |
460 | 707 |
461 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { | 708 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { |
462 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", | 709 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", |
463 task_set); | 710 task_set); |
464 | 711 |
465 DCHECK(tasks_pending_[task_set]); | 712 DCHECK(tasks_pending_[task_set]); |
466 tasks_pending_[task_set] = false; | 713 tasks_pending_[task_set] = false; |
467 if (tasks_pending_.any()) { | 714 if (tasks_pending_.any()) { |
468 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", | 715 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", |
469 "state", StateAsValue()); | 716 "state", StateAsValue()); |
470 } else { | 717 } else { |
471 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); | 718 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); |
472 } | 719 } |
473 client_->DidFinishRunningTileTasks(task_set); | 720 client_->DidFinishRunningTileTasks(task_set); |
474 } | 721 } |
475 | 722 |
476 void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) { | |
477 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count", | |
478 count); | |
479 | |
480 CopyOperation::Deque copy_operations; | |
481 | |
482 { | |
483 base::AutoLock lock(lock_); | |
484 | |
485 for (int64 i = 0; i < count; ++i) { | |
486 DCHECK(!pending_copy_operations_.empty()); | |
487 copy_operations.push_back(pending_copy_operations_.take_front()); | |
488 } | |
489 | |
490 // Increment |issued_copy_operation_count_| to reflect the transition of | |
491 // copy operations from "pending" to "issued" state. | |
492 issued_copy_operation_count_ += copy_operations.size(); | |
493 } | |
494 | |
495 while (!copy_operations.empty()) { | |
496 scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front(); | |
497 | |
498 // Remove the write lock. | |
499 copy_operation->src_write_lock.reset(); | |
500 | |
501 // Copy contents of source resource to destination resource. | |
502 resource_provider_->CopyResource(copy_operation->src->id(), | |
503 copy_operation->dst->id(), | |
504 copy_operation->rect); | |
505 } | |
506 } | |
507 | |
508 void OneCopyTileTaskWorkerPool:: | |
509 ScheduleCheckForCompletedCopyOperationsWithLockAcquired( | |
510 bool wait_if_needed) { | |
511 lock_.AssertAcquired(); | |
512 | |
513 if (check_for_completed_copy_operations_pending_) | |
514 return; | |
515 | |
516 base::TimeTicks now = base::TimeTicks::Now(); | |
517 | |
518 // Schedule a check for completed copy operations as soon as possible but | |
519 // don't allow two consecutive checks to be scheduled to run less than the | |
520 // tick rate apart. | |
521 base::TimeTicks next_check_for_completed_copy_operations_time = | |
522 std::max(last_check_for_completed_copy_operations_time_ + | |
523 base::TimeDelta::FromMilliseconds( | |
524 kCheckForCompletedCopyOperationsTickRateMs), | |
525 now); | |
526 | |
527 task_runner_->PostDelayedTask( | |
528 FROM_HERE, | |
529 base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations, | |
530 weak_ptr_factory_.GetWeakPtr(), wait_if_needed), | |
531 next_check_for_completed_copy_operations_time - now); | |
532 | |
533 last_check_for_completed_copy_operations_time_ = | |
534 next_check_for_completed_copy_operations_time; | |
535 check_for_completed_copy_operations_pending_ = true; | |
536 } | |
537 | |
538 void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations( | |
539 bool wait_if_needed) { | |
540 TRACE_EVENT1("cc", | |
541 "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations", | |
542 "wait_if_needed", wait_if_needed); | |
543 | |
544 resource_pool_->CheckBusyResources(wait_if_needed); | |
545 | |
546 { | |
547 base::AutoLock lock(lock_); | |
548 | |
549 DCHECK(check_for_completed_copy_operations_pending_); | |
550 check_for_completed_copy_operations_pending_ = false; | |
551 | |
552 // The number of busy resources in the pool reflects the number of issued | |
553 // copy operations that have not yet completed. | |
554 issued_copy_operation_count_ = resource_pool_->busy_resource_count(); | |
555 | |
556 // There may be work blocked on too many in-flight copy operations, so wake | |
557 // up a worker thread. | |
558 copy_operation_count_cv_.Signal(); | |
559 } | |
560 } | |
561 | |
562 scoped_refptr<base::trace_event::ConvertableToTraceFormat> | 723 scoped_refptr<base::trace_event::ConvertableToTraceFormat> |
563 OneCopyTileTaskWorkerPool::StateAsValue() const { | 724 OneCopyTileTaskWorkerPool::StateAsValue() const { |
564 scoped_refptr<base::trace_event::TracedValue> state = | 725 scoped_refptr<base::trace_event::TracedValue> state = |
565 new base::trace_event::TracedValue(); | 726 new base::trace_event::TracedValue(); |
566 | 727 |
567 state->BeginArray("tasks_pending"); | 728 state->BeginArray("tasks_pending"); |
568 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) | 729 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) |
569 state->AppendBoolean(tasks_pending_[task_set]); | 730 state->AppendBoolean(tasks_pending_[task_set]); |
570 state->EndArray(); | 731 state->EndArray(); |
571 state->BeginDictionary("staging_state"); | 732 state->BeginDictionary("staging_state"); |
572 StagingStateAsValueInto(state.get()); | 733 StagingStateAsValueInto(state.get()); |
573 state->EndDictionary(); | 734 state->EndDictionary(); |
574 | 735 |
575 return state; | 736 return state; |
576 } | 737 } |
577 | 738 |
578 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto( | 739 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto( |
579 base::trace_event::TracedValue* staging_state) const { | 740 base::trace_event::TracedValue* staging_state) const { |
580 staging_state->SetInteger( | 741 base::AutoLock lock(lock_); |
581 "staging_resource_count", | 742 |
582 static_cast<int>(resource_pool_->total_resource_count())); | 743 staging_state->SetInteger("staging_buffer_count", |
583 staging_state->SetInteger( | 744 static_cast<int>(buffers_.size())); |
584 "bytes_used_for_staging_resources", | 745 staging_state->SetInteger("busy_count", |
585 static_cast<int>(resource_pool_->total_memory_usage_bytes())); | 746 static_cast<int>(busy_buffers_.size())); |
586 staging_state->SetInteger( | 747 staging_state->SetInteger("free_count", |
587 "pending_copy_count", | 748 static_cast<int>(free_buffers_.size())); |
588 static_cast<int>(resource_pool_->total_resource_count() - | |
589 resource_pool_->acquired_resource_count())); | |
590 staging_state->SetInteger( | |
591 "bytes_pending_copy", | |
592 static_cast<int>(resource_pool_->total_memory_usage_bytes() - | |
593 resource_pool_->acquired_memory_usage_bytes())); | |
594 } | 749 } |
595 | 750 |
596 } // namespace cc | 751 } // namespace cc |