// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/resources/zero_copy_tile_task_worker_pool.h"

#include <algorithm>

#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "cc/debug/traced_value.h"
#include "cc/resources/raster_buffer.h"
#include "cc/resources/resource.h"
#include "ui/gfx/gpu_memory_buffer.h"

namespace cc {
namespace {

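// RasterBuffer implementation that rasters directly into the memory of a GPU
// memory buffer backing the resource, so no intermediate copy or upload step
// is needed (hence "zero copy").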
class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(ResourceProvider* resource_provider,
                   const Resource* resource)
      : lock_(resource_provider, resource->id()), resource_(resource) {}

  // Overridden from RasterBuffer:
  void Playback(const RasterSource* raster_source,
                const gfx::Rect& rect,
                float scale) override {
    gfx::GpuMemoryBuffer* gpu_memory_buffer = lock_.GetGpuMemoryBuffer();
    if (!gpu_memory_buffer)
      return;

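    // Raster the source directly into the mapped buffer memory and unmap the
    // buffer again as soon as playback is done.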
    TileTaskWorkerPool::PlaybackToMemory(
        gpu_memory_buffer->Map(), resource_->format(), resource_->size(),
        gpu_memory_buffer->GetStride(), raster_source, rect, scale);
    gpu_memory_buffer->Unmap();
  }

 private:
  ResourceProvider::ScopedWriteLockGpuMemoryBuffer lock_;
  const Resource* resource_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};

}  // namespace

// static
scoped_ptr<TileTaskWorkerPool> ZeroCopyTileTaskWorkerPool::Create(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ResourceProvider* resource_provider) {
  return make_scoped_ptr<TileTaskWorkerPool>(new ZeroCopyTileTaskWorkerPool(
      task_runner, task_graph_runner, resource_provider));
}

ZeroCopyTileTaskWorkerPool::ZeroCopyTileTaskWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ResourceProvider* resource_provider)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner->GetNamespaceToken()),
      resource_provider_(resource_provider),
      task_set_finished_weak_ptr_factory_(this) {
}

ZeroCopyTileTaskWorkerPool::~ZeroCopyTileTaskWorkerPool() {
}

TileTaskRunner* ZeroCopyTileTaskWorkerPool::AsTileTaskRunner() {
  return this;
}

void ZeroCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
  client_ = client;
}

void ZeroCopyTileTaskWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "ZeroCopyTileTaskWorkerPool::Shutdown");

  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
}

void ZeroCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
  TRACE_EVENT0("cc", "ZeroCopyTileTaskWorkerPool::ScheduleTasks");

  if (tasks_pending_.none())
    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);

  // Mark all task sets as pending.
  tasks_pending_.set();

  unsigned priority = kTileTaskPriorityBase;

  graph_.Reset();

  // Cancel existing OnTaskSetFinished callbacks.
  task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();

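  // One "finished" task per task set; when it runs it posts
  // OnTaskSetFinished() for that set back to |task_runner_|.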
  scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];

  size_t task_count[kNumberOfTaskSets] = {0};

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
        task_runner_.get(),
        base::Bind(&ZeroCopyTileTaskWorkerPool::OnTaskSetFinished,
                   task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
  }

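  // Build the task graph: insert a node for every raster task (plus its
  // dependencies) and add an edge to the "finished" task of each task set the
  // raster task belongs to.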
  for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end(); ++it) {
    const TileTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
    }

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                      kTaskSetFinishedTaskPriorityBase + task_set,
                      task_count[task_set]);
  }

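  // Let new tasks run their origin-thread setup (this is where raster tasks
  // acquire their raster buffers) before the graph is handed to the runner.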
  ScheduleTasksOnOriginThread(this, &graph_);
  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  std::copy(new_task_set_finished_tasks,
            new_task_set_finished_tasks + kNumberOfTaskSets,
            task_set_finished_tasks_);

  TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
                               StateAsValue());
}

void ZeroCopyTileTaskWorkerPool::CheckForCompletedTasks() {
  TRACE_EVENT0("cc", "ZeroCopyTileTaskWorkerPool::CheckForCompletedTasks");

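  // Collect tasks that finished running on worker threads and run their
  // completion and reply callbacks on the origin thread.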
  task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                            &completed_tasks_);
  for (Task::Vector::const_iterator it = completed_tasks_.begin();
       it != completed_tasks_.end(); ++it) {
    TileTask* task = static_cast<TileTask*>(it->get());

    task->WillComplete();
    task->CompleteOnOriginThread(this);
    task->DidComplete();

    task->RunReplyOnOriginThread();
  }
  completed_tasks_.clear();
}

ResourceFormat ZeroCopyTileTaskWorkerPool::GetResourceFormat() {
  return resource_provider_->best_texture_format();
}

scoped_ptr<RasterBuffer> ZeroCopyTileTaskWorkerPool::AcquireBufferForRaster(
    const Resource* resource) {
  return make_scoped_ptr<RasterBuffer>(
      new RasterBufferImpl(resource_provider_, resource));
}

void ZeroCopyTileTaskWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}

void ZeroCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
  TRACE_EVENT1("cc", "ZeroCopyTileTaskWorkerPool::OnTaskSetFinished",
               "task_set", task_set);

  DCHECK(tasks_pending_[task_set]);
  tasks_pending_[task_set] = false;
  if (tasks_pending_.any()) {
    TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
                                 "state", StateAsValue());
  } else {
    TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
  }
  client_->DidFinishRunningTileTasks(task_set);
}

scoped_refptr<base::trace_event::ConvertableToTraceFormat>
ZeroCopyTileTaskWorkerPool::StateAsValue() const {
  scoped_refptr<base::trace_event::TracedValue> state =
      new base::trace_event::TracedValue();

  state->BeginArray("tasks_pending");
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
    state->AppendBoolean(tasks_pending_[task_set]);
  state->EndArray();
  return state;
}

}  // namespace cc