// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h"

#include <list>

#include "base/bind.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/cancellation_flag.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "base/threading/thread_checker.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gpu_preference.h"
#include "ui/gl/scoped_binders.h"

namespace gpu {

namespace {

const char kAsyncTransferThreadName[] = "AsyncTransferThread";

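// Runs on the transfer thread. The thread's task runner executes tasks in
// FIFO order, so this runs after every previously posted upload, letting the
// observer know the shared memory backing those uploads can be reused.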
void PerformNotifyCompletion(
    AsyncMemoryParams mem_params,
    scoped_refptr<AsyncPixelTransferCompletionObserver> observer) {
  TRACE_EVENT0("gpu", "PerformNotifyCompletion");
  observer->DidComplete(mem_params);
}

// TODO(backer): Factor out common thread scheduling logic from the EGL and
// ShareGroup implementations. http://crbug.com/239889
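// Background thread with its own offscreen GL surface and a context created
// in the parent context's share group, so textures uploaded here are visible
// to the context that scheduled the transfer.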
class TransferThread : public base::Thread {
 public:
  TransferThread()
      : base::Thread(kAsyncTransferThreadName),
        initialized_(false) {
    base::Thread::Options options;
#if defined(OS_ANDROID) || defined(OS_LINUX)
    options.priority = base::ThreadPriority::BACKGROUND;
#endif
    StartWithOptions(options);
  }

  ~TransferThread() override {
    // The only instance of this class was declared leaky.
    NOTREACHED();
  }

  void InitializeOnMainThread(gfx::GLContext* parent_context) {
    TRACE_EVENT0("gpu", "TransferThread::InitializeOnMainThread");
    if (initialized_)
      return;

    base::WaitableEvent wait_for_init(true, false);
    task_runner()->PostTask(
        FROM_HERE,
        base::Bind(&TransferThread::InitializeOnTransferThread,
                   base::Unretained(this), base::Unretained(parent_context),
                   &wait_for_init));
    wait_for_init.Wait();
  }

  void CleanUp() override {
    surface_ = NULL;
    context_ = NULL;
  }

 private:
  bool initialized_;

  scoped_refptr<gfx::GLSurface> surface_;
  scoped_refptr<gfx::GLContext> context_;

  void InitializeOnTransferThread(gfx::GLContext* parent_context,
                                  base::WaitableEvent* caller_wait) {
    TRACE_EVENT0("gpu", "InitializeOnTransferThread");

    if (!parent_context) {
      LOG(ERROR) << "No parent context provided.";
      caller_wait->Signal();
      return;
    }

    surface_ = gfx::GLSurface::CreateOffscreenGLSurface(gfx::Size(1, 1));
    if (!surface_.get()) {
      LOG(ERROR) << "Unable to create GLSurface";
      caller_wait->Signal();
      return;
    }

    // TODO(backer): This is coded for integrated GPUs. For discrete GPUs
    // we would probably want to use a PBO texture upload for a true async
    // upload (that would hopefully be optimized as a DMA transfer by the
    // driver).
    context_ = gfx::GLContext::CreateGLContext(parent_context->share_group(),
                                               surface_.get(),
                                               gfx::PreferIntegratedGpu);
    if (!context_.get()) {
      LOG(ERROR) << "Unable to create GLContext.";
      caller_wait->Signal();
      return;
    }

    context_->MakeCurrent(surface_.get());
    initialized_ = true;
    caller_wait->Signal();
  }

  DISALLOW_COPY_AND_ASSIGN(TransferThread);
};

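// The transfer thread is created lazily on first use and intentionally
// leaked; its destructor is never run (see ~TransferThread above).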
base::LazyInstance<TransferThread>::Leaky
    g_transfer_thread = LAZY_INSTANCE_INITIALIZER;

base::SingleThreadTaskRunner* transfer_task_runner() {
  return g_transfer_thread.Pointer()->task_runner().get();
}

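// Ref-counted wrapper around a single upload closure. The closure may be run
// from either thread: BindAndRun() executes it on the transfer thread after
// binding the texture, while TryRun() lets the main thread run it directly
// if the transfer thread has not started it yet. task_lock_ guarantees the
// closure runs at most once.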
class PendingTask : public base::RefCountedThreadSafe<PendingTask> {
 public:
  explicit PendingTask(const base::Closure& task)
      : task_(task), task_pending_(true, false) {}

  bool TryRun() {
    // This is meant to be called on the main thread where the texture
    // is already bound.
    DCHECK(checker_.CalledOnValidThread());
    if (task_lock_.Try()) {
      // Only run once.
      if (!task_.is_null())
        task_.Run();
      task_.Reset();

      task_lock_.Release();
      task_pending_.Signal();
      return true;
    }
    return false;
  }

  void BindAndRun(GLuint texture_id) {
    // This is meant to be called on the upload thread where we don't have to
    // restore the previous texture binding.
    DCHECK(!checker_.CalledOnValidThread());
    base::AutoLock locked(task_lock_);
    if (!task_.is_null()) {
      glBindTexture(GL_TEXTURE_2D, texture_id);
      task_.Run();
      task_.Reset();
      glBindTexture(GL_TEXTURE_2D, 0);
      // Flush for synchronization between threads.
      glFlush();
      task_pending_.Signal();
    }
  }

  void Cancel() {
    base::AutoLock locked(task_lock_);
    task_.Reset();
    task_pending_.Signal();
  }

  bool TaskIsInProgress() {
    return !task_pending_.IsSignaled();
  }

  void WaitForTask() {
    task_pending_.Wait();
  }

 private:
  friend class base::RefCountedThreadSafe<PendingTask>;

  virtual ~PendingTask() {}

  base::ThreadChecker checker_;

  base::Lock task_lock_;
  base::Closure task_;
  base::WaitableEvent task_pending_;

  DISALLOW_COPY_AND_ASSIGN(PendingTask);
};

// Class which holds async pixel transfers state.
// The texture_id is accessed by either thread, but everything
// else accessed only on the main thread.
class TransferStateInternal
    : public base::RefCountedThreadSafe<TransferStateInternal> {
 public:
  TransferStateInternal(GLuint texture_id,
                        const AsyncTexImage2DParams& define_params)
      : texture_id_(texture_id), define_params_(define_params) {}

  bool TransferIsInProgress() {
    return pending_upload_task_.get() &&
           pending_upload_task_->TaskIsInProgress();
  }

  void BindTransfer() {
    TRACE_EVENT2("gpu", "BindAsyncTransfer",
                 "width", define_params_.width,
                 "height", define_params_.height);
    DCHECK(texture_id_);

    glBindTexture(GL_TEXTURE_2D, texture_id_);
    bind_callback_.Run();
  }

  void WaitForTransferCompletion() {
    TRACE_EVENT0("gpu", "WaitForTransferCompletion");
    DCHECK(pending_upload_task_.get());
    if (!pending_upload_task_->TryRun()) {
      pending_upload_task_->WaitForTask();
    }
    pending_upload_task_ = NULL;
  }

  void CancelUpload() {
    TRACE_EVENT0("gpu", "CancelUpload");
    if (pending_upload_task_.get())
      pending_upload_task_->Cancel();
    pending_upload_task_ = NULL;
  }

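  // The two Schedule* methods below wrap the upload in a PendingTask and
  // post it to the transfer thread. The bound closures hold references to
  // this state and to the PendingTask, keeping both alive until the posted
  // task has run.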
  void ScheduleAsyncTexImage2D(
      const AsyncTexImage2DParams tex_params,
      const AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats,
      const base::Closure& bind_callback) {
    TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
    pending_upload_task_ = new PendingTask(base::Bind(
        &TransferStateInternal::PerformAsyncTexImage2D,
        this,
        tex_params,
        mem_params,
        texture_upload_stats));
    transfer_task_runner()->PostTask(
        FROM_HERE, base::Bind(&PendingTask::BindAndRun, pending_upload_task_,
                              texture_id_));

    // Save the late bind callback, so we can notify the client when it is
    // bound.
    bind_callback_ = bind_callback;
  }

  void ScheduleAsyncTexSubImage2D(
      AsyncTexSubImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
    pending_upload_task_ = new PendingTask(base::Bind(
        &TransferStateInternal::PerformAsyncTexSubImage2D,
        this,
        tex_params,
        mem_params,
        texture_upload_stats));
    transfer_task_runner()->PostTask(
        FROM_HERE, base::Bind(&PendingTask::BindAndRun, pending_upload_task_,
                              texture_id_));
  }

 private:
  friend class base::RefCountedThreadSafe<TransferStateInternal>;

  virtual ~TransferStateInternal() {
  }

  void PerformAsyncTexImage2D(
      AsyncTexImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT2("gpu",
                 "PerformAsyncTexImage",
                 "width",
                 tex_params.width,
                 "height",
                 tex_params.height);
    DCHECK_EQ(0, tex_params.level);

    base::TimeTicks begin_time;
    if (texture_upload_stats.get())
      begin_time = base::TimeTicks::Now();

    void* data = mem_params.GetDataAddress();

    {
      TRACE_EVENT0("gpu", "glTexImage2D");
      glTexImage2D(GL_TEXTURE_2D,
                   tex_params.level,
                   tex_params.internal_format,
                   tex_params.width,
                   tex_params.height,
                   tex_params.border,
                   tex_params.format,
                   tex_params.type,
                   data);
      TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
    }

    if (texture_upload_stats.get()) {
      texture_upload_stats->AddUpload(base::TimeTicks::Now() - begin_time);
    }
  }

  void PerformAsyncTexSubImage2D(
      AsyncTexSubImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT2("gpu",
                 "PerformAsyncTexSubImage2D",
                 "width",
                 tex_params.width,
                 "height",
                 tex_params.height);
    DCHECK_EQ(0, tex_params.level);

    base::TimeTicks begin_time;
    if (texture_upload_stats.get())
      begin_time = base::TimeTicks::Now();

    void* data = mem_params.GetDataAddress();
    {
      TRACE_EVENT0("gpu", "glTexSubImage2D");
      glTexSubImage2D(GL_TEXTURE_2D,
                      tex_params.level,
                      tex_params.xoffset,
                      tex_params.yoffset,
                      tex_params.width,
                      tex_params.height,
                      tex_params.format,
                      tex_params.type,
                      data);
      TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
    }

    if (texture_upload_stats.get()) {
      texture_upload_stats->AddUpload(base::TimeTicks::Now() - begin_time);
    }
  }

  scoped_refptr<PendingTask> pending_upload_task_;

  GLuint texture_id_;

  // Definition params for texture that needs binding.
  AsyncTexImage2DParams define_params_;

  // Callback to invoke when AsyncTexImage2D is complete
  // and the client can safely use the texture. This occurs
  // during BindCompletedAsyncTransfers().
  base::Closure bind_callback_;
};

}  // namespace

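// One delegate exists per texture with an outstanding async transfer. It
// forwards work to a ref-counted TransferStateInternal and registers itself
// in SharedState::pending_allocations when an AsyncTexImage2D is scheduled,
// so the manager can bind the texture on the main thread once the upload
// finishes.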
class AsyncPixelTransferDelegateShareGroup
    : public AsyncPixelTransferDelegate,
      public base::SupportsWeakPtr<AsyncPixelTransferDelegateShareGroup> {
 public:
  AsyncPixelTransferDelegateShareGroup(
      AsyncPixelTransferManagerShareGroup::SharedState* shared_state,
      GLuint texture_id,
      const AsyncTexImage2DParams& define_params);
  ~AsyncPixelTransferDelegateShareGroup() override;

  void BindTransfer() { state_->BindTransfer(); }

  // Implement AsyncPixelTransferDelegate:
  void AsyncTexImage2D(const AsyncTexImage2DParams& tex_params,
                       const AsyncMemoryParams& mem_params,
                       const base::Closure& bind_callback) override;
  void AsyncTexSubImage2D(const AsyncTexSubImage2DParams& tex_params,
                          const AsyncMemoryParams& mem_params) override;
  bool TransferIsInProgress() override;
  void WaitForTransferCompletion() override;

 private:
  // A raw pointer is safe because the SharedState is owned by the Manager,
  // which owns this Delegate.
  AsyncPixelTransferManagerShareGroup::SharedState* shared_state_;
  scoped_refptr<TransferStateInternal> state_;

  DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateShareGroup);
};

AsyncPixelTransferDelegateShareGroup::AsyncPixelTransferDelegateShareGroup(
    AsyncPixelTransferManagerShareGroup::SharedState* shared_state,
    GLuint texture_id,
    const AsyncTexImage2DParams& define_params)
    : shared_state_(shared_state),
      state_(new TransferStateInternal(texture_id, define_params)) {}

AsyncPixelTransferDelegateShareGroup::~AsyncPixelTransferDelegateShareGroup() {
  TRACE_EVENT0("gpu", " ~AsyncPixelTransferDelegateShareGroup");
  state_->CancelUpload();
}

bool AsyncPixelTransferDelegateShareGroup::TransferIsInProgress() {
  return state_->TransferIsInProgress();
}

void AsyncPixelTransferDelegateShareGroup::WaitForTransferCompletion() {
  if (state_->TransferIsInProgress()) {
    state_->WaitForTransferCompletion();
    DCHECK(!state_->TransferIsInProgress());
  }

  // Fast track the BindTransfer, if applicable.
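  // Binding here and removing the delegate from the pending queue means
  // BindCompletedAsyncTransfers() will not bind this texture a second time.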
  for (AsyncPixelTransferManagerShareGroup::SharedState::TransferQueue::iterator
           iter = shared_state_->pending_allocations.begin();
       iter != shared_state_->pending_allocations.end();
       ++iter) {
    if (iter->get() != this)
      continue;

    shared_state_->pending_allocations.erase(iter);
    BindTransfer();
    break;
  }
}

void AsyncPixelTransferDelegateShareGroup::AsyncTexImage2D(
    const AsyncTexImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params,
    const base::Closure& bind_callback) {
  DCHECK(!state_->TransferIsInProgress());
  DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
  DCHECK_EQ(tex_params.level, 0);

  shared_state_->pending_allocations.push_back(AsWeakPtr());
  state_->ScheduleAsyncTexImage2D(tex_params,
                                  mem_params,
                                  shared_state_->texture_upload_stats,
                                  bind_callback);
}

void AsyncPixelTransferDelegateShareGroup::AsyncTexSubImage2D(
    const AsyncTexSubImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params) {
  TRACE_EVENT2("gpu", "AsyncTexSubImage2D",
               "width", tex_params.width,
               "height", tex_params.height);
  DCHECK(!state_->TransferIsInProgress());
  DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
  DCHECK_EQ(tex_params.level, 0);

  state_->ScheduleAsyncTexSubImage2D(
      tex_params, mem_params, shared_state_->texture_upload_stats);
}

AsyncPixelTransferManagerShareGroup::SharedState::SharedState()
    // TODO(reveman): Skip this if --enable-gpu-benchmarking is not present.
    : texture_upload_stats(new AsyncPixelTransferUploadStats) {}

AsyncPixelTransferManagerShareGroup::SharedState::~SharedState() {}

AsyncPixelTransferManagerShareGroup::AsyncPixelTransferManagerShareGroup(
    gfx::GLContext* context) {
  g_transfer_thread.Pointer()->InitializeOnMainThread(context);
}

AsyncPixelTransferManagerShareGroup::~AsyncPixelTransferManagerShareGroup() {}

void AsyncPixelTransferManagerShareGroup::BindCompletedAsyncTransfers() {
  scoped_ptr<gfx::ScopedTextureBinder> texture_binder;

  while (!shared_state_.pending_allocations.empty()) {
    if (!shared_state_.pending_allocations.front().get()) {
      shared_state_.pending_allocations.pop_front();
      continue;
    }
    AsyncPixelTransferDelegateShareGroup* delegate =
        shared_state_.pending_allocations.front().get();
    // Terminate early, as all transfers finish in order, currently.
    if (delegate->TransferIsInProgress())
      break;

    if (!texture_binder)
      texture_binder.reset(new gfx::ScopedTextureBinder(GL_TEXTURE_2D, 0));

    // Used to set tex info from the gles2 cmd decoder once upload has
    // finished (it'll bind the texture and call a callback).
    delegate->BindTransfer();

    shared_state_.pending_allocations.pop_front();
  }
}

void AsyncPixelTransferManagerShareGroup::AsyncNotifyCompletion(
    const AsyncMemoryParams& mem_params,
    AsyncPixelTransferCompletionObserver* observer) {
  // Post a PerformNotifyCompletion task to the upload thread. This task
  // will run after all async transfers are complete.
  transfer_task_runner()->PostTask(
      FROM_HERE, base::Bind(&PerformNotifyCompletion, mem_params,
                            make_scoped_refptr(observer)));
}

uint32 AsyncPixelTransferManagerShareGroup::GetTextureUploadCount() {
  return shared_state_.texture_upload_stats->GetStats(NULL);
}

base::TimeDelta
AsyncPixelTransferManagerShareGroup::GetTotalTextureUploadTime() {
  base::TimeDelta total_texture_upload_time;
  shared_state_.texture_upload_stats->GetStats(&total_texture_upload_time);
  return total_texture_upload_time;
}

void AsyncPixelTransferManagerShareGroup::ProcessMorePendingTransfers() {
}

bool AsyncPixelTransferManagerShareGroup::NeedsProcessMorePendingTransfers() {
  return false;
}

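// Transfers complete in the order they were queued on the single transfer
// thread, so waiting on the most recently queued delegate stands in for
// waiting on all of them.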
void AsyncPixelTransferManagerShareGroup::WaitAllAsyncTexImage2D() {
  if (shared_state_.pending_allocations.empty())
    return;

  AsyncPixelTransferDelegateShareGroup* delegate =
      shared_state_.pending_allocations.back().get();
  if (delegate)
    delegate->WaitForTransferCompletion();
}

AsyncPixelTransferDelegate*
AsyncPixelTransferManagerShareGroup::CreatePixelTransferDelegateImpl(
    gles2::TextureRef* ref,
    const AsyncTexImage2DParams& define_params) {
  return new AsyncPixelTransferDelegateShareGroup(
      &shared_state_, ref->service_id(), define_params);
}

}  // namespace gpu