OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "gpu/command_buffer/service/in_process_command_buffer.h" | 5 #include "gpu/command_buffer/service/in_process_command_buffer.h" |
6 | 6 |
7 #include <queue> | 7 #include <queue> |
8 #include <utility> | 8 #include <utility> |
9 | 9 |
10 #include <GLES2/gl2.h> | 10 #include <GLES2/gl2.h> |
11 #ifndef GL_GLEXT_PROTOTYPES | 11 #ifndef GL_GLEXT_PROTOTYPES |
12 #define GL_GLEXT_PROTOTYPES 1 | 12 #define GL_GLEXT_PROTOTYPES 1 |
13 #endif | 13 #endif |
14 #include <GLES2/gl2ext.h> | 14 #include <GLES2/gl2ext.h> |
15 #include <GLES2/gl2extchromium.h> | 15 #include <GLES2/gl2extchromium.h> |
16 | 16 |
17 #include "base/bind.h" | 17 #include "base/bind.h" |
18 #include "base/bind_helpers.h" | 18 #include "base/bind_helpers.h" |
19 #include "base/lazy_instance.h" | 19 #include "base/lazy_instance.h" |
20 #include "base/logging.h" | 20 #include "base/logging.h" |
21 #include "base/memory/weak_ptr.h" | 21 #include "base/memory/weak_ptr.h" |
22 #include "base/message_loop/message_loop_proxy.h" | 22 #include "base/message_loop/message_loop_proxy.h" |
| 23 #include "base/sequence_checker.h" |
23 #include "base/threading/thread.h" | 24 #include "base/threading/thread.h" |
24 #include "gpu/command_buffer/common/id_allocator.h" | 25 #include "gpu/command_buffer/common/id_allocator.h" |
25 #include "gpu/command_buffer/service/command_buffer_service.h" | 26 #include "gpu/command_buffer/service/command_buffer_service.h" |
26 #include "gpu/command_buffer/service/context_group.h" | 27 #include "gpu/command_buffer/service/context_group.h" |
27 #include "gpu/command_buffer/service/gl_context_virtual.h" | 28 #include "gpu/command_buffer/service/gl_context_virtual.h" |
28 #include "gpu/command_buffer/service/gpu_scheduler.h" | 29 #include "gpu/command_buffer/service/gpu_scheduler.h" |
29 #include "gpu/command_buffer/service/image_manager.h" | 30 #include "gpu/command_buffer/service/image_manager.h" |
30 #include "gpu/command_buffer/service/transfer_buffer_manager.h" | 31 #include "gpu/command_buffer/service/transfer_buffer_manager.h" |
31 #include "ui/gfx/size.h" | 32 #include "ui/gfx/size.h" |
32 #include "ui/gl/gl_context.h" | 33 #include "ui/gl/gl_context.h" |
33 #include "ui/gl/gl_image.h" | 34 #include "ui/gl/gl_image.h" |
34 #include "ui/gl/gl_share_group.h" | 35 #include "ui/gl/gl_share_group.h" |
35 #include "ui/gl/gl_surface.h" | |
36 | 36 |
37 namespace gpu { | 37 namespace gpu { |
38 | 38 |
39 namespace { | 39 namespace { |
40 | 40 |
41 static base::LazyInstance<std::set<InProcessCommandBuffer*> > | 41 static base::LazyInstance<std::set<InProcessCommandBuffer*> > |
42 g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER; | 42 g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER; |
43 | 43 |
44 static bool g_use_virtualized_gl_context = false; | 44 static bool g_use_virtualized_gl_context = false; |
45 static bool g_uses_explicit_scheduling = false; | 45 static bool g_uses_explicit_scheduling = false; |
(...skipping 191 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
237 share_group_id_(0), | 237 share_group_id_(0), |
238 last_put_offset_(-1), | 238 last_put_offset_(-1), |
239 flush_event_(false, false), | 239 flush_event_(false, false), |
240 queue_(CreateSchedulerClient()) {} | 240 queue_(CreateSchedulerClient()) {} |
241 | 241 |
// Tears down the command buffer; Destroy() blocks until the GPU-thread
// cleanup task has completed.
InProcessCommandBuffer::~InProcessCommandBuffer() {
  Destroy();
}
245 | 245 |
246 bool InProcessCommandBuffer::IsContextLost() { | 246 bool InProcessCommandBuffer::IsContextLost() { |
| 247 CheckSequencedThread(); |
247 if (context_lost_ || !command_buffer_) { | 248 if (context_lost_ || !command_buffer_) { |
248 return true; | 249 return true; |
249 } | 250 } |
250 CommandBuffer::State state = GetState(); | 251 CommandBuffer::State state = GetState(); |
251 return error::IsError(state.error); | 252 return error::IsError(state.error); |
252 } | 253 } |
253 | 254 |
// Resizes the onscreen surface. Only valid for view (non-offscreen)
// surfaces; |scale_factor| is currently unused here.
void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  CheckSequencedThread();
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);
}
258 | 260 |
// Makes the decoder's GL context current. On failure the context is treated
// as lost: the loss reason and a kLostContext parse error are recorded on
// the command buffer. Caller must already hold command_buffer_lock_.
bool InProcessCommandBuffer::MakeCurrent() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!context_lost_ && decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  return false;
}
269 | 272 |
// Processes pending commands on the GPU thread by notifying the scheduler
// that the put pointer changed. Requires command_buffer_lock_ to be held;
// silently returns if the context cannot be made current.
void InProcessCommandBuffer::PumpCommands() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!MakeCurrent())
    return;

  gpu_scheduler_->PutChanged();
}
278 | 282 |
// Switches the command buffer's get buffer to |transfer_buffer_id|.
// Requires command_buffer_lock_ to be held; always reports success.
bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}
284 | 289 |
// Initializes the command buffer, blocking until the GPU-thread half
// (InitializeOnGpuThread) has run and returning its result.
//
// |surface|: optional pre-made surface. When supplied, the GPU "thread" is
//     required to be the client thread (GLSurface is not thread safe), which
//     is enforced by installing sequence_checker_.
// |is_offscreen|/|window|/|size|: surface creation parameters used on the
//     GPU thread when no surface was supplied.
// |context_lost_callback|: invoked (wrapped) when the context is lost.
// Returns true on successful initialization.
bool InProcessCommandBuffer::Initialize(
    scoped_refptr<gfx::GLSurface> surface,
    bool is_offscreen,
    bool share_resources,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const char* allowed_extensions,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    unsigned int share_group_id) {

  share_resources_ = share_resources;
  context_lost_callback_ = WrapCallback(context_lost_callback);
  share_group_id_ = share_group_id;

  if (surface) {
    // GPU thread must be the same as client thread due to GLSurface not being
    // thread safe.
    sequence_checker_.reset(new base::SequenceChecker);
    surface_ = surface;
  }

  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),
                 is_offscreen,
                 window,
                 size,
                 allowed_extensions,
                 attribs,
                 gpu_preference);

  // Synchronously wait for the GPU-side initialization to finish.
  base::WaitableEvent completion(true, false);
  bool result = false;
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
  completion.Wait();
  return result;
}
316 | 330 |
317 bool InProcessCommandBuffer::InitializeOnGpuThread( | 331 bool InProcessCommandBuffer::InitializeOnGpuThread( |
318 bool is_offscreen, | 332 bool is_offscreen, |
319 gfx::AcceleratedWidget window, | 333 gfx::AcceleratedWidget window, |
320 const gfx::Size& size, | 334 const gfx::Size& size, |
321 const char* allowed_extensions, | 335 const char* allowed_extensions, |
322 const std::vector<int32>& attribs, | 336 const std::vector<int32>& attribs, |
323 gfx::GpuPreference gpu_preference) { | 337 gfx::GpuPreference gpu_preference) { |
| 338 CheckSequencedThread(); |
324 // Use one share group for all contexts. | 339 // Use one share group for all contexts. |
325 CR_DEFINE_STATIC_LOCAL(scoped_refptr<gfx::GLShareGroup>, share_group, | 340 CR_DEFINE_STATIC_LOCAL(scoped_refptr<gfx::GLShareGroup>, share_group, |
326 (new gfx::GLShareGroup)); | 341 (new gfx::GLShareGroup)); |
327 | 342 |
328 DCHECK(size.width() >= 0 && size.height() >= 0); | 343 DCHECK(size.width() >= 0 && size.height() >= 0); |
329 | 344 |
330 TransferBufferManager* manager = new TransferBufferManager(); | 345 TransferBufferManager* manager = new TransferBufferManager(); |
331 transfer_buffer_manager_.reset(manager); | 346 transfer_buffer_manager_.reset(manager); |
332 manager->Initialize(); | 347 manager->Initialize(); |
333 | 348 |
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
370 NULL, NULL, NULL, NULL, bind_generates_resource))); | 385 NULL, NULL, NULL, NULL, bind_generates_resource))); |
371 | 386 |
372 gpu_scheduler_.reset( | 387 gpu_scheduler_.reset( |
373 new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get())); | 388 new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get())); |
374 command_buffer->SetGetBufferChangeCallback(base::Bind( | 389 command_buffer->SetGetBufferChangeCallback(base::Bind( |
375 &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get()))); | 390 &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get()))); |
376 command_buffer_ = command_buffer.Pass(); | 391 command_buffer_ = command_buffer.Pass(); |
377 | 392 |
378 decoder_->set_engine(gpu_scheduler_.get()); | 393 decoder_->set_engine(gpu_scheduler_.get()); |
379 | 394 |
380 if (is_offscreen) | 395 if (!surface_) { |
381 surface_ = gfx::GLSurface::CreateOffscreenGLSurface(size); | 396 if (is_offscreen) |
382 else | 397 surface_ = gfx::GLSurface::CreateOffscreenGLSurface(size); |
383 surface_ = gfx::GLSurface::CreateViewGLSurface(window); | 398 else |
| 399 surface_ = gfx::GLSurface::CreateViewGLSurface(window); |
| 400 } |
384 | 401 |
385 if (!surface_.get()) { | 402 if (!surface_.get()) { |
386 LOG(ERROR) << "Could not create GLSurface."; | 403 LOG(ERROR) << "Could not create GLSurface."; |
387 DestroyOnGpuThread(); | 404 DestroyOnGpuThread(); |
388 return false; | 405 return false; |
389 } | 406 } |
390 | 407 |
391 if (g_use_virtualized_gl_context) { | 408 if (g_use_virtualized_gl_context) { |
392 context_ = share_group->GetSharedContext(); | 409 context_ = share_group->GetSharedContext(); |
393 if (!context_.get()) { | 410 if (!context_.get()) { |
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
441 } | 458 } |
442 | 459 |
443 if (share_resources_) { | 460 if (share_resources_) { |
444 g_all_shared_contexts.Pointer()->insert(this); | 461 g_all_shared_contexts.Pointer()->insert(this); |
445 } | 462 } |
446 | 463 |
447 return true; | 464 return true; |
448 } | 465 } |
449 | 466 |
// Client-thread entry point for teardown: queues DestroyOnGpuThread() and
// blocks until it has completed. Safe to call more than once (the GPU-side
// cleanup handles already-reset members).
void InProcessCommandBuffer::Destroy() {
  CheckSequencedThread();
  base::WaitableEvent completion(true, false);
  bool result = false;  // Result of DestroyOnGpuThread(); not inspected here.
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
  completion.Wait();
}
459 | 477 |
// GPU-thread half of Destroy(). Releases the command buffer, destroys the
// decoder (with a current context when one can still be made current, so GL
// resources are freed), drops the context/surface references, and removes
// this instance from the global shared-context set. Always returns true.
bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_ && context_->MakeCurrent(surface_);
  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = NULL;
  surface_ = NULL;

  g_all_shared_contexts.Pointer()->erase(this);
  return true;
}
474 | 493 |
// Debug check that the caller is on the expected sequence. The checker is
// only installed when Initialize() received an external surface (client
// thread == GPU thread); otherwise this is a no-op.
void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ ||
         sequence_checker_->CalledOnValidSequencedThread());
}
| 498 |
// Allocates an image id on the client thread (under the command buffer
// lock) and queues the actual GLImage creation for the GPU thread.
// Returns the id immediately, before the image exists.
unsigned int InProcessCommandBuffer::CreateImageForGpuMemoryBuffer(
    gfx::GpuMemoryBufferHandle buffer,
    gfx::Size size) {
  CheckSequencedThread();
  unsigned int image_id;
  {
    // TODO: ID allocation should go through CommandBuffer
    base::AutoLock lock(command_buffer_lock_);
    gles2::ContextGroup* group = decoder_->GetContextGroup();
    image_id =
        group->GetIdAllocator(gles2::id_namespaces::kImages)->AllocateID();
  }
  base::Closure image_task =
      base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
                 base::Unretained(this), buffer, size, image_id);
  QueueTask(image_task);
  return image_id;
}
492 | 517 |
// GPU-thread half of CreateImageForGpuMemoryBuffer(): wraps the GPU memory
// buffer in a GLImage and registers it with the decoder's image manager
// under |image_id|.
void InProcessCommandBuffer::CreateImageOnGpuThread(
    gfx::GpuMemoryBufferHandle buffer,
    gfx::Size size,
    unsigned int image_id) {
  CheckSequencedThread();
  scoped_refptr<gfx::GLImage> gl_image =
      gfx::GLImage::CreateGLImageForGpuMemoryBuffer(buffer, size);
  decoder_->GetContextGroup()->image_manager()->AddImage(gl_image, image_id);
}
501 | 527 |
// Frees |image_id| in the id allocator on the client thread (under the
// command buffer lock), then queues removal of the image from the image
// manager on the GPU thread.
void InProcessCommandBuffer::RemoveImage(unsigned int image_id) {
  CheckSequencedThread();
  {
    // TODO: ID allocation should go through CommandBuffer
    base::AutoLock lock(command_buffer_lock_);
    gles2::ContextGroup* group = decoder_->GetContextGroup();
    group->GetIdAllocator(gles2::id_namespaces::kImages)->FreeID(image_id);
  }
  base::Closure image_manager_task =
      base::Bind(&InProcessCommandBuffer::RemoveImageOnGpuThread,
                 base::Unretained(this),
                 image_id);
  QueueTask(image_manager_task);
}
515 | 542 |
// GPU-thread half of RemoveImage(): drops the image from the image manager.
void InProcessCommandBuffer::RemoveImageOnGpuThread(unsigned int image_id) {
  CheckSequencedThread();
  decoder_->GetContextGroup()->image_manager()->RemoveImage(image_id);
}
519 | 547 |
// Handles context loss: runs the client's callback at most once, marks this
// buffer lost, and, when resource sharing is enabled, propagates the lost
// flag to every other InProcessCommandBuffer in the global share set.
void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();
  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    context_lost_callback_.Reset();  // Ensure the callback fires only once.
  }

  context_lost_ = true;
  if (share_resources_) {
    // One lost context poisons every context sharing resources with it.
    for (std::set<InProcessCommandBuffer*>::iterator it =
             g_all_shared_contexts.Get().begin();
         it != g_all_shared_contexts.Get().end();
         ++it) {
      (*it)->context_lost_ = true;
    }
  }
}
536 | 565 |
// Refreshes last_state_ from the GPU thread's post-flush snapshot and
// returns it. The unsigned generation subtraction accepts the snapshot only
// when it is not older than what we already have, correctly handling
// generation-counter wraparound.
CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}
543 | 573 |
// CommandBuffer implementation: returns the freshest available state.
CommandBuffer::State InProcessCommandBuffer::GetState() {
  CheckSequencedThread();
  return GetStateFast();
}
547 | 578 |
// Returns the cached state without consulting the GPU-thread snapshot.
CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}
551 | 583 |
// Refreshes the cached state, then returns its token.
int32 InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  GetStateFast();
  return last_state_.token;
}
556 | 589 |
// GPU-thread half of Flush(): advances the command buffer to |put_offset|,
// snapshots the resulting state for the client thread, and (via ScopedEvent
// going out of scope) signals flush_event_ so FlushSync() can wake up.
void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  CheckSequencedThread();
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
  {
    // Update state before signaling the flush event.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetState();
  }
  // The error state and the context-lost flag must agree.
  DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
         (error::IsError(state_after_last_flush_.error) && context_lost_));
}
569 | 603 |
570 void InProcessCommandBuffer::Flush(int32 put_offset) { | 604 void InProcessCommandBuffer::Flush(int32 put_offset) { |
| 605 CheckSequencedThread(); |
571 if (last_state_.error != gpu::error::kNoError) | 606 if (last_state_.error != gpu::error::kNoError) |
572 return; | 607 return; |
573 | 608 |
574 if (last_put_offset_ == put_offset) | 609 if (last_put_offset_ == put_offset) |
575 return; | 610 return; |
576 | 611 |
577 last_put_offset_ = put_offset; | 612 last_put_offset_ = put_offset; |
578 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, | 613 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, |
579 base::Unretained(this), | 614 base::Unretained(this), |
580 put_offset); | 615 put_offset); |
581 QueueTask(task); | 616 QueueTask(task); |
582 } | 617 } |
583 | 618 |
// Flushes to |put_offset| and blocks until the GPU thread's get pointer
// moves past |last_known_get| or an error is reported. Returns immediately
// with the cached state when there is nothing to flush or the context is
// already in error.
CommandBuffer::State InProcessCommandBuffer::FlushSync(int32 put_offset,
                                                       int32 last_known_get) {
  CheckSequencedThread();
  if (put_offset == last_known_get || last_state_.error != gpu::error::kNoError)
    return last_state_;

  Flush(put_offset);
  GetStateFast();
  // Wait for FlushOnGpuThread() to signal progress, re-reading state after
  // each wakeup.
  while (last_known_get == last_state_.get_offset &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
    GetStateFast();
  }

  return last_state_;
}
599 | 635 |
// Points the command buffer at a new get buffer (|shm_id|) and resets the
// cached put offset, then snapshots the resulting state. The two locks are
// taken in separate scopes to match the locking order used elsewhere.
// No-op when the context is already in error.
void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  {
    base::AutoLock lock(command_buffer_lock_);
    command_buffer_->SetGetBuffer(shm_id);
    last_put_offset_ = 0;
  }
  {
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetState();
  }
}
614 | 651 |
// Creates a transfer buffer of |size| bytes, returning the buffer and
// writing its id to |id|. Delegates to the underlying command buffer under
// the lock.
gpu::Buffer InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                         int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}
620 | 658 |
// Queues destruction of transfer buffer |id| on the GPU thread.
// NOTE(review): the task holds an Unretained pointer to command_buffer_;
// this relies on Destroy() draining the queue before the member is reset.
void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer,
                                  base::Unretained(command_buffer_.get()),
                                  id);

  QueueTask(task);
}
628 | 667 |
// Not supported on the in-process implementation; callers must not use it.
gpu::Buffer InProcessCommandBuffer::GetTransferBuffer(int32 id) {
  NOTREACHED();
  return gpu::Buffer();
}
633 | 672 |
// Not supported on the in-process implementation; callers must not use it.
uint32 InProcessCommandBuffer::InsertSyncPoint() {
  NOTREACHED();
  return 0;
}
// Queues |callback| behind all previously queued GPU work; |sync_point| is
// ignored because tasks on the single queue already run in order.
void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(WrapCallback(callback));
}
642 | 682 |
// Returns the error from the cached state (no GPU-thread round trip).
gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
}
646 | 687 |
// The no-argument CommandBuffer::Initialize() is not supported; use the
// parameterized overload instead.
bool InProcessCommandBuffer::Initialize() {
  NOTREACHED();
  return false;
}
651 | 692 |
// Service-side entry point; never invoked on the in-process implementation.
void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); }
653 | 694 |
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
706 g_uses_explicit_scheduling = true; | 747 g_uses_explicit_scheduling = true; |
707 g_gpu_queue.Get().SetScheduleCallback(callback); | 748 g_gpu_queue.Get().SetScheduleCallback(callback); |
708 } | 749 } |
709 | 750 |
// static
// Drains the global GPU task queue on the calling thread. Used with
// explicit scheduling, where the embedder decides when GPU work runs.
void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() {
  g_gpu_queue.Get().RunTasks();
}
714 | 755 |
715 } // namespace gpu | 756 } // namespace gpu |
OLD | NEW |