Chromium Code Reviews

Side by Side Diff: gpu/command_buffer/service/in_process_command_buffer.cc

Issue 22277004: Add gfx::SurfaceFactoryWebview (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: InProcessContext takes attrib struct (created 7 years, 4 months ago)
1 // Copyright 2013 The Chromium Authors. All rights reserved. 1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "gpu/command_buffer/service/in_process_command_buffer.h" 5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
6 6
7 #include <queue> 7 #include <queue>
8 #include <utility> 8 #include <utility>
9 9
10 #include <GLES2/gl2.h> 10 #include <GLES2/gl2.h>
11 #ifndef GL_GLEXT_PROTOTYPES 11 #ifndef GL_GLEXT_PROTOTYPES
12 #define GL_GLEXT_PROTOTYPES 1 12 #define GL_GLEXT_PROTOTYPES 1
13 #endif 13 #endif
14 #include <GLES2/gl2ext.h> 14 #include <GLES2/gl2ext.h>
15 #include <GLES2/gl2extchromium.h> 15 #include <GLES2/gl2extchromium.h>
16 16
17 #include "base/bind.h" 17 #include "base/bind.h"
18 #include "base/bind_helpers.h" 18 #include "base/bind_helpers.h"
19 #include "base/lazy_instance.h" 19 #include "base/lazy_instance.h"
20 #include "base/logging.h" 20 #include "base/logging.h"
21 #include "base/memory/weak_ptr.h" 21 #include "base/memory/weak_ptr.h"
22 #include "base/message_loop/message_loop_proxy.h" 22 #include "base/message_loop/message_loop_proxy.h"
23 #include "base/sequence_checker.h"
23 #include "base/threading/thread.h" 24 #include "base/threading/thread.h"
24 #include "gpu/command_buffer/common/id_allocator.h" 25 #include "gpu/command_buffer/common/id_allocator.h"
25 #include "gpu/command_buffer/service/command_buffer_service.h" 26 #include "gpu/command_buffer/service/command_buffer_service.h"
26 #include "gpu/command_buffer/service/context_group.h" 27 #include "gpu/command_buffer/service/context_group.h"
27 #include "gpu/command_buffer/service/gl_context_virtual.h" 28 #include "gpu/command_buffer/service/gl_context_virtual.h"
28 #include "gpu/command_buffer/service/gpu_scheduler.h" 29 #include "gpu/command_buffer/service/gpu_scheduler.h"
29 #include "gpu/command_buffer/service/image_manager.h" 30 #include "gpu/command_buffer/service/image_manager.h"
30 #include "gpu/command_buffer/service/transfer_buffer_manager.h" 31 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
31 #include "ui/gfx/size.h" 32 #include "ui/gfx/size.h"
32 #include "ui/gl/gl_context.h" 33 #include "ui/gl/gl_context.h"
33 #include "ui/gl/gl_image.h" 34 #include "ui/gl/gl_image.h"
34 #include "ui/gl/gl_share_group.h" 35 #include "ui/gl/gl_share_group.h"
35 #include "ui/gl/gl_surface.h"
36 36
37 namespace gpu { 37 namespace gpu {
38 38
39 namespace { 39 namespace {
40 40
41 static base::LazyInstance<std::set<InProcessCommandBuffer*> > 41 static base::LazyInstance<std::set<InProcessCommandBuffer*> >
42 g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER; 42 g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER;
43 43
44 static bool g_use_virtualized_gl_context = false; 44 static bool g_use_virtualized_gl_context = false;
45 static bool g_uses_explicit_scheduling = false; 45 static bool g_uses_explicit_scheduling = false;
(...skipping 191 matching lines...)
237 share_group_id_(0), 237 share_group_id_(0),
238 last_put_offset_(-1), 238 last_put_offset_(-1),
239 flush_event_(false, false), 239 flush_event_(false, false),
240 queue_(CreateSchedulerClient()) {} 240 queue_(CreateSchedulerClient()) {}
241 241
242 InProcessCommandBuffer::~InProcessCommandBuffer() { 242 InProcessCommandBuffer::~InProcessCommandBuffer() {
243 Destroy(); 243 Destroy();
244 } 244 }
245 245
246 bool InProcessCommandBuffer::IsContextLost() { 246 bool InProcessCommandBuffer::IsContextLost() {
247 CheckSequencedThread();
247 if (context_lost_ || !command_buffer_) { 248 if (context_lost_ || !command_buffer_) {
248 return true; 249 return true;
249 } 250 }
250 CommandBuffer::State state = GetState(); 251 CommandBuffer::State state = GetState();
251 return error::IsError(state.error); 252 return error::IsError(state.error);
252 } 253 }
253 254
254 void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) { 255 void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
256 CheckSequencedThread();
255 DCHECK(!surface_->IsOffscreen()); 257 DCHECK(!surface_->IsOffscreen());
256 surface_->Resize(size); 258 surface_->Resize(size);
257 } 259 }
258 260
259 bool InProcessCommandBuffer::MakeCurrent() { 261 bool InProcessCommandBuffer::MakeCurrent() {
262 CheckSequencedThread();
260 command_buffer_lock_.AssertAcquired(); 263 command_buffer_lock_.AssertAcquired();
261 264
262 if (!context_lost_ && decoder_->MakeCurrent()) 265 if (!context_lost_ && decoder_->MakeCurrent())
263 return true; 266 return true;
264 DLOG(ERROR) << "Context lost because MakeCurrent failed."; 267 DLOG(ERROR) << "Context lost because MakeCurrent failed.";
265 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); 268 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
266 command_buffer_->SetParseError(gpu::error::kLostContext); 269 command_buffer_->SetParseError(gpu::error::kLostContext);
267 return false; 270 return false;
268 } 271 }
269 272
270 void InProcessCommandBuffer::PumpCommands() { 273 void InProcessCommandBuffer::PumpCommands() {
274 CheckSequencedThread();
271 ScopedEvent handle_flush(&flush_event_); 275 ScopedEvent handle_flush(&flush_event_);
272 command_buffer_lock_.AssertAcquired(); 276 command_buffer_lock_.AssertAcquired();
273 277
274 if (!MakeCurrent()) 278 if (!MakeCurrent())
275 return; 279 return;
276 280
277 gpu_scheduler_->PutChanged(); 281 gpu_scheduler_->PutChanged();
278 CommandBuffer::State state = command_buffer_->GetState(); 282 CommandBuffer::State state = command_buffer_->GetState();
279 DCHECK((!error::IsError(state.error) && !context_lost_) || 283 DCHECK((!error::IsError(state.error) && !context_lost_) ||
280 (error::IsError(state.error) && context_lost_)); 284 (error::IsError(state.error) && context_lost_));
281 } 285 }
282 286
283 bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) { 287 bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
288 CheckSequencedThread();
284 command_buffer_lock_.AssertAcquired(); 289 command_buffer_lock_.AssertAcquired();
285 command_buffer_->SetGetBuffer(transfer_buffer_id); 290 command_buffer_->SetGetBuffer(transfer_buffer_id);
286 return true; 291 return true;
287 } 292 }
288 293
289 bool InProcessCommandBuffer::Initialize( 294 bool InProcessCommandBuffer::Initialize(
295 scoped_refptr<gfx::GLSurface> surface,
290 bool is_offscreen, 296 bool is_offscreen,
291 bool share_resources, 297 bool share_resources,
292 gfx::AcceleratedWidget window, 298 gfx::AcceleratedWidget window,
293 const gfx::Size& size, 299 const gfx::Size& size,
294 const char* allowed_extensions, 300 const char* allowed_extensions,
295 const std::vector<int32>& attribs, 301 const std::vector<int32>& attribs,
296 gfx::GpuPreference gpu_preference, 302 gfx::GpuPreference gpu_preference,
297 const base::Closure& context_lost_callback, 303 const base::Closure& context_lost_callback,
298 unsigned int share_group_id) { 304 unsigned int share_group_id) {
299 305
300 share_resources_ = share_resources; 306 share_resources_ = share_resources;
301 context_lost_callback_ = WrapCallback(context_lost_callback); 307 context_lost_callback_ = WrapCallback(context_lost_callback);
302 share_group_id_ = share_group_id; 308 share_group_id_ = share_group_id;
303 309
304 base::WaitableEvent completion(true, false); 310 if (surface) {
305 bool result = false; 311 // GPU thread must be the same as client thread due to GLSurface not being
312 // thread safe.
313 sequence_checker_.reset(new base::SequenceChecker);
314 surface_ = surface;
315 }
316
306 base::Callback<bool(void)> init_task = 317 base::Callback<bool(void)> init_task =
307 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread, 318 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
308 base::Unretained(this), 319 base::Unretained(this),
309 is_offscreen, 320 is_offscreen,
310 window, 321 window,
311 size, 322 size,
312 allowed_extensions, 323 allowed_extensions,
313 attribs, 324 attribs,
314 gpu_preference); 325 gpu_preference);
326
327 base::WaitableEvent completion(true, false);
328 bool result = false;
315 QueueTask( 329 QueueTask(
316 base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion)); 330 base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
317 completion.Wait(); 331 completion.Wait();
318 return result; 332 return result;
319 } 333 }
320 334
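The new scoped_refptr<gfx::GLSurface> parameter above is the point of this patch set: a client such as the Android WebView can hand in a surface it already owns, which in turn forces client and GPU work onto the same thread (hence the SequenceChecker). A hypothetical caller of the new signature might look like the sketch below; the variable names and argument values are illustrative only and are not taken from this CL.

```cpp
// Hypothetical usage sketch of the new Initialize() signature; placeholder
// values throughout. Assumes a valid gfx::AcceleratedWidget named |window|.
scoped_refptr<gfx::GLSurface> surface =
    gfx::GLSurface::CreateViewGLSurface(window);  // client-owned surface

std::vector<int32> attribs;  // context attributes, left empty here

gpu::InProcessCommandBuffer command_buffer;
bool ok = command_buffer.Initialize(surface,
                                    false,            // is_offscreen
                                    window,
                                    surface->GetSize(),
                                    NULL,             // allowed_extensions
                                    attribs,
                                    gfx::PreferDiscreteGpu,
                                    base::Closure(),  // context_lost_callback
                                    0);               // share_group_id
if (!ok)
  LOG(ERROR) << "InProcessCommandBuffer::Initialize failed.";
```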
321 bool InProcessCommandBuffer::InitializeOnGpuThread( 335 bool InProcessCommandBuffer::InitializeOnGpuThread(
322 bool is_offscreen, 336 bool is_offscreen,
323 gfx::AcceleratedWidget window, 337 gfx::AcceleratedWidget window,
324 const gfx::Size& size, 338 const gfx::Size& size,
325 const char* allowed_extensions, 339 const char* allowed_extensions,
326 const std::vector<int32>& attribs, 340 const std::vector<int32>& attribs,
327 gfx::GpuPreference gpu_preference) { 341 gfx::GpuPreference gpu_preference) {
342 CheckSequencedThread();
no sievers 2013/08/07 01:52:38 Does it make sense to always have a sequence check…
boliu 2013/08/07 02:58:48 Can't use scoped_ptr anymore since they could both…
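The thread above is cut off by the review UI. For readers unfamiliar with the idiom being debated, a minimal, self-contained sketch of an optional base::SequenceChecker follows; the class and member names are hypothetical and only illustrate the pattern this patch set applies to InProcessCommandBuffer (create the checker only when single-sequence use is required, and let the DCHECK degrade to a no-op otherwise).

```cpp
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/sequence_checker.h"

// Hypothetical class illustrating the optional-SequenceChecker idiom.
class SequenceAffineThing {
 public:
  // Pass true when the object must only ever be used from one sequence.
  explicit SequenceAffineThing(bool bind_to_sequence) {
    if (bind_to_sequence)
      sequence_checker_.reset(new base::SequenceChecker);
  }

  void DoWork() {
    // No-op when no checker was created; otherwise DCHECKs that we are on
    // the sequence the checker was bound to at construction time.
    DCHECK(!sequence_checker_ ||
           sequence_checker_->CalledOnValidSequencedThread());
    // ... actual work ...
  }

 private:
  scoped_ptr<base::SequenceChecker> sequence_checker_;
};
```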
328 // Use one share group for all contexts. 343 // Use one share group for all contexts.
329 CR_DEFINE_STATIC_LOCAL(scoped_refptr<gfx::GLShareGroup>, share_group, 344 CR_DEFINE_STATIC_LOCAL(scoped_refptr<gfx::GLShareGroup>, share_group,
330 (new gfx::GLShareGroup)); 345 (new gfx::GLShareGroup));
331 346
332 DCHECK(size.width() >= 0 && size.height() >= 0); 347 DCHECK(size.width() >= 0 && size.height() >= 0);
333 348
334 TransferBufferManager* manager = new TransferBufferManager(); 349 TransferBufferManager* manager = new TransferBufferManager();
335 transfer_buffer_manager_.reset(manager); 350 transfer_buffer_manager_.reset(manager);
336 manager->Initialize(); 351 manager->Initialize();
337 352
(...skipping 36 matching lines...)
374 NULL, NULL, NULL, NULL, bind_generates_resource))); 389 NULL, NULL, NULL, NULL, bind_generates_resource)));
375 390
376 gpu_scheduler_.reset( 391 gpu_scheduler_.reset(
377 new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get())); 392 new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
378 command_buffer->SetGetBufferChangeCallback(base::Bind( 393 command_buffer->SetGetBufferChangeCallback(base::Bind(
379 &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get()))); 394 &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
380 command_buffer_ = command_buffer.Pass(); 395 command_buffer_ = command_buffer.Pass();
381 396
382 decoder_->set_engine(gpu_scheduler_.get()); 397 decoder_->set_engine(gpu_scheduler_.get());
383 398
384 if (is_offscreen) 399 if (!surface_) {
385 surface_ = gfx::GLSurface::CreateOffscreenGLSurface(size); 400 if (is_offscreen)
386 else 401 surface_ = gfx::GLSurface::CreateOffscreenGLSurface(size);
387 surface_ = gfx::GLSurface::CreateViewGLSurface(window); 402 else
403 surface_ = gfx::GLSurface::CreateViewGLSurface(window);
404 }
388 405
389 if (!surface_.get()) { 406 if (!surface_.get()) {
390 LOG(ERROR) << "Could not create GLSurface."; 407 LOG(ERROR) << "Could not create GLSurface.";
391 DestroyOnGpuThread(); 408 DestroyOnGpuThread();
392 return false; 409 return false;
393 } 410 }
394 411
395 if (g_use_virtualized_gl_context) { 412 if (g_use_virtualized_gl_context) {
396 context_ = share_group->GetSharedContext(); 413 context_ = share_group->GetSharedContext();
397 if (!context_.get()) { 414 if (!context_.get()) {
(...skipping 47 matching lines...)
445 } 462 }
446 463
447 if (share_resources_) { 464 if (share_resources_) {
448 g_all_shared_contexts.Pointer()->insert(this); 465 g_all_shared_contexts.Pointer()->insert(this);
449 } 466 }
450 467
451 return true; 468 return true;
452 } 469 }
453 470
454 void InProcessCommandBuffer::Destroy() { 471 void InProcessCommandBuffer::Destroy() {
472 CheckSequencedThread();
455 base::WaitableEvent completion(true, false); 473 base::WaitableEvent completion(true, false);
456 bool result = false; 474 bool result = false;
457 base::Callback<bool(void)> destroy_task = base::Bind( 475 base::Callback<bool(void)> destroy_task = base::Bind(
458 &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this)); 476 &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
459 QueueTask( 477 QueueTask(
460 base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion)); 478 base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
461 completion.Wait(); 479 completion.Wait();
462 } 480 }
463 481
464 bool InProcessCommandBuffer::DestroyOnGpuThread() { 482 bool InProcessCommandBuffer::DestroyOnGpuThread() {
483 CheckSequencedThread();
465 command_buffer_.reset(); 484 command_buffer_.reset();
466 // Clean up GL resources if possible. 485 // Clean up GL resources if possible.
467 bool have_context = context_ && context_->MakeCurrent(surface_); 486 bool have_context = context_ && context_->MakeCurrent(surface_);
468 if (decoder_) { 487 if (decoder_) {
469 decoder_->Destroy(have_context); 488 decoder_->Destroy(have_context);
470 decoder_.reset(); 489 decoder_.reset();
471 } 490 }
472 context_ = NULL; 491 context_ = NULL;
473 surface_ = NULL; 492 surface_ = NULL;
474 493
475 g_all_shared_contexts.Pointer()->erase(this); 494 g_all_shared_contexts.Pointer()->erase(this);
476 return true; 495 return true;
477 } 496 }
478 497
498 void InProcessCommandBuffer::CheckSequencedThread() {
499 DCHECK(!sequence_checker_ ||
500 sequence_checker_->CalledOnValidSequencedThread());
501 }
502
479 unsigned int InProcessCommandBuffer::CreateImageForGpuMemoryBuffer( 503 unsigned int InProcessCommandBuffer::CreateImageForGpuMemoryBuffer(
480 gfx::GpuMemoryBufferHandle buffer, 504 gfx::GpuMemoryBufferHandle buffer,
481 gfx::Size size) { 505 gfx::Size size) {
506 CheckSequencedThread();
482 unsigned int image_id; 507 unsigned int image_id;
483 { 508 {
484 // TODO: ID allocation should go through CommandBuffer 509 // TODO: ID allocation should go through CommandBuffer
485 base::AutoLock lock(command_buffer_lock_); 510 base::AutoLock lock(command_buffer_lock_);
486 gles2::ContextGroup* group = decoder_->GetContextGroup(); 511 gles2::ContextGroup* group = decoder_->GetContextGroup();
487 image_id = 512 image_id =
488 group->GetIdAllocator(gles2::id_namespaces::kImages)->AllocateID(); 513 group->GetIdAllocator(gles2::id_namespaces::kImages)->AllocateID();
489 } 514 }
490 base::Closure image_task = 515 base::Closure image_task =
491 base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread, 516 base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
492 base::Unretained(this), buffer, size, image_id); 517 base::Unretained(this), buffer, size, image_id);
493 QueueTask(image_task); 518 QueueTask(image_task);
494 return image_id; 519 return image_id;
495 } 520 }
496 521
497 void InProcessCommandBuffer::CreateImageOnGpuThread( 522 void InProcessCommandBuffer::CreateImageOnGpuThread(
498 gfx::GpuMemoryBufferHandle buffer, 523 gfx::GpuMemoryBufferHandle buffer,
499 gfx::Size size, 524 gfx::Size size,
500 unsigned int image_id) { 525 unsigned int image_id) {
526 CheckSequencedThread();
501 scoped_refptr<gfx::GLImage> gl_image = 527 scoped_refptr<gfx::GLImage> gl_image =
502 gfx::GLImage::CreateGLImageForGpuMemoryBuffer(buffer, size); 528 gfx::GLImage::CreateGLImageForGpuMemoryBuffer(buffer, size);
503 decoder_->GetContextGroup()->image_manager()->AddImage(gl_image, image_id); 529 decoder_->GetContextGroup()->image_manager()->AddImage(gl_image, image_id);
504 } 530 }
505 531
506 void InProcessCommandBuffer::RemoveImage(unsigned int image_id) { 532 void InProcessCommandBuffer::RemoveImage(unsigned int image_id) {
533 CheckSequencedThread();
507 { 534 {
508 // TODO: ID allocation should go through CommandBuffer 535 // TODO: ID allocation should go through CommandBuffer
509 base::AutoLock lock(command_buffer_lock_); 536 base::AutoLock lock(command_buffer_lock_);
510 gles2::ContextGroup* group = decoder_->GetContextGroup(); 537 gles2::ContextGroup* group = decoder_->GetContextGroup();
511 group->GetIdAllocator(gles2::id_namespaces::kImages)->FreeID(image_id); 538 group->GetIdAllocator(gles2::id_namespaces::kImages)->FreeID(image_id);
512 } 539 }
513 base::Closure image_manager_task = 540 base::Closure image_manager_task =
514 base::Bind(&InProcessCommandBuffer::RemoveImageOnGpuThread, 541 base::Bind(&InProcessCommandBuffer::RemoveImageOnGpuThread,
515 base::Unretained(this), 542 base::Unretained(this),
516 image_id); 543 image_id);
517 QueueTask(image_manager_task); 544 QueueTask(image_manager_task);
518 } 545 }
519 546
520 void InProcessCommandBuffer::RemoveImageOnGpuThread(unsigned int image_id) { 547 void InProcessCommandBuffer::RemoveImageOnGpuThread(unsigned int image_id) {
548 CheckSequencedThread();
521 decoder_->GetContextGroup()->image_manager()->RemoveImage(image_id); 549 decoder_->GetContextGroup()->image_manager()->RemoveImage(image_id);
522 } 550 }
523 551
524 void InProcessCommandBuffer::OnContextLost() { 552 void InProcessCommandBuffer::OnContextLost() {
553 CheckSequencedThread();
525 if (!context_lost_callback_.is_null()) { 554 if (!context_lost_callback_.is_null()) {
526 context_lost_callback_.Run(); 555 context_lost_callback_.Run();
527 context_lost_callback_.Reset(); 556 context_lost_callback_.Reset();
528 } 557 }
529 558
530 context_lost_ = true; 559 context_lost_ = true;
531 if (share_resources_) { 560 if (share_resources_) {
532 for (std::set<InProcessCommandBuffer*>::iterator it = 561 for (std::set<InProcessCommandBuffer*>::iterator it =
533 g_all_shared_contexts.Get().begin(); 562 g_all_shared_contexts.Get().begin();
534 it != g_all_shared_contexts.Get().end(); 563 it != g_all_shared_contexts.Get().end();
535 ++it) { 564 ++it) {
536 (*it)->context_lost_ = true; 565 (*it)->context_lost_ = true;
537 } 566 }
538 } 567 }
539 } 568 }
540 569
541 CommandBuffer::State InProcessCommandBuffer::GetStateFast() { 570 CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
571 CheckSequencedThread();
542 base::AutoLock lock(command_buffer_lock_); 572 base::AutoLock lock(command_buffer_lock_);
543 return last_state_ = command_buffer_->GetState(); 573 return last_state_ = command_buffer_->GetState();
544 } 574 }
545 575
546 CommandBuffer::State InProcessCommandBuffer::GetState() { 576 CommandBuffer::State InProcessCommandBuffer::GetState() {
577 CheckSequencedThread();
547 return GetStateFast(); 578 return GetStateFast();
548 } 579 }
549 580
550 CommandBuffer::State InProcessCommandBuffer::GetLastState() { 581 CommandBuffer::State InProcessCommandBuffer::GetLastState() {
582 CheckSequencedThread();
551 return last_state_; 583 return last_state_;
552 } 584 }
553 585
554 int32 InProcessCommandBuffer::GetLastToken() { return last_state_.token; } 586 int32 InProcessCommandBuffer::GetLastToken() {
587 CheckSequencedThread();
588 return last_state_.token;
589 }
555 590
556 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) { 591 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
592 CheckSequencedThread();
557 base::AutoLock lock(command_buffer_lock_); 593 base::AutoLock lock(command_buffer_lock_);
558 command_buffer_->Flush(put_offset); 594 command_buffer_->Flush(put_offset);
559 } 595 }
560 596
561 void InProcessCommandBuffer::Flush(int32 put_offset) { 597 void InProcessCommandBuffer::Flush(int32 put_offset) {
598 CheckSequencedThread();
562 if (last_state_.error != gpu::error::kNoError) 599 if (last_state_.error != gpu::error::kNoError)
563 return; 600 return;
564 601
565 if (last_put_offset_ == put_offset) 602 if (last_put_offset_ == put_offset)
566 return; 603 return;
567 604
568 last_put_offset_ = put_offset; 605 last_put_offset_ = put_offset;
569 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, 606 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
570 base::Unretained(this), 607 base::Unretained(this),
571 put_offset); 608 put_offset);
572 QueueTask(task); 609 QueueTask(task);
573 } 610 }
574 611
575 CommandBuffer::State InProcessCommandBuffer::FlushSync(int32 put_offset, 612 CommandBuffer::State InProcessCommandBuffer::FlushSync(int32 put_offset,
576 int32 last_known_get) { 613 int32 last_known_get) {
614 CheckSequencedThread();
577 if (put_offset == last_known_get || last_state_.error != gpu::error::kNoError) 615 if (put_offset == last_known_get || last_state_.error != gpu::error::kNoError)
578 return last_state_; 616 return last_state_;
579 617
580 Flush(put_offset); 618 Flush(put_offset);
581 GetStateFast(); 619 GetStateFast();
582 while (last_known_get == last_state_.get_offset && 620 while (last_known_get == last_state_.get_offset &&
583 last_state_.error == gpu::error::kNoError) { 621 last_state_.error == gpu::error::kNoError) {
584 flush_event_.Wait(); 622 flush_event_.Wait();
585 GetStateFast(); 623 GetStateFast();
586 } 624 }
587 625
588 return last_state_; 626 return last_state_;
589 } 627 }
590 628
591 void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) { 629 void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
630 CheckSequencedThread();
592 if (last_state_.error != gpu::error::kNoError) 631 if (last_state_.error != gpu::error::kNoError)
593 return; 632 return;
594 633
595 { 634 {
596 base::AutoLock lock(command_buffer_lock_); 635 base::AutoLock lock(command_buffer_lock_);
597 command_buffer_->SetGetBuffer(shm_id); 636 command_buffer_->SetGetBuffer(shm_id);
598 last_put_offset_ = 0; 637 last_put_offset_ = 0;
599 } 638 }
600 GetStateFast(); 639 GetStateFast();
601 } 640 }
602 641
603 gpu::Buffer InProcessCommandBuffer::CreateTransferBuffer(size_t size, 642 gpu::Buffer InProcessCommandBuffer::CreateTransferBuffer(size_t size,
604 int32* id) { 643 int32* id) {
644 CheckSequencedThread();
605 base::AutoLock lock(command_buffer_lock_); 645 base::AutoLock lock(command_buffer_lock_);
606 return command_buffer_->CreateTransferBuffer(size, id); 646 return command_buffer_->CreateTransferBuffer(size, id);
607 } 647 }
608 648
609 void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) { 649 void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
650 CheckSequencedThread();
610 base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer, 651 base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer,
611 base::Unretained(command_buffer_.get()), 652 base::Unretained(command_buffer_.get()),
612 id); 653 id);
613 654
614 QueueTask(task); 655 QueueTask(task);
615 } 656 }
616 657
617 gpu::Buffer InProcessCommandBuffer::GetTransferBuffer(int32 id) { 658 gpu::Buffer InProcessCommandBuffer::GetTransferBuffer(int32 id) {
618 NOTREACHED(); 659 NOTREACHED();
619 return gpu::Buffer(); 660 return gpu::Buffer();
620 } 661 }
621 662
622 uint32 InProcessCommandBuffer::InsertSyncPoint() { 663 uint32 InProcessCommandBuffer::InsertSyncPoint() {
623 NOTREACHED(); 664 NOTREACHED();
624 return 0; 665 return 0;
625 } 666 }
626 void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point, 667 void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
627 const base::Closure& callback) { 668 const base::Closure& callback) {
669 CheckSequencedThread();
628 QueueTask(WrapCallback(callback)); 670 QueueTask(WrapCallback(callback));
629 } 671 }
630 672
631 gpu::error::Error InProcessCommandBuffer::GetLastError() { 673 gpu::error::Error InProcessCommandBuffer::GetLastError() {
674 CheckSequencedThread();
632 return last_state_.error; 675 return last_state_.error;
633 } 676 }
634 677
635 bool InProcessCommandBuffer::Initialize() { 678 bool InProcessCommandBuffer::Initialize() {
636 NOTREACHED(); 679 NOTREACHED();
637 return false; 680 return false;
638 } 681 }
639 682
640 void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); } 683 void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); }
641 684
(...skipping 52 matching lines...)
694 g_uses_explicit_scheduling = true; 737 g_uses_explicit_scheduling = true;
695 g_gpu_queue.Get().SetScheduleCallback(callback); 738 g_gpu_queue.Get().SetScheduleCallback(callback);
696 } 739 }
697 740
698 // static 741 // static
699 void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() { 742 void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() {
700 g_gpu_queue.Get().RunTasks(); 743 g_gpu_queue.Get().RunTasks();
701 } 744 }
702 745
703 } // namespace gpu 746 } // namespace gpu