OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "gpu/command_buffer/service/in_process_command_buffer.h" |
| 6 |
| 7 #include <queue> |
| 8 #include <utility> |
| 9 |
| 10 #include <GLES2/gl2.h> |
| 11 #ifndef GL_GLEXT_PROTOTYPES |
| 12 #define GL_GLEXT_PROTOTYPES 1 |
| 13 #endif |
| 14 #include <GLES2/gl2ext.h> |
| 15 #include <GLES2/gl2extchromium.h> |
| 16 |
| 17 #include "base/bind.h" |
| 18 #include "base/bind_helpers.h" |
| 19 #include "base/lazy_instance.h" |
| 20 #include "base/logging.h" |
| 21 #include "base/memory/weak_ptr.h" |
| 22 #include "base/message_loop/message_loop_proxy.h" |
| 23 #include "base/threading/thread.h" |
| 24 #include "gpu/command_buffer/common/id_allocator.h" |
| 25 #include "gpu/command_buffer/service/command_buffer_service.h" |
| 26 #include "gpu/command_buffer/service/context_group.h" |
| 27 #include "gpu/command_buffer/service/gl_context_virtual.h" |
| 28 #include "gpu/command_buffer/service/gpu_scheduler.h" |
| 29 #include "gpu/command_buffer/service/image_manager.h" |
| 30 #include "gpu/command_buffer/service/transfer_buffer_manager.h" |
| 31 #include "ui/gfx/size.h" |
| 32 #include "ui/gl/gl_context.h" |
| 33 #include "ui/gl/gl_image.h" |
| 34 #include "ui/gl/gl_share_group.h" |
| 35 #include "ui/gl/gl_surface.h" |
| 36 |
| 37 namespace gpu { |
| 38 |
| 39 namespace { |
| 40 |
| 41 static base::LazyInstance<std::set<InProcessCommandBuffer*> > |
| 42 g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER; |
| 43 |
| 44 static bool g_use_virtualized_gl_context = false; |
| 45 static bool g_uses_explicit_scheduling = false; |
| 46 |
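| // Runs |task|, stores its return value in |result| and signals |completion| |
| // so the thread waiting on the event can read the result. |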
| 47 template <typename T> |
| 48 static void RunTaskWithResult(base::Callback<T(void)> task, |
| 49 T* result, |
| 50 base::WaitableEvent* completion) { |
| 51 *result = task.Run(); |
| 52 completion->Signal(); |
| 53 } |
| 54 |
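| // A self-starting, reference-counted thread used as the shared GPU thread |
| // when no explicit schedule callback has been installed. |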
| 55 class GpuInProcessThread |
| 56 : public base::Thread, |
| 57 public base::RefCountedThreadSafe<GpuInProcessThread> { |
| 58 public: |
| 59 GpuInProcessThread(); |
| 60 |
| 61 private: |
| 62 friend class base::RefCountedThreadSafe<GpuInProcessThread>; |
| 63 virtual ~GpuInProcessThread(); |
| 64 |
| 65 DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread); |
| 66 }; |
| 67 |
| 68 GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") { |
| 69 Start(); |
| 70 } |
| 71 |
| 72 GpuInProcessThread::~GpuInProcessThread() { |
| 73 Stop(); |
| 74 } |
| 75 |
| 76 // Used with explicit scheduling when there is no dedicated GPU thread. |
| 77 class GpuCommandQueue { |
| 78 public: |
| 79 GpuCommandQueue(); |
| 80 ~GpuCommandQueue(); |
| 81 |
| 82 void QueueTask(const base::Closure& task); |
| 83 void RunTasks(); |
| 84 void SetScheduleCallback(const base::Closure& callback); |
| 85 |
| 86 private: |
| 87 base::Lock tasks_lock_; |
| 88 std::queue<base::Closure> tasks_; |
| 89 base::Closure schedule_callback_; |
| 90 |
| 91 DISALLOW_COPY_AND_ASSIGN(GpuCommandQueue); |
| 92 }; |
| 93 |
| 94 GpuCommandQueue::GpuCommandQueue() {} |
| 95 |
| 96 GpuCommandQueue::~GpuCommandQueue() { |
| 97 base::AutoLock lock(tasks_lock_); |
| 98 DCHECK(tasks_.empty()); |
| 99 } |
| 100 |
| 101 void GpuCommandQueue::QueueTask(const base::Closure& task) { |
| 102 { |
| 103 base::AutoLock lock(tasks_lock_); |
| 104 tasks_.push(task); |
| 105 } |
| 106 |
| 107 DCHECK(!schedule_callback_.is_null()); |
| 108 schedule_callback_.Run(); |
| 109 } |
| 110 |
| 111 void GpuCommandQueue::RunTasks() { |
| 112 size_t num_tasks; |
| 113 { |
| 114 base::AutoLock lock(tasks_lock_); |
| 115 num_tasks = tasks_.size(); |
| 116 } |
| 117 |
| 118 while (num_tasks) { |
| 119 base::Closure task; |
| 120 { |
| 121 base::AutoLock lock(tasks_lock_); |
| 122 task = tasks_.front(); |
| 123 tasks_.pop(); |
| 124 num_tasks = tasks_.size(); |
| 125 } |
| 126 |
| 127 task.Run(); |
| 128 } |
| 129 } |
| 130 |
| 131 void GpuCommandQueue::SetScheduleCallback(const base::Closure& callback) { |
| 132 DCHECK(schedule_callback_.is_null()); |
| 133 schedule_callback_ = callback; |
| 134 } |
| 135 |
| 136 static base::LazyInstance<GpuCommandQueue> g_gpu_queue = |
| 137 LAZY_INSTANCE_INITIALIZER; |
| 138 |
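| // Base class for scheduler clients. Keeps a registry of live clients so a |
| // single GpuInProcessThread can be shared between all of them. |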
| 139 class SchedulerClientBase : public InProcessCommandBuffer::SchedulerClient { |
| 140 public: |
| 141 explicit SchedulerClientBase(bool need_thread); |
| 142 virtual ~SchedulerClientBase(); |
| 143 |
| 144 static bool HasClients(); |
| 145 |
| 146 protected: |
| 147 scoped_refptr<GpuInProcessThread> thread_; |
| 148 |
| 149 private: |
| 150 static base::LazyInstance<std::set<SchedulerClientBase*> > all_clients_; |
| 151 static base::LazyInstance<base::Lock> all_clients_lock_; |
| 152 }; |
| 153 |
| 154 base::LazyInstance<std::set<SchedulerClientBase*> > |
| 155 SchedulerClientBase::all_clients_ = LAZY_INSTANCE_INITIALIZER; |
| 156 base::LazyInstance<base::Lock> SchedulerClientBase::all_clients_lock_ = |
| 157 LAZY_INSTANCE_INITIALIZER; |
| 158 |
| 159 SchedulerClientBase::SchedulerClientBase(bool need_thread) { |
| 160 base::AutoLock lock(all_clients_lock_.Get()); |
| 161 if (need_thread) { |
| 162 if (!all_clients_.Get().empty()) { |
| 163 SchedulerClientBase* other = *all_clients_.Get().begin(); |
| 164 thread_ = other->thread_; |
| 165 DCHECK(thread_.get()); |
| 166 } else { |
| 167 thread_ = new GpuInProcessThread; |
| 168 } |
| 169 } |
| 170 all_clients_.Get().insert(this); |
| 171 } |
| 172 |
| 173 SchedulerClientBase::~SchedulerClientBase() { |
| 174 base::AutoLock lock(all_clients_lock_.Get()); |
| 175 all_clients_.Get().erase(this); |
| 176 } |
| 177 |
| 178 bool SchedulerClientBase::HasClients() { |
| 179 base::AutoLock lock(all_clients_lock_.Get()); |
| 180 return !all_clients_.Get().empty(); |
| 181 } |
| 182 |
| 183 // A client that talks to the GPU thread. |
| 184 class ThreadClient : public SchedulerClientBase { |
| 185 public: |
| 186 ThreadClient(); |
| 187 virtual void QueueTask(const base::Closure& task) OVERRIDE; |
| 188 }; |
| 189 |
| 190 ThreadClient::ThreadClient() : SchedulerClientBase(true) { |
| 191 DCHECK(thread_.get()); |
| 192 } |
| 193 |
| 194 void ThreadClient::QueueTask(const base::Closure& task) { |
| 195 thread_->message_loop()->PostTask(FROM_HERE, task); |
| 196 } |
| 197 |
| 198 // A client that talks to the GpuCommandQueue. |
| 199 class QueueClient : public SchedulerClientBase { |
| 200 public: |
| 201 QueueClient(); |
| 202 virtual void QueueTask(const base::Closure& task) OVERRIDE; |
| 203 }; |
| 204 |
| 205 QueueClient::QueueClient() : SchedulerClientBase(false) { |
| 206 DCHECK(!thread_.get()); |
| 207 } |
| 208 |
| 209 void QueueClient::QueueTask(const base::Closure& task) { |
| 210 g_gpu_queue.Get().QueueTask(task); |
| 211 } |
| 212 |
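| // Chooses the scheduling mode for a new command buffer: the explicitly |
| // pumped GpuCommandQueue if SetScheduleCallback() was called, otherwise a |
| // dedicated GPU thread. |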
| 213 static scoped_ptr<InProcessCommandBuffer::SchedulerClient> |
| 214 CreateSchedulerClient() { |
| 215 scoped_ptr<InProcessCommandBuffer::SchedulerClient> client; |
| 216 if (g_uses_explicit_scheduling) |
| 217 client.reset(new QueueClient); |
| 218 else |
| 219 client.reset(new ThreadClient); |
| 220 |
| 221 return client.Pass(); |
| 222 } |
| 223 |
| 224 class ScopedEvent { |
| 225 public: |
| 226 explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {} |
| 227 ~ScopedEvent() { event_->Signal(); } |
| 228 |
| 229 private: |
| 230 base::WaitableEvent* event_; |
| 231 }; |
| 232 |
| 233 } // anonymous namespace |
| 234 |
| 235 InProcessCommandBuffer::InProcessCommandBuffer() |
| 236 : context_lost_(false), |
| 237 share_group_id_(0), |
| 238 last_put_offset_(-1), |
| 239 flush_event_(false, false), |
| 240 queue_(CreateSchedulerClient()) {} |
| 241 |
| 242 InProcessCommandBuffer::~InProcessCommandBuffer() { |
| 243 Destroy(); |
| 244 } |
| 245 |
| 246 bool InProcessCommandBuffer::IsContextLost() { |
| 247 if (context_lost_ || !command_buffer_) { |
| 248 return true; |
| 249 } |
| 250 CommandBuffer::State state = GetState(); |
| 251 return error::IsError(state.error); |
| 252 } |
| 253 |
| 254 void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) { |
| 255 DCHECK(!surface_->IsOffscreen()); |
| 256 surface_->Resize(size); |
| 257 } |
| 258 |
| 259 bool InProcessCommandBuffer::MakeCurrent() { |
| 260 command_buffer_lock_.AssertAcquired(); |
| 261 |
| 262 if (!context_lost_ && decoder_->MakeCurrent()) |
| 263 return true; |
| 264 DLOG(ERROR) << "Context lost because MakeCurrent failed."; |
| 265 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); |
| 266 command_buffer_->SetParseError(gpu::error::kLostContext); |
| 267 return false; |
| 268 } |
| 269 |
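| // Runs on the GPU thread whenever the put offset changes; signals |
| // |flush_event_| on exit (via ScopedEvent) so FlushSync() can wake up. |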
| 270 void InProcessCommandBuffer::PumpCommands() { |
| 271 ScopedEvent handle_flush(&flush_event_); |
| 272 command_buffer_lock_.AssertAcquired(); |
| 273 |
| 274 if (!MakeCurrent()) |
| 275 return; |
| 276 |
| 277 gpu_scheduler_->PutChanged(); |
| 278 CommandBuffer::State state = command_buffer_->GetState(); |
| 279 DCHECK((!error::IsError(state.error) && !context_lost_) || |
| 280 (error::IsError(state.error) && context_lost_)); |
| 281 } |
| 282 |
| 283 bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) { |
| 284 command_buffer_lock_.AssertAcquired(); |
| 285 command_buffer_->SetGetBuffer(transfer_buffer_id); |
| 286 return true; |
| 287 } |
| 288 |
| 289 bool InProcessCommandBuffer::Initialize( |
| 290 bool is_offscreen, |
| 291 bool share_resources, |
| 292 gfx::AcceleratedWidget window, |
| 293 const gfx::Size& size, |
| 294 const char* allowed_extensions, |
| 295 const std::vector<int32>& attribs, |
| 296 gfx::GpuPreference gpu_preference, |
| 297 const base::Closure& context_lost_callback, |
| 298 unsigned int share_group_id) { |
| 299 |
| 300 share_resources_ = share_resources; |
| 301 context_lost_callback_ = WrapCallback(context_lost_callback); |
| 302 share_group_id_ = share_group_id; |
| 303 |
| 304 base::WaitableEvent completion(true, false); |
| 305 bool result; |
| 306 base::Callback<bool(void)> init_task = |
| 307 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread, |
| 308 base::Unretained(this), |
| 309 is_offscreen, |
| 310 window, |
| 311 size, |
| 312 allowed_extensions, |
| 313 attribs, |
| 314 gpu_preference); |
| 315 QueueTask( |
| 316 base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion)); |
| 317 completion.Wait(); |
| 318 return result; |
| 319 } |
| 320 |
| 321 bool InProcessCommandBuffer::InitializeOnGpuThread( |
| 322 bool is_offscreen, |
| 323 gfx::AcceleratedWidget window, |
| 324 const gfx::Size& size, |
| 325 const char* allowed_extensions, |
| 326 const std::vector<int32>& attribs, |
| 327 gfx::GpuPreference gpu_preference) { |
| 328 // Use one share group for all contexts. |
| 329 CR_DEFINE_STATIC_LOCAL(scoped_refptr<gfx::GLShareGroup>, share_group, |
| 330 (new gfx::GLShareGroup)); |
| 331 |
| 332 DCHECK(size.width() >= 0 && size.height() >= 0); |
| 333 |
| 334 TransferBufferManager* manager = new TransferBufferManager(); |
| 335 transfer_buffer_manager_.reset(manager); |
| 336 manager->Initialize(); |
| 337 |
| 338 scoped_ptr<CommandBufferService> command_buffer( |
| 339 new CommandBufferService(transfer_buffer_manager_.get())); |
| 340 command_buffer->SetPutOffsetChangeCallback(base::Bind( |
| 341 &InProcessCommandBuffer::PumpCommands, base::Unretained(this))); |
| 342 command_buffer->SetParseErrorCallback(base::Bind( |
| 343 &InProcessCommandBuffer::OnContextLost, base::Unretained(this))); |
| 344 |
| 345 if (!command_buffer->Initialize()) { |
| 346 LOG(ERROR) << "Could not initialize command buffer."; |
| 347 DestroyOnGpuThread(); |
| 348 return false; |
| 349 } |
| 350 |
| 351 InProcessCommandBuffer* context_group = NULL; |
| 352 |
| 353 if (share_resources_ && !g_all_shared_contexts.Get().empty()) { |
| 354 DCHECK(share_group_id_); |
| 355 for (std::set<InProcessCommandBuffer*>::iterator it = |
| 356 g_all_shared_contexts.Get().begin(); |
| 357 it != g_all_shared_contexts.Get().end(); |
| 358 ++it) { |
| 359 if ((*it)->share_group_id_ == share_group_id_) { |
| 360 context_group = *it; |
| 361 DCHECK(context_group->share_resources_); |
| 362 context_lost_ = context_group->IsContextLost(); |
| 363 break; |
| 364 } |
| 365 } |
| 366 if (!context_group) |
| 367 share_group = new gfx::GLShareGroup; |
| 368 } |
| 369 |
| 370 bool bind_generates_resource = false; |
| 371 decoder_.reset(gles2::GLES2Decoder::Create( |
| 372 context_group ? context_group->decoder_->GetContextGroup() |
| 373 : new gles2::ContextGroup( |
| 374 NULL, NULL, NULL, NULL, bind_generates_resource))); |
| 375 |
| 376 gpu_scheduler_.reset( |
| 377 new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get())); |
| 378 command_buffer->SetGetBufferChangeCallback(base::Bind( |
| 379 &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get()))); |
| 380 command_buffer_ = command_buffer.Pass(); |
| 381 |
| 382 decoder_->set_engine(gpu_scheduler_.get()); |
| 383 |
| 384 if (is_offscreen) |
| 385 surface_ = gfx::GLSurface::CreateOffscreenGLSurface(size); |
| 386 else |
| 387 surface_ = gfx::GLSurface::CreateViewGLSurface(window); |
| 388 |
| 389 if (!surface_.get()) { |
| 390 LOG(ERROR) << "Could not create GLSurface."; |
| 391 DestroyOnGpuThread(); |
| 392 return false; |
| 393 } |
| 394 |
| 395 if (g_use_virtualized_gl_context) { |
| 396 context_ = share_group->GetSharedContext(); |
| 397 if (!context_.get()) { |
| 398 context_ = gfx::GLContext::CreateGLContext( |
| 399 share_group.get(), surface_.get(), gpu_preference); |
| 400 share_group->SetSharedContext(context_.get()); |
| 401 } |
| 402 |
| 403 context_ = new GLContextVirtual( |
| 404 share_group.get(), context_.get(), decoder_->AsWeakPtr()); |
| 405 if (context_->Initialize(surface_.get(), gpu_preference)) { |
| 406 VLOG(1) << "Created virtual GL context."; |
| 407 } else { |
| 408 context_ = NULL; |
| 409 } |
| 410 } else { |
| 411 context_ = gfx::GLContext::CreateGLContext( |
| 412 share_group.get(), surface_.get(), gpu_preference); |
| 413 } |
| 414 |
| 415 if (!context_.get()) { |
| 416 LOG(ERROR) << "Could not create GLContext."; |
| 417 DestroyOnGpuThread(); |
| 418 return false; |
| 419 } |
| 420 |
| 421 if (!context_->MakeCurrent(surface_.get())) { |
| 422 LOG(ERROR) << "Could not make context current."; |
| 423 DestroyOnGpuThread(); |
| 424 return false; |
| 425 } |
| 426 |
| 427 gles2::DisallowedFeatures disallowed_features; |
| 428 disallowed_features.swap_buffer_complete_callback = true; |
| 429 disallowed_features.gpu_memory_manager = true; |
| 430 if (!decoder_->Initialize(surface_, |
| 431 context_, |
| 432 is_offscreen, |
| 433 size, |
| 434 disallowed_features, |
| 435 allowed_extensions, |
| 436 attribs)) { |
| 437 LOG(ERROR) << "Could not initialize decoder."; |
| 438 DestroyOnGpuThread(); |
| 439 return false; |
| 440 } |
| 441 |
| 442 if (!is_offscreen) { |
| 443 decoder_->SetResizeCallback(base::Bind( |
| 444 &InProcessCommandBuffer::OnResizeView, base::Unretained(this))); |
| 445 } |
| 446 |
| 447 if (share_resources_) { |
| 448 g_all_shared_contexts.Pointer()->insert(this); |
| 449 } |
| 450 |
| 451 return true; |
| 452 } |
| 453 |
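| // Blocks the client thread until DestroyOnGpuThread() has released the |
| // decoder, context and surface. |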
| 454 void InProcessCommandBuffer::Destroy() { |
| 455 base::WaitableEvent completion(true, false); |
| 456 bool result; |
| 457 base::Callback<bool(void)> destroy_task = base::Bind( |
| 458 &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this)); |
| 459 QueueTask( |
| 460 base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion)); |
| 461 completion.Wait(); |
| 462 } |
| 463 |
| 464 bool InProcessCommandBuffer::DestroyOnGpuThread() { |
| 465 command_buffer_.reset(); |
| 466 // Clean up GL resources if possible. |
| 467 bool have_context = context_ && context_->MakeCurrent(surface_); |
| 468 if (decoder_) { |
| 469 decoder_->Destroy(have_context); |
| 470 decoder_.reset(); |
| 471 } |
| 472 context_ = NULL; |
| 473 surface_ = NULL; |
| 474 |
| 475 g_all_shared_contexts.Pointer()->erase(this); |
| 476 return true; |
| 477 } |
| 478 |
| 479 unsigned int InProcessCommandBuffer::CreateImageForGpuMemoryBuffer( |
| 480 gfx::GpuMemoryBufferHandle buffer, |
| 481 gfx::Size size) { |
| 482 unsigned int image_id; |
| 483 { |
| 484 // TODO: ID allocation should go through CommandBuffer |
| 485 base::AutoLock lock(command_buffer_lock_); |
| 486 gles2::ContextGroup* group = decoder_->GetContextGroup(); |
| 487 image_id = |
| 488 group->GetIdAllocator(gles2::id_namespaces::kImages)->AllocateID(); |
| 489 } |
| 490 base::Closure image_task = |
| 491 base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread, |
| 492 base::Unretained(this), buffer, size, image_id); |
| 493 QueueTask(image_task); |
| 494 return image_id; |
| 495 } |
| 496 |
| 497 void InProcessCommandBuffer::CreateImageOnGpuThread( |
| 498 gfx::GpuMemoryBufferHandle buffer, |
| 499 gfx::Size size, |
| 500 unsigned int image_id) { |
| 501 scoped_refptr<gfx::GLImage> gl_image = |
| 502 gfx::GLImage::CreateGLImageForGpuMemoryBuffer(buffer, size); |
| 503 decoder_->GetContextGroup()->image_manager()->AddImage(gl_image, image_id); |
| 504 } |
| 505 |
| 506 void InProcessCommandBuffer::RemoveImage(unsigned int image_id) { |
| 507 { |
| 508 // TODO: ID allocation should go through CommandBuffer |
| 509 base::AutoLock lock(command_buffer_lock_); |
| 510 gles2::ContextGroup* group = decoder_->GetContextGroup(); |
| 511 group->GetIdAllocator(gles2::id_namespaces::kImages)->FreeID(image_id); |
| 512 } |
| 513 base::Closure image_manager_task = |
| 514 base::Bind(&InProcessCommandBuffer::RemoveImageOnGpuThread, |
| 515 base::Unretained(this), |
| 516 image_id); |
| 517 QueueTask(image_manager_task); |
| 518 } |
| 519 |
| 520 void InProcessCommandBuffer::RemoveImageOnGpuThread(unsigned int image_id) { |
| 521 decoder_->GetContextGroup()->image_manager()->RemoveImage(image_id); |
| 522 } |
| 523 |
| 524 void InProcessCommandBuffer::OnContextLost() { |
| 525 if (!context_lost_callback_.is_null()) |
| 526 context_lost_callback_.Run(); |
| 527 |
| 528 context_lost_ = true; |
| 529 if (share_resources_) { |
| 530 for (std::set<InProcessCommandBuffer*>::iterator it = |
| 531 g_all_shared_contexts.Get().begin(); |
| 532 it != g_all_shared_contexts.Get().end(); |
| 533 ++it) { |
| 534 (*it)->context_lost_ = true; |
| 535 } |
| 536 } |
| 537 } |
| 538 |
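| // Reads the service-side state under |command_buffer_lock_| and caches it |
| // in |last_state_|. |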
| 539 CommandBuffer::State InProcessCommandBuffer::GetStateFast() { |
| 540 base::AutoLock lock(command_buffer_lock_); |
| 541 return last_state_ = command_buffer_->GetState(); |
| 542 } |
| 543 |
| 544 CommandBuffer::State InProcessCommandBuffer::GetState() { |
| 545 return GetStateFast(); |
| 546 } |
| 547 |
| 548 CommandBuffer::State InProcessCommandBuffer::GetLastState() { |
| 549 return last_state_; |
| 550 } |
| 551 |
| 552 int32 InProcessCommandBuffer::GetLastToken() { return last_state_.token; } |
| 553 |
| 554 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) { |
| 555 base::AutoLock lock(command_buffer_lock_); |
| 556 command_buffer_->Flush(put_offset); |
| 557 } |
| 558 |
| 559 void InProcessCommandBuffer::Flush(int32 put_offset) { |
| 560 if (last_state_.error != gpu::error::kNoError) |
| 561 return; |
| 562 |
| 563 if (last_put_offset_ == put_offset) |
| 564 return; |
| 565 |
| 566 last_put_offset_ = put_offset; |
| 567 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, |
| 568 base::Unretained(this), |
| 569 put_offset); |
| 570 QueueTask(task); |
| 571 } |
| 572 |
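| // Flushes, then blocks until the service has consumed commands past |
| // |last_known_get| or an error is set; each PumpCommands() run signals |
| // |flush_event_|. |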
| 573 CommandBuffer::State InProcessCommandBuffer::FlushSync(int32 put_offset, |
| 574 int32 last_known_get) { |
| 575 if (put_offset == last_known_get || last_state_.error != gpu::error::kNoError) |
| 576 return last_state_; |
| 577 |
| 578 Flush(put_offset); |
| 579 GetStateFast(); |
| 580 while (last_known_get == last_state_.get_offset && |
| 581 last_state_.error == gpu::error::kNoError) { |
| 582 flush_event_.Wait(); |
| 583 GetStateFast(); |
| 584 } |
| 585 |
| 586 return last_state_; |
| 587 } |
| 588 |
| 589 void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) { |
| 590 if (last_state_.error != gpu::error::kNoError) |
| 591 return; |
| 592 |
| 593 { |
| 594 base::AutoLock lock(command_buffer_lock_); |
| 595 command_buffer_->SetGetBuffer(shm_id); |
| 596 last_put_offset_ = 0; |
| 597 } |
| 598 GetStateFast(); |
| 599 } |
| 600 |
| 601 gpu::Buffer InProcessCommandBuffer::CreateTransferBuffer(size_t size, |
| 602 int32* id) { |
| 603 base::AutoLock lock(command_buffer_lock_); |
| 604 return command_buffer_->CreateTransferBuffer(size, id); |
| 605 } |
| 606 |
| 607 void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) { |
| 608 base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer, |
| 609 base::Unretained(command_buffer_.get()), |
| 610 id); |
| 611 |
| 612 QueueTask(task); |
| 613 } |
| 614 |
| 615 gpu::Buffer InProcessCommandBuffer::GetTransferBuffer(int32 id) { |
| 616 NOTREACHED(); |
| 617 return gpu::Buffer(); |
| 618 } |
| 619 |
| 620 uint32 InProcessCommandBuffer::InsertSyncPoint() { |
| 621 NOTREACHED(); |
| 622 return 0; |
| 623 } |
| 624 void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point, |
| 625 const base::Closure& callback) { |
| 626 QueueTask(WrapCallback(callback)); |
| 627 } |
| 628 |
| 629 gpu::error::Error InProcessCommandBuffer::GetLastError() { |
| 630 return last_state_.error; |
| 631 } |
| 632 |
| 633 bool InProcessCommandBuffer::Initialize() { |
| 634 NOTREACHED(); |
| 635 return false; |
| 636 } |
| 637 |
| 638 void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); } |
| 639 |
| 640 void InProcessCommandBuffer::SetToken(int32 token) { NOTREACHED(); } |
| 641 |
| 642 void InProcessCommandBuffer::SetParseError(gpu::error::Error error) { |
| 643 NOTREACHED(); |
| 644 } |
| 645 |
| 646 void InProcessCommandBuffer::SetContextLostReason( |
| 647 gpu::error::ContextLostReason reason) { |
| 648 NOTREACHED(); |
| 649 } |
| 650 |
| 651 namespace { |
| 652 |
| 653 static void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop, |
| 654 const base::Closure& callback) { |
| 655 if (loop != base::MessageLoopProxy::current()) |
| 656 loop->PostTask(FROM_HERE, callback); |
| 657 else |
| 658 callback.Run(); |
| 659 } |
| 660 |
| 661 static void RunCallback(scoped_ptr<base::Closure> callback) { |
| 662 DCHECK(callback.get()); |
| 663 callback->Run(); |
| 664 } |
| 665 |
| 666 } // anonymous namespace |
| 667 |
| 668 base::Closure InProcessCommandBuffer::WrapCallback( |
| 669 const base::Closure& callback) { |
| 670 // Make sure the callback gets deleted on the target thread by passing |
| 671 // ownership. |
| 672 scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback)); |
| 673 base::Closure callback_on_client_thread = |
| 674 base::Bind(&RunCallback, base::Passed(&scoped_callback)); |
| 675 base::Closure wrapped_callback = |
| 676 base::Bind(&PostCallback, base::MessageLoopProxy::current(), |
| 677 callback_on_client_thread); |
| 678 return wrapped_callback; |
| 679 } |
| 680 |
| 681 // static |
| 682 void InProcessCommandBuffer::EnableVirtualizedContext() { |
| 683 g_use_virtualized_gl_context = true; |
| 684 } |
| 685 |
| 686 // static |
| 687 void InProcessCommandBuffer::SetScheduleCallback( |
| 688 const base::Closure& callback) { |
| 689 DCHECK(!g_uses_explicit_scheduling); |
| 690 DCHECK(!SchedulerClientBase::HasClients()); |
| 691 g_uses_explicit_scheduling = true; |
| 692 g_gpu_queue.Get().SetScheduleCallback(callback); |
| 693 } |
| 694 |
| 695 // static |
| 696 void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() { |
| 697 g_gpu_queue.Get().RunTasks(); |
| 698 } |
| 699 |
| 700 } // namespace gpu |