Chromium Code Reviews

| OLD | NEW |
|---|---|
| (Empty) | |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "gpu/command_buffer/service/in_process_command_buffer.h" | |
| 6 | |
| 7 #include <queue> | |
| 8 #include <utility> | |
| 9 | |
| 10 #include <GLES2/gl2.h> | |
| 11 #ifndef GL_GLEXT_PROTOTYPES | |
| 12 #define GL_GLEXT_PROTOTYPES 1 | |
| 13 #endif | |
| 14 #include <GLES2/gl2ext.h> | |
| 15 #include <GLES2/gl2extchromium.h> | |
| 16 | |
| 17 #include "base/bind.h" | |
| 18 #include "base/bind_helpers.h" | |
| 19 #include "base/lazy_instance.h" | |
| 20 #include "base/logging.h" | |
| 21 #include "base/memory/scoped_ptr.h" | |
| 22 #include "base/memory/weak_ptr.h" | |
| 23 #include "base/message_loop/message_loop.h" | |
| 24 #include "base/message_loop/message_loop_proxy.h" | |
| 25 #include "base/threading/thread.h" | |
| 26 #include "gpu/command_buffer/common/id_allocator.h" | |
| 27 #include "gpu/command_buffer/service/command_buffer_service.h" | |
| 28 #include "gpu/command_buffer/service/context_group.h" | |
| 29 #include "gpu/command_buffer/service/gl_context_virtual.h" | |
| 30 #include "gpu/command_buffer/service/gpu_scheduler.h" | |
| 31 #include "gpu/command_buffer/service/image_manager.h" | |
| 32 #include "gpu/command_buffer/service/transfer_buffer_manager.h" | |
| 33 #include "ui/gfx/size.h" | |
| 34 #include "ui/gl/gl_context.h" | |
| 35 #include "ui/gl/gl_image.h" | |
| 36 #include "ui/gl/gl_share_group.h" | |
| 37 #include "ui/gl/gl_surface.h" | |
| 38 | |
| 39 namespace gpu { | |
| 40 | |
| 41 namespace { | |
| 42 | |
| 43 static base::LazyInstance<std::set<InProcessCommandBuffer*> > | |
| 44 g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER; | |
| 45 | |
| 46 static bool g_use_virtualized_gl_context = false; | |
| 47 | |
| 48 template <typename T> | |
| 49 static void RunTaskWithResult(base::Callback<T(void)> task, | |
| 50 T* result, | |
| 51 base::WaitableEvent* completion) { | |
| 52 *result = task.Run(); | |
| 53 completion->Signal(); | |
| 54 } | |
| 55 | |
| 56 class GpuInProcessThread : public base::Thread { | |
| 57 public: | |
| 58 GpuInProcessThread(); | |
| 59 virtual ~GpuInProcessThread(); | |
| 60 | |
| 61 private: | |
| 62 DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread); | |
| 63 }; | |
| 64 | |
| 65 GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") { | |
| 66 Start(); | |
| 67 } | |
| 68 | |
| 69 GpuInProcessThread::~GpuInProcessThread() { | |
| 70 Stop(); | |
| 71 } | |
| 72 | |
| 73 class GpuCommandQueue { | |
| 74 public: | |
| 75 GpuCommandQueue(); | |
| 76 virtual ~GpuCommandQueue(); | |
| 77 | |
| 78 void QueueTask(const base::Closure& task); | |
| 79 void RunTasks(); | |
| 80 void SetScheduleCallback(const base::Closure& callback); | |
| 81 | |
| 82 private: | |
| 83 base::Lock tasks_lock_; | |
| 84 std::queue<base::Closure> tasks_; | |
| 85 | |
| 86 base::Closure schedule_callback_; | |
| 87 base::LazyInstance<GpuInProcessThread>::Leaky thread_; | |
|
piman
2013/07/30 03:51:44
Can we avoid leaking the thread? E.g. when the last context is destroyed?
no sievers
2013/07/31 18:19:28
Done. I have made the thread refcounted and added …
| |
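The fix sievers describes can be sketched as below. This is a hypothetical illustration of the refcounting approach, not the code that landed in this CL; the class name and exact wiring are assumptions.

```cpp
// Hypothetical sketch: a GPU thread whose lifetime is tied to its clients via
// reference counting, so the last Release() joins the thread instead of
// leaking it through a Leaky LazyInstance.
#include "base/memory/ref_counted.h"
#include "base/threading/thread.h"

class RefCountedGpuThread
    : public base::Thread,
      public base::RefCountedThreadSafe<RefCountedGpuThread> {
 public:
  RefCountedGpuThread() : base::Thread("GpuThread") { Start(); }

 private:
  friend class base::RefCountedThreadSafe<RefCountedGpuThread>;
  virtual ~RefCountedGpuThread() { Stop(); }  // Runs on the last Release().

  DISALLOW_COPY_AND_ASSIGN(RefCountedGpuThread);
};
```

Each InProcessCommandBuffer would then hold a scoped_refptr<RefCountedGpuThread> for its lifetime, so destroying the last command buffer stops and joins the thread.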
| 88 | |
| 89 DISALLOW_COPY_AND_ASSIGN(GpuCommandQueue); | |
| 90 }; | |
| 91 | |
| 92 GpuCommandQueue::GpuCommandQueue() {} | |
| 93 | |
| 94 GpuCommandQueue::~GpuCommandQueue() { | |
| 95 base::AutoLock lock(tasks_lock_); | |
| 96 DCHECK(tasks_.empty()); | |
| 97 } | |
| 98 | |
| 99 void GpuCommandQueue::QueueTask(const base::Closure& task) { | |
| 100 { | |
| 101 base::AutoLock lock(tasks_lock_); | |
| 102 tasks_.push(task); | |
| 103 } | |
| 104 | |
| 105 if (!schedule_callback_.is_null()) { | |
| 106 schedule_callback_.Run(); | |
| 107 return; | |
| 108 } | |
| 109 thread_.Get().message_loop() | |
| 110 ->PostTask(FROM_HERE, | |
| 111 base::Bind(&GpuCommandQueue::RunTasks, | |
| 112 base::Unretained(this))); | |
| 113 } | |
| 114 | |
| 115 void GpuCommandQueue::RunTasks() { | |
| 116 size_t num_tasks; | |
| 117 { | |
| 118 base::AutoLock lock(tasks_lock_); | |
| 119 num_tasks = tasks_.size(); | |
| 120 } | |
| 121 | |
| 122 while (num_tasks) { | |
| 123 base::Closure task; | |
| 124 { | |
| 125 base::AutoLock lock(tasks_lock_); | |
| 126 task = tasks_.front(); | |
| 127 tasks_.pop(); | |
| 128 num_tasks = tasks_.size(); | |
| 129 } | |
| 130 | |
| 131 task.Run(); | |
| 132 } | |
| 133 } | |
| 134 | |
| 135 void GpuCommandQueue::SetScheduleCallback(const base::Closure& callback) { | |
| 136 DCHECK(schedule_callback_.is_null()); | |
| 137 schedule_callback_ = callback; | |
| 138 } | |
| 139 | |
| 140 static base::LazyInstance<GpuCommandQueue>::Leaky g_gpu_queue = | |
| 141 LAZY_INSTANCE_INITIALIZER; | |
|
piman
2013/07/30 03:51:44
Same here, can we avoid leaking the queue?
no sievers
2013/07/31 18:19:28
Done.
| |
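As with the thread above, the same ownership change presumably applies to the queue: if each client holds a reference (directly, or via the refcounted thread sketched earlier), the last InProcessCommandBuffer to go away tears the queue down instead of leaking it through LazyInstance::Leaky.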
| 142 | |
| 143 static void QueueTask(const base::Closure& task) { | |
| 144 g_gpu_queue.Get().QueueTask(task); | |
| 145 } | |
| 146 | |
| 147 class ScopedEvent { | |
| 148 public: | |
| 149 explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {} | |
| 150 ~ScopedEvent() { event_->Signal(); } | |
| 151 | |
| 152 private: | |
| 153 base::WaitableEvent* event_; | |
| 154 }; | |
| 155 | |
| 156 } // anonymous namespace | |
| 157 | |
| 158 InProcessCommandBuffer::InProcessCommandBuffer() | |
| 159 : context_lost_(false), | |
| 160 share_group_id_(0), | |
| 161 last_put_offset_(-1), | |
| 162 flush_event_(false, false) {} | |
| 163 | |
| 164 InProcessCommandBuffer::~InProcessCommandBuffer() { | |
| 165 Destroy(); | |
| 166 } | |
| 167 | |
| 168 bool InProcessCommandBuffer::IsContextLost() { | |
| 169 if (context_lost_ || !command_buffer_) { | |
| 170 return true; | |
| 171 } | |
| 172 CommandBuffer::State state = GetState(); | |
| 173 return error::IsError(state.error); | |
| 174 } | |
| 175 | |
| 176 void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) { | |
| 177 DCHECK(!surface_->IsOffscreen()); | |
| 178 surface_->Resize(size); | |
| 179 } | |
| 180 | |
| 181 bool InProcessCommandBuffer::MakeCurrent() { | |
| 182 command_buffer_lock_.AssertAcquired(); | |
| 183 | |
| 184 if (!context_lost_ && decoder_->MakeCurrent()) | |
| 185 return true; | |
| 186 DLOG(ERROR) << "Context lost because MakeCurrent failed."; | |
| 187 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); | |
| 188 command_buffer_->SetParseError(gpu::error::kLostContext); | |
| 189 return false; | |
| 190 } | |
| 191 | |
| 192 void InProcessCommandBuffer::PumpCommands() { | |
| 193 ScopedEvent handle_flush(&flush_event_); | |
| 194 command_buffer_lock_.AssertAcquired(); | |
| 195 | |
| 196 if (!MakeCurrent()) | |
| 197 return; | |
| 198 | |
| 199 gpu_scheduler_->PutChanged(); | |
| 200 CommandBuffer::State state = command_buffer_->GetState(); | |
| 201 DCHECK((!error::IsError(state.error) && !context_lost_) || | |
| 202 (error::IsError(state.error) && context_lost_)); | |
| 203 } | |
| 204 | |
| 205 bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) { | |
| 206 command_buffer_lock_.AssertAcquired(); | |
| 207 command_buffer_->SetGetBuffer(transfer_buffer_id); | |
| 208 return true; | |
| 209 } | |
| 210 | |
| 211 bool InProcessCommandBuffer::Initialize( | |
| 212 bool is_offscreen, | |
| 213 bool share_resources, | |
| 214 gfx::AcceleratedWidget window, | |
| 215 const gfx::Size& size, | |
| 216 const char* allowed_extensions, | |
| 217 const std::vector<int32>& attribs, | |
| 218 gfx::GpuPreference gpu_preference, | |
| 219 const base::Closure& context_lost_callback, | |
| 220 unsigned int share_group_id) { | |
| 221 | |
| 222 share_resources_ = share_resources; | |
| 223 context_lost_callback_ = WrapCallback(context_lost_callback); | |
| 224 share_group_id_ = share_group_id; | |
| 225 | |
| 226 base::WaitableEvent completion(true, false); | |
| 227 bool result; | |
| 228 base::Callback<bool(void)> init_task = | |
| 229 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread, | |
| 230 base::Unretained(this), | |
| 231 is_offscreen, | |
| 232 window, | |
| 233 size, | |
| 234 allowed_extensions, | |
| 235 attribs, | |
| 236 gpu_preference); | |
| 237 QueueTask( | |
| 238 base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion)); | |
| 239 completion.Wait(); | |
| 240 return result; | |
| 241 } | |
| 242 | |
| 243 bool InProcessCommandBuffer::InitializeOnGpuThread( | |
| 244 bool is_offscreen, | |
| 245 gfx::AcceleratedWidget window, | |
| 246 const gfx::Size& size, | |
| 247 const char* allowed_extensions, | |
| 248 const std::vector<int32>& attribs, | |
| 249 gfx::GpuPreference gpu_preference) { | |
| 250 // Use one share group for all contexts. | |
| 251 CR_DEFINE_STATIC_LOCAL(scoped_refptr<gfx::GLShareGroup>, share_group, | |
| 252 (new gfx::GLShareGroup)); | |
| 253 | |
| 254 DCHECK(size.width() >= 0 && size.height() >= 0); | |
| 255 | |
| 256 TransferBufferManager* manager = new TransferBufferManager(); | |
| 257 transfer_buffer_manager_.reset(manager); | |
| 258 manager->Initialize(); | |
| 259 | |
| 260 scoped_ptr<CommandBufferService> command_buffer( | |
| 261 new CommandBufferService(transfer_buffer_manager_.get())); | |
| 262 command_buffer->SetPutOffsetChangeCallback(base::Bind( | |
| 263 &InProcessCommandBuffer::PumpCommands, base::Unretained(this))); | |
| 264 command_buffer->SetParseErrorCallback(base::Bind( | |
| 265 &InProcessCommandBuffer::OnContextLost, base::Unretained(this))); | |
| 266 | |
| 267 if (!command_buffer->Initialize()) { | |
| 268 LOG(ERROR) << "Could not initialize command buffer."; | |
| 269 Destroy(); | |
| 270 return false; | |
| 271 } | |
| 272 | |
| 273 InProcessCommandBuffer* context_group = NULL; | |
| 274 | |
| 275 if (share_resources_ && !g_all_shared_contexts.Get().empty()) { | |
| 276 DCHECK(share_group_id_); | |
| 277 for (std::set<InProcessCommandBuffer*>::iterator it = | |
| 278 g_all_shared_contexts.Get().begin(); | |
| 279 it != g_all_shared_contexts.Get().end(); | |
| 280 ++it) { | |
| 281 if ((*it)->share_group_id_ == share_group_id_) { | |
| 282 context_group = *it; | |
| 283 DCHECK(context_group->share_resources_); | |
| 284 context_lost_ = context_group->IsContextLost(); | |
| 285 break; | |
| 286 } | |
| 287 } | |
| 288 if (!context_group) | |
| 289 share_group = new gfx::GLShareGroup; | |
| 290 } | |
| 291 | |
| 292 // TODO(gman): This needs to be true if this is Pepper. | |
|
piman
2013/07/30 03:51:44
nit: remove obsolete comment - Pepper will not use this path.
no sievers
2013/07/31 18:19:28
Done.
| |
| 293 bool bind_generates_resource = false; | |
| 294 decoder_.reset(gles2::GLES2Decoder::Create( | |
| 295 context_group ? context_group->decoder_->GetContextGroup() | |
| 296 : new gles2::ContextGroup( | |
| 297 NULL, NULL, NULL, NULL, bind_generates_resource))); | |
| 298 | |
| 299 gpu_scheduler_.reset( | |
| 300 new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get())); | |
| 301 command_buffer->SetGetBufferChangeCallback(base::Bind( | |
| 302 &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get()))); | |
| 303 command_buffer_ = command_buffer.Pass(); | |
| 304 | |
| 305 decoder_->set_engine(gpu_scheduler_.get()); | |
| 306 | |
| 307 if (is_offscreen) | |
| 308 surface_ = gfx::GLSurface::CreateOffscreenGLSurface(size); | |
| 309 else | |
| 310 surface_ = gfx::GLSurface::CreateViewGLSurface(window); | |
| 311 | |
| 312 if (!surface_.get()) { | |
| 313 LOG(ERROR) << "Could not create GLSurface."; | |
| 314 Destroy(); | |
|
piman
2013/07/30 03:51:44
DestroyOnGpuThread
no sievers
2013/07/31 18:19:28
Done.
| |
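Why DestroyOnGpuThread() rather than Destroy(): this code is already running inside the queue's RunTasks() loop on the GPU thread, and Destroy() (diff lines 377-385) queues another task and then blocks on a WaitableEvent. The commented excerpt below traces the self-deadlock:

```cpp
// Inside InitializeOnGpuThread(), i.e. inside a task run by RunTasks():
Destroy();
//   -> QueueTask(base::Bind(&RunTaskWithResult<bool>, destroy_task,
//                           &result, &completion));
//   -> completion.Wait();  // Blocks the GPU thread itself, so the queued
//                          // destroy task can never be popped and run.
// Calling DestroyOnGpuThread() directly performs the cleanup synchronously
// and avoids waiting on the very thread we are standing on.
```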
| 315 return false; | |
| 316 } | |
| 317 | |
| 318 if (g_use_virtualized_gl_context) { | |
| 319 context_ = share_group->GetSharedContext(); | |
| 320 if (!context_.get()) { | |
| 321 context_ = gfx::GLContext::CreateGLContext( | |
| 322 share_group.get(), surface_.get(), gpu_preference); | |
| 323 share_group->SetSharedContext(context_.get()); | |
| 324 } | |
| 325 | |
| 326 context_ = new GLContextVirtual( | |
| 327 share_group.get(), context_.get(), decoder_->AsWeakPtr()); | |
| 328 if (context_->Initialize(surface_.get(), gpu_preference)) { | |
| 329 VLOG(1) << "Created virtual GL context."; | |
| 330 } else { | |
| 331 context_ = NULL; | |
| 332 } | |
| 333 } else { | |
| 334 context_ = gfx::GLContext::CreateGLContext( | |
| 335 share_group.get(), surface_.get(), gpu_preference); | |
| 336 } | |
| 337 | |
| 338 if (!context_.get()) { | |
| 339 LOG(ERROR) << "Could not create GLContext."; | |
| 340 Destroy(); | |
|
piman
2013/07/30 03:51:44
DestroyOnGpuThread
no sievers
2013/07/31 18:19:28
Done.
| |
| 341 return false; | |
| 342 } | |
| 343 | |
| 344 if (!context_->MakeCurrent(surface_.get())) { | |
| 345 LOG(ERROR) << "Could not make context current."; | |
| 346 Destroy(); | |
|
piman
2013/07/30 03:51:44
DestroyOnGpuThread
no sievers
2013/07/31 18:19:28
Done.
| |
| 347 return false; | |
| 348 } | |
| 349 | |
| 350 gles2::DisallowedFeatures disallowed_features; | |
| 351 disallowed_features.swap_buffer_complete_callback = true; | |
| 352 disallowed_features.gpu_memory_manager = true; | |
| 353 if (!decoder_->Initialize(surface_, | |
| 354 context_, | |
| 355 is_offscreen, | |
| 356 size, | |
| 357 disallowed_features, | |
| 358 allowed_extensions, | |
| 359 attribs)) { | |
| 360 LOG(ERROR) << "Could not initialize decoder."; | |
| 361 Destroy(); | |
|
piman
2013/07/30 03:51:44
DestroyOnGpuThread
no sievers
2013/07/31 18:19:28
Done.
| |
| 362 return false; | |
| 363 } | |
| 364 | |
| 365 if (!is_offscreen) { | |
| 366 decoder_->SetResizeCallback(base::Bind( | |
| 367 &InProcessCommandBuffer::OnResizeView, base::Unretained(this))); | |
| 368 } | |
| 369 | |
| 370 if (share_resources_) { | |
| 371 g_all_shared_contexts.Pointer()->insert(this); | |
| 372 } | |
| 373 | |
| 374 return true; | |
| 375 } | |
| 376 | |
| 377 void InProcessCommandBuffer::Destroy() { | |
| 378 base::WaitableEvent completion(true, false); | |
| 379 bool result; | |
| 380 base::Callback<bool(void)> destroy_task = base::Bind( | |
| 381 &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this)); | |
| 382 QueueTask( | |
| 383 base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion)); | |
| 384 completion.Wait(); | |
| 385 } | |
| 386 | |
| 387 bool InProcessCommandBuffer::DestroyOnGpuThread() { | |
| 388 command_buffer_.reset(); | |
| 389 // Clean up GL resources if possible. | |
| 390 bool have_context = context_ && context_->MakeCurrent(surface_); | |
| 391 if (decoder_) { | |
| 392 decoder_->Destroy(have_context); | |
| 393 decoder_.reset(); | |
| 394 } | |
| 395 context_ = NULL; | |
| 396 surface_ = NULL; | |
| 397 | |
| 398 g_all_shared_contexts.Pointer()->erase(this); | |
| 399 return true; | |
| 400 } | |
| 401 | |
| 402 unsigned int InProcessCommandBuffer::CreateImageForGpuMemoryBuffer( | |
| 403 gfx::GpuMemoryBufferHandle buffer, | |
| 404 gfx::Size size) { | |
| 405 unsigned int image_id; | |
| 406 { | |
| 407 // TODO: ID allocation should go through CommandBuffer | |
| 408 base::AutoLock lock(command_buffer_lock_); | |
| 409 gles2::ContextGroup* group = decoder_->GetContextGroup(); | |
| 410 image_id = | |
| 411 group->GetIdAllocator(gles2::id_namespaces::kImages)->AllocateID(); | |
| 412 } | |
| 413 base::Closure image_task = | |
| 414 base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread, | |
| 415 base::Unretained(this), buffer, size, image_id); | |
| 416 QueueTask(image_task); | |
| 417 return image_id; | |
| 418 } | |
| 419 | |
| 420 void InProcessCommandBuffer::CreateImageOnGpuThread( | |
| 421 gfx::GpuMemoryBufferHandle buffer, | |
| 422 gfx::Size size, | |
| 423 unsigned int image_id) { | |
| 424 scoped_refptr<gfx::GLImage> gl_image = | |
| 425 gfx::GLImage::CreateGLImageForGpuMemoryBuffer(buffer, size); | |
| 426 decoder_->GetContextGroup()->image_manager()->AddImage(gl_image, image_id); | |
| 427 } | |
| 428 | |
| 429 void InProcessCommandBuffer::RemoveImage(unsigned int image_id) { | |
| 430 { | |
| 431 // TODO: ID allocation should go through CommandBuffer | |
| 432 base::AutoLock lock(command_buffer_lock_); | |
| 433 gles2::ContextGroup* group = decoder_->GetContextGroup(); | |
| 434 group->GetIdAllocator(gles2::id_namespaces::kImages)->FreeID(image_id); | |
| 435 } | |
| 436 base::Closure image_manager_task = | |
| 437 base::Bind(&InProcessCommandBuffer::RemoveImageOnGpuThread, | |
| 438 base::Unretained(this), | |
| 439 image_id); | |
| 440 QueueTask(image_manager_task); | |
| 441 } | |
| 442 | |
| 443 void InProcessCommandBuffer::RemoveImageOnGpuThread(unsigned int image_id) { | |
| 444 decoder_->GetContextGroup()->image_manager()->RemoveImage(image_id); | |
| 445 } | |
| 446 | |
| 447 void InProcessCommandBuffer::OnContextLost() { | |
| 448 if (!context_lost_callback_.is_null()) | |
| 449 context_lost_callback_.Run(); | |
| 450 | |
| 451 context_lost_ = true; | |
| 452 if (share_resources_) { | |
| 453 for (std::set<InProcessCommandBuffer*>::iterator it = | |
| 454 g_all_shared_contexts.Get().begin(); | |
| 455 it != g_all_shared_contexts.Get().end(); | |
| 456 ++it) { | |
| 457 (*it)->context_lost_ = true; | |
| 458 if (!(*it)->context_lost_callback_.is_null() && (*it) != this) | |
| 459 (*it)->context_lost_callback_.Run(); | |
| 460 } | |
| 461 } | |
| 462 } | |
| 463 | |
| 464 CommandBuffer::State InProcessCommandBuffer::GetStateFast() { | |
| 465 base::AutoLock lock(command_buffer_lock_); | |
| 466 return last_state_ = command_buffer_->GetState(); | |
| 467 } | |
| 468 | |
| 469 CommandBuffer::State InProcessCommandBuffer::GetState() { | |
| 470 return GetStateFast(); | |
| 471 } | |
| 472 | |
| 473 CommandBuffer::State InProcessCommandBuffer::GetLastState() { | |
| 474 return last_state_; | |
| 475 } | |
| 476 | |
| 477 int32 InProcessCommandBuffer::GetLastToken() { return last_state_.token; } | |
| 478 | |
| 479 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) { | |
| 480 base::AutoLock lock(command_buffer_lock_); | |
| 481 command_buffer_->Flush(put_offset); | |
| 482 } | |
| 483 | |
| 484 void InProcessCommandBuffer::Flush(int32 put_offset) { | |
| 485 if (last_state_.error != gpu::error::kNoError) | |
| 486 return; | |
| 487 | |
| 488 if (last_put_offset_ == put_offset) | |
| 489 return; | |
| 490 | |
| 491 last_put_offset_ = put_offset; | |
| 492 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, | |
| 493 base::Unretained(this), | |
| 494 put_offset); | |
| 495 QueueTask(task); | |
| 496 } | |
| 497 | |
| 498 CommandBuffer::State InProcessCommandBuffer::FlushSync(int32 put_offset, | |
| 499 int32 last_known_get) { | |
| 500 if (put_offset == last_known_get || last_state_.error != gpu::error::kNoError) | |
| 501 return last_state_; | |
| 502 | |
| 503 Flush(put_offset); | |
| 504 GetStateFast(); | |
| 505 while (last_known_get == last_state_.get_offset && | |
| 506 last_state_.error == gpu::error::kNoError) { | |
| 507 flush_event_.Wait(); | |
| 508 GetStateFast(); | |
| 509 } | |
| 510 | |
| 511 return last_state_; | |
| 512 } | |
| 513 | |
| 514 void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) { | |
| 515 if (last_state_.error != gpu::error::kNoError) | |
| 516 return; | |
| 517 | |
| 518 { | |
| 519 base::AutoLock lock(command_buffer_lock_); | |
| 520 command_buffer_->SetGetBuffer(shm_id); | |
| 521 last_put_offset_ = 0; | |
| 522 } | |
| 523 GetStateFast(); | |
| 524 } | |
| 525 | |
| 526 gpu::Buffer InProcessCommandBuffer::CreateTransferBuffer(size_t size, | |
| 527 int32* id) { | |
| 528 base::AutoLock lock(command_buffer_lock_); | |
| 529 return command_buffer_->CreateTransferBuffer(size, id); | |
| 530 } | |
| 531 | |
| 532 void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) { | |
| 533 base::AutoLock lock(command_buffer_lock_); | |
|
piman
2013/07/30 03:51:44
I don't think you need this lock:
- this is called …
no sievers
2013/07/31 18:19:28
Done.
| |
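piman's comment is truncated, but the visible part matches the code: this method only binds a raw pointer and queues a task; the actual CommandBuffer::DestroyTransferBuffer() call runs later on the GPU thread. A sketch of the function with the lock dropped, assuming that is the change that landed:

```cpp
void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  // No lock needed here: base::Bind only copies the |command_buffer_|
  // pointer; the destructive call itself is serialized on the GPU thread.
  base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer,
                                  base::Unretained(command_buffer_.get()),
                                  id);
  QueueTask(task);
}
```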
| 534 base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer, | |
| 535 base::Unretained(command_buffer_.get()), | |
| 536 id); | |
| 537 | |
| 538 QueueTask(task); | |
| 539 } | |
| 540 | |
| 541 gpu::Buffer InProcessCommandBuffer::GetTransferBuffer(int32 id) { | |
| 542 NOTREACHED(); | |
| 543 return gpu::Buffer(); | |
| 544 } | |
| 545 | |
| 546 uint32 InProcessCommandBuffer::InsertSyncPoint() { | |
| 547 NOTREACHED(); | |
| 548 return 0; | |
| 549 } | |
| 550 void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point, | |
| 551 const base::Closure& callback) { | |
| 552 QueueTask(WrapCallback(callback)); | |
| 553 } | |
| 554 | |
| 555 gpu::error::Error InProcessCommandBuffer::GetLastError() { | |
| 556 return last_state_.error; | |
| 557 } | |
| 558 | |
| 559 bool InProcessCommandBuffer::Initialize() { | |
| 560 NOTREACHED(); | |
| 561 return false; | |
| 562 } | |
| 563 | |
| 564 void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); } | |
| 565 | |
| 566 void InProcessCommandBuffer::SetToken(int32 token) { NOTREACHED(); } | |
| 567 | |
| 568 void InProcessCommandBuffer::SetParseError(gpu::error::Error error) { | |
| 569 NOTREACHED(); | |
| 570 } | |
| 571 | |
| 572 void InProcessCommandBuffer::SetContextLostReason( | |
| 573 gpu::error::ContextLostReason reason) { | |
| 574 NOTREACHED(); | |
| 575 } | |
| 576 | |
| 577 static void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop, | |
| 578 const base::Closure& callback) { | |
| 579 if (loop != base::MessageLoopProxy::current()) | |
| 580 loop->PostTask(FROM_HERE, callback); | |
| 581 else | |
| 582 callback.Run(); | |
| 583 } | |
| 584 | |
| 585 base::Closure InProcessCommandBuffer::WrapCallback( | |
| 586 const base::Closure& callback) { | |
| 587 base::Closure wrapped_callback = | |
| 588 base::Bind(&PostCallback, base::MessageLoopProxy::current(), callback); | |
|
piman
2013/07/30 03:51:44
This pattern is (unfortunately) dangerous: |callback| …
no sievers
2013/07/31 18:19:28
Thanks for pointing that out!
This is not getting …
| |
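The hazard piman flags, sketched with a hypothetical client object (ThreadAffineObject and OnLost are illustrative, not from this CL): base::Bind copies |callback| into the wrapped closure, so whichever thread last holds that closure also runs the destructors of everything bound inside it.

```cpp
// Hypothetical client code built on top of WrapCallback():
scoped_refptr<ThreadAffineObject> object(new ThreadAffineObject);
base::Closure cb = base::Bind(&ThreadAffineObject::OnLost, object);

// WrapCallback(cb) binds a copy of |cb| into a closure that is handed to the
// GPU thread. When that closure is destroyed there -- whether or not it ever
// ran -- the copy of |cb|, and with it |object|'s reference, is released on
// the GPU thread rather than on the thread that created |object|.
```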
| 589 return wrapped_callback; | |
| 590 } | |
| 591 | |
| 592 // static | |
| 593 void InProcessCommandBuffer::EnableVirtualizedContext() { | |
| 594 g_use_virtualized_gl_context = true; | |
| 595 } | |
| 596 | |
| 597 // static | |
| 598 void InProcessCommandBuffer::SetScheduleCallback( | |
| 599 const base::Closure& callback) { | |
| 600 g_gpu_queue.Get().SetScheduleCallback(callback); | |
| 601 } | |
| 602 | |
| 603 // static | |
| 604 void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() { | |
| 605 g_gpu_queue.Get().RunTasks(); | |
| 606 } | |
| 607 | |
| 608 } // namespace gpu | |