| OLD | NEW |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "gpu/command_buffer/service/in_process_command_buffer.h" | 5 #include "gpu/command_buffer/service/in_process_command_buffer.h" |
| 6 | 6 |
| 7 #include <queue> | 7 #include <queue> |
| 8 #include <utility> | 8 #include <utility> |
| 9 | 9 |
| 10 #include <GLES2/gl2.h> | 10 #include <GLES2/gl2.h> |
| (...skipping 25 matching lines...) |
| 36 | 36 |
| 37 #if defined(OS_ANDROID) | 37 #if defined(OS_ANDROID) |
| 38 #include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h" | 38 #include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h" |
| 39 #include "ui/gl/android/surface_texture.h" | 39 #include "ui/gl/android/surface_texture.h" |
| 40 #endif | 40 #endif |
| 41 | 41 |
| 42 namespace gpu { | 42 namespace gpu { |
| 43 | 43 |
| 44 namespace { | 44 namespace { |
| 45 | 45 |
| 46 static bool g_use_virtualized_gl_context = false; | |
| 47 static bool g_uses_explicit_scheduling = false; | |
| 48 static GpuMemoryBufferFactory* g_gpu_memory_buffer_factory = NULL; | 46 static GpuMemoryBufferFactory* g_gpu_memory_buffer_factory = NULL; |
| 49 | 47 |
| 50 template <typename T> | 48 template <typename T> |
| 51 static void RunTaskWithResult(base::Callback<T(void)> task, | 49 static void RunTaskWithResult(base::Callback<T(void)> task, |
| 52 T* result, | 50 T* result, |
| 53 base::WaitableEvent* completion) { | 51 base::WaitableEvent* completion) { |
| 54 *result = task.Run(); | 52 *result = task.Run(); |
| 55 completion->Signal(); | 53 completion->Signal(); |
| 56 } | 54 } |
| 57 | 55 |
| 58 class GpuInProcessThread | 56 class GpuInProcessThread |
| 59 : public base::Thread, | 57 : public base::Thread, |
| 58 public InProcessCommandBuffer::Service, |
| 60 public base::RefCountedThreadSafe<GpuInProcessThread> { | 59 public base::RefCountedThreadSafe<GpuInProcessThread> { |
| 61 public: | 60 public: |
| 62 GpuInProcessThread(); | 61 GpuInProcessThread(); |
| 63 | 62 |
| 63 virtual void AddRef() const OVERRIDE { |
| 64 base::RefCountedThreadSafe<GpuInProcessThread>::AddRef(); |
| 65 } |
| 66 virtual void Release() const OVERRIDE { |
| 67 base::RefCountedThreadSafe<GpuInProcessThread>::Release(); |
| 68 } |
| 69 |
| 70 virtual void ScheduleTask(const base::Closure& task) OVERRIDE; |
| 71 virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE; |
| 72 virtual bool UseVirtualizedGLContexts() OVERRIDE { return false; } |
| 73 |
| 64 private: | 74 private: |
| 75 virtual ~GpuInProcessThread(); |
| 65 friend class base::RefCountedThreadSafe<GpuInProcessThread>; | 76 friend class base::RefCountedThreadSafe<GpuInProcessThread>; |
| 66 virtual ~GpuInProcessThread(); | |
| 67 | 77 |
| 68 DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread); | 78 DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread); |
| 69 }; | 79 }; |
| 70 | 80 |
| 71 GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") { | 81 GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") { |
| 72 Start(); | 82 Start(); |
| 73 } | 83 } |
| 74 | 84 |
| 75 GpuInProcessThread::~GpuInProcessThread() { | 85 GpuInProcessThread::~GpuInProcessThread() { |
| 76 Stop(); | 86 Stop(); |
| 77 } | 87 } |
| 78 | 88 |
| 79 // Used with explicit scheduling when there is no dedicated GPU thread. | 89 void GpuInProcessThread::ScheduleTask(const base::Closure& task) { |
| 80 class GpuCommandQueue { | 90 message_loop()->PostTask(FROM_HERE, task); |
| 81 public: | |
| 82 GpuCommandQueue(); | |
| 83 ~GpuCommandQueue(); | |
| 84 | |
| 85 void QueueTask(const base::Closure& task); | |
| 86 void RunTasks(); | |
| 87 void SetScheduleCallback(const base::Closure& callback); | |
| 88 | |
| 89 private: | |
| 90 base::Lock tasks_lock_; | |
| 91 std::queue<base::Closure> tasks_; | |
| 92 base::Closure schedule_callback_; | |
| 93 | |
| 94 DISALLOW_COPY_AND_ASSIGN(GpuCommandQueue); | |
| 95 }; | |
| 96 | |
| 97 GpuCommandQueue::GpuCommandQueue() {} | |
| 98 | |
| 99 GpuCommandQueue::~GpuCommandQueue() { | |
| 100 base::AutoLock lock(tasks_lock_); | |
| 101 DCHECK(tasks_.empty()); | |
| 102 } | 91 } |
| 103 | 92 |
| 104 void GpuCommandQueue::QueueTask(const base::Closure& task) { | 93 void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) { |
| 105 { | 94 message_loop()->PostDelayedTask( |
| 106 base::AutoLock lock(tasks_lock_); | |
| 107 tasks_.push(task); | |
| 108 } | |
| 109 | |
| 110 DCHECK(!schedule_callback_.is_null()); | |
| 111 schedule_callback_.Run(); | |
| 112 } | |
| 113 | |
| 114 void GpuCommandQueue::RunTasks() { | |
| 115 size_t num_tasks; | |
| 116 { | |
| 117 base::AutoLock lock(tasks_lock_); | |
| 118 num_tasks = tasks_.size(); | |
| 119 } | |
| 120 | |
| 121 while (num_tasks) { | |
| 122 base::Closure task; | |
| 123 { | |
| 124 base::AutoLock lock(tasks_lock_); | |
| 125 task = tasks_.front(); | |
| 126 tasks_.pop(); | |
| 127 num_tasks = tasks_.size(); | |
| 128 } | |
| 129 | |
| 130 task.Run(); | |
| 131 } | |
| 132 } | |
| 133 | |
| 134 void GpuCommandQueue::SetScheduleCallback(const base::Closure& callback) { | |
| 135 DCHECK(schedule_callback_.is_null()); | |
| 136 schedule_callback_ = callback; | |
| 137 } | |
| 138 | |
| 139 static base::LazyInstance<GpuCommandQueue> g_gpu_queue = | |
| 140 LAZY_INSTANCE_INITIALIZER; | |
| 141 | |
| 142 class SchedulerClientBase : public InProcessCommandBuffer::SchedulerClient { | |
| 143 public: | |
| 144 explicit SchedulerClientBase(bool need_thread); | |
| 145 virtual ~SchedulerClientBase(); | |
| 146 | |
| 147 static bool HasClients(); | |
| 148 | |
| 149 protected: | |
| 150 scoped_refptr<GpuInProcessThread> thread_; | |
| 151 | |
| 152 private: | |
| 153 static base::LazyInstance<std::set<SchedulerClientBase*> > all_clients_; | |
| 154 static base::LazyInstance<base::Lock> all_clients_lock_; | |
| 155 }; | |
| 156 | |
| 157 base::LazyInstance<std::set<SchedulerClientBase*> > | |
| 158 SchedulerClientBase::all_clients_ = LAZY_INSTANCE_INITIALIZER; | |
| 159 base::LazyInstance<base::Lock> SchedulerClientBase::all_clients_lock_ = | |
| 160 LAZY_INSTANCE_INITIALIZER; | |
| 161 | |
| 162 SchedulerClientBase::SchedulerClientBase(bool need_thread) { | |
| 163 base::AutoLock lock(all_clients_lock_.Get()); | |
| 164 if (need_thread) { | |
| 165 if (!all_clients_.Get().empty()) { | |
| 166 SchedulerClientBase* other = *all_clients_.Get().begin(); | |
| 167 thread_ = other->thread_; | |
| 168 DCHECK(thread_.get()); | |
| 169 } else { | |
| 170 thread_ = new GpuInProcessThread; | |
| 171 } | |
| 172 } | |
| 173 all_clients_.Get().insert(this); | |
| 174 } | |
| 175 | |
| 176 SchedulerClientBase::~SchedulerClientBase() { | |
| 177 base::AutoLock lock(all_clients_lock_.Get()); | |
| 178 all_clients_.Get().erase(this); | |
| 179 } | |
| 180 | |
| 181 bool SchedulerClientBase::HasClients() { | |
| 182 base::AutoLock lock(all_clients_lock_.Get()); | |
| 183 return !all_clients_.Get().empty(); | |
| 184 } | |
| 185 | |
| 186 // A client that talks to the GPU thread | |
| 187 class ThreadClient : public SchedulerClientBase { | |
| 188 public: | |
| 189 ThreadClient(); | |
| 190 virtual void QueueTask(const base::Closure& task) OVERRIDE; | |
| 191 virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE; | |
| 192 }; | |
| 193 | |
| 194 ThreadClient::ThreadClient() : SchedulerClientBase(true) { | |
| 195 DCHECK(thread_.get()); | |
| 196 } | |
| 197 | |
| 198 void ThreadClient::QueueTask(const base::Closure& task) { | |
| 199 thread_->message_loop()->PostTask(FROM_HERE, task); | |
| 200 } | |
| 201 | |
| 202 void ThreadClient::ScheduleIdleWork(const base::Closure& callback) { | |
| 203 thread_->message_loop()->PostDelayedTask( | |
| 204 FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5)); | 95 FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5)); |
| 205 } | 96 } |
| 206 | 97 |
| 207 // A client that talks to the GpuCommandQueue | 98 base::LazyInstance<std::set<InProcessCommandBuffer*> > default_thread_clients_ = |
| 208 class QueueClient : public SchedulerClientBase { | 99 LAZY_INSTANCE_INITIALIZER; |
| 209 public: | 100 base::LazyInstance<base::Lock> default_thread_clients_lock_ = |
| 210 QueueClient(); | 101 LAZY_INSTANCE_INITIALIZER; |
| 211 virtual void QueueTask(const base::Closure& task) OVERRIDE; | |
| 212 virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE; | |
| 213 }; | |
| 214 | |
| 215 QueueClient::QueueClient() : SchedulerClientBase(false) { | |
| 216 DCHECK(!thread_.get()); | |
| 217 } | |
| 218 | |
| 219 void QueueClient::QueueTask(const base::Closure& task) { | |
| 220 g_gpu_queue.Get().QueueTask(task); | |
| 221 } | |
| 222 | |
| 223 void QueueClient::ScheduleIdleWork(const base::Closure& callback) { | |
| 224 // TODO(sievers): Should this do anything? | |
| 225 } | |
| 226 | |
| 227 static scoped_ptr<InProcessCommandBuffer::SchedulerClient> | |
| 228 CreateSchedulerClient() { | |
| 229 scoped_ptr<InProcessCommandBuffer::SchedulerClient> client; | |
| 230 if (g_uses_explicit_scheduling) | |
| 231 client.reset(new QueueClient); | |
| 232 else | |
| 233 client.reset(new ThreadClient); | |
| 234 | |
| 235 return client.Pass(); | |
| 236 } | |
| 237 | 102 |
| 238 class ScopedEvent { | 103 class ScopedEvent { |
| 239 public: | 104 public: |
| 240 ScopedEvent(base::WaitableEvent* event) : event_(event) {} | 105 ScopedEvent(base::WaitableEvent* event) : event_(event) {} |
| 241 ~ScopedEvent() { event_->Signal(); } | 106 ~ScopedEvent() { event_->Signal(); } |
| 242 | 107 |
| 243 private: | 108 private: |
| 244 base::WaitableEvent* event_; | 109 base::WaitableEvent* event_; |
| 245 }; | 110 }; |
| 246 | 111 |
| 247 } // anonymous namespace | 112 } // anonymous namespace |
| 248 | 113 |
| 249 InProcessCommandBuffer::InProcessCommandBuffer() | 114 InProcessCommandBuffer::Service::Service() {} |
| 115 |
| 116 InProcessCommandBuffer::Service::~Service() {} |
| 117 |
| 118 scoped_refptr<InProcessCommandBuffer::Service> |
| 119 InProcessCommandBuffer::GetDefaultService() { |
| 120 base::AutoLock lock(default_thread_clients_lock_.Get()); |
| 121 scoped_refptr<Service> service; |
| 122 if (!default_thread_clients_.Get().empty()) { |
| 123 InProcessCommandBuffer* other = *default_thread_clients_.Get().begin(); |
| 124 service = other->service_; |
| 125 DCHECK(service.get()); |
| 126 } else { |
| 127 service = new GpuInProcessThread; |
| 128 } |
| 129 return service; |
| 130 } |
| 131 |
| 132 InProcessCommandBuffer::InProcessCommandBuffer( |
| 133 const scoped_refptr<Service>& service) |
| 250 : context_lost_(false), | 134 : context_lost_(false), |
| 251 last_put_offset_(-1), | 135 last_put_offset_(-1), |
| 252 flush_event_(false, false), | 136 flush_event_(false, false), |
| 253 queue_(CreateSchedulerClient()), | 137 service_(service.get() ? service : GetDefaultService()), |
| 254 gpu_thread_weak_ptr_factory_(this) {} | 138 gpu_thread_weak_ptr_factory_(this) { |
| 139 if (!service) { |
| 140 base::AutoLock lock(default_thread_clients_lock_.Get()); |
| 141 default_thread_clients_.Get().insert(this); |
| 142 } |
| 143 } |
| 255 | 144 |
| 256 InProcessCommandBuffer::~InProcessCommandBuffer() { | 145 InProcessCommandBuffer::~InProcessCommandBuffer() { |
| 257 Destroy(); | 146 Destroy(); |
| 147 base::AutoLock lock(default_thread_clients_lock_.Get()); |
| 148 default_thread_clients_.Get().erase(this); |
| 258 } | 149 } |
| 259 | 150 |
| 260 void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) { | 151 void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) { |
| 261 CheckSequencedThread(); | 152 CheckSequencedThread(); |
| 262 DCHECK(!surface_->IsOffscreen()); | 153 DCHECK(!surface_->IsOffscreen()); |
| 263 surface_->Resize(size); | 154 surface_->Resize(size); |
| 264 } | 155 } |
| 265 | 156 |
| 266 bool InProcessCommandBuffer::MakeCurrent() { | 157 bool InProcessCommandBuffer::MakeCurrent() { |
| 267 CheckSequencedThread(); | 158 CheckSequencedThread(); |
| (...skipping 26 matching lines...) |
| 294 | 185 |
| 295 bool InProcessCommandBuffer::Initialize( | 186 bool InProcessCommandBuffer::Initialize( |
| 296 scoped_refptr<gfx::GLSurface> surface, | 187 scoped_refptr<gfx::GLSurface> surface, |
| 297 bool is_offscreen, | 188 bool is_offscreen, |
| 298 gfx::AcceleratedWidget window, | 189 gfx::AcceleratedWidget window, |
| 299 const gfx::Size& size, | 190 const gfx::Size& size, |
| 300 const std::vector<int32>& attribs, | 191 const std::vector<int32>& attribs, |
| 301 gfx::GpuPreference gpu_preference, | 192 gfx::GpuPreference gpu_preference, |
| 302 const base::Closure& context_lost_callback, | 193 const base::Closure& context_lost_callback, |
| 303 InProcessCommandBuffer* share_group) { | 194 InProcessCommandBuffer* share_group) { |
| 304 | 195 DCHECK(!share_group || service_ == share_group->service_); |
| 305 context_lost_callback_ = WrapCallback(context_lost_callback); | 196 context_lost_callback_ = WrapCallback(context_lost_callback); |
| 306 | 197 |
| 307 if (surface) { | 198 if (surface) { |
| 308 // GPU thread must be the same as client thread due to GLSurface not being | 199 // GPU thread must be the same as client thread due to GLSurface not being |
| 309 // thread safe. | 200 // thread safe. |
| 310 sequence_checker_.reset(new base::SequenceChecker); | 201 sequence_checker_.reset(new base::SequenceChecker); |
| 311 surface_ = surface; | 202 surface_ = surface; |
| 312 } | 203 } |
| 313 | 204 |
| 314 gpu::Capabilities capabilities; | 205 gpu::Capabilities capabilities; |
| (...skipping 76 matching lines...) |
| 391 else | 282 else |
| 392 surface_ = gfx::GLSurface::CreateViewGLSurface(params.window); | 283 surface_ = gfx::GLSurface::CreateViewGLSurface(params.window); |
| 393 } | 284 } |
| 394 | 285 |
| 395 if (!surface_.get()) { | 286 if (!surface_.get()) { |
| 396 LOG(ERROR) << "Could not create GLSurface."; | 287 LOG(ERROR) << "Could not create GLSurface."; |
| 397 DestroyOnGpuThread(); | 288 DestroyOnGpuThread(); |
| 398 return false; | 289 return false; |
| 399 } | 290 } |
| 400 | 291 |
| 401 if (g_use_virtualized_gl_context) { | 292 if (service_->UseVirtualizedGLContexts()) { |
| 402 context_ = gl_share_group_->GetSharedContext(); | 293 context_ = gl_share_group_->GetSharedContext(); |
| 403 if (!context_.get()) { | 294 if (!context_.get()) { |
| 404 context_ = gfx::GLContext::CreateGLContext( | 295 context_ = gfx::GLContext::CreateGLContext( |
| 405 gl_share_group_.get(), surface_.get(), params.gpu_preference); | 296 gl_share_group_.get(), surface_.get(), params.gpu_preference); |
| 406 gl_share_group_->SetSharedContext(context_.get()); | 297 gl_share_group_->SetSharedContext(context_.get()); |
| 407 } | 298 } |
| 408 | 299 |
| 409 context_ = new GLContextVirtual( | 300 context_ = new GLContextVirtual( |
| 410 gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr()); | 301 gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr()); |
| 411 if (context_->Initialize(surface_.get(), params.gpu_preference)) { | 302 if (context_->Initialize(surface_.get(), params.gpu_preference)) { |
| (...skipping 129 matching lines...) |
| 541 base::AutoLock lock(state_after_last_flush_lock_); | 432 base::AutoLock lock(state_after_last_flush_lock_); |
| 542 state_after_last_flush_ = command_buffer_->GetState(); | 433 state_after_last_flush_ = command_buffer_->GetState(); |
| 543 } | 434 } |
| 544 DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) || | 435 DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) || |
| 545 (error::IsError(state_after_last_flush_.error) && context_lost_)); | 436 (error::IsError(state_after_last_flush_.error) && context_lost_)); |
| 546 | 437 |
| 547 // If we've processed all pending commands but still have pending queries, | 438 // If we've processed all pending commands but still have pending queries, |
| 548 // pump idle work until the query is passed. | 439 // pump idle work until the query is passed. |
| 549 if (put_offset == state_after_last_flush_.get_offset && | 440 if (put_offset == state_after_last_flush_.get_offset && |
| 550 gpu_scheduler_->HasMoreWork()) { | 441 gpu_scheduler_->HasMoreWork()) { |
| 551 queue_->ScheduleIdleWork( | 442 service_->ScheduleIdleWork( |
| 552 base::Bind(&InProcessCommandBuffer::ScheduleMoreIdleWork, | 443 base::Bind(&InProcessCommandBuffer::ScheduleMoreIdleWork, |
| 553 gpu_thread_weak_ptr_)); | 444 gpu_thread_weak_ptr_)); |
| 554 } | 445 } |
| 555 } | 446 } |
| 556 | 447 |
| 557 void InProcessCommandBuffer::ScheduleMoreIdleWork() { | 448 void InProcessCommandBuffer::ScheduleMoreIdleWork() { |
| 558 CheckSequencedThread(); | 449 CheckSequencedThread(); |
| 559 base::AutoLock lock(command_buffer_lock_); | 450 base::AutoLock lock(command_buffer_lock_); |
| 560 if (gpu_scheduler_->HasMoreWork()) { | 451 if (gpu_scheduler_->HasMoreWork()) { |
| 561 gpu_scheduler_->PerformIdleWork(); | 452 gpu_scheduler_->PerformIdleWork(); |
| 562 queue_->ScheduleIdleWork( | 453 service_->ScheduleIdleWork( |
| 563 base::Bind(&InProcessCommandBuffer::ScheduleMoreIdleWork, | 454 base::Bind(&InProcessCommandBuffer::ScheduleMoreIdleWork, |
| 564 gpu_thread_weak_ptr_)); | 455 gpu_thread_weak_ptr_)); |
| 565 } | 456 } |
| 566 } | 457 } |
| 567 | 458 |
| 568 void InProcessCommandBuffer::Flush(int32 put_offset) { | 459 void InProcessCommandBuffer::Flush(int32 put_offset) { |
| 569 CheckSequencedThread(); | 460 CheckSequencedThread(); |
| 570 if (last_state_.error != gpu::error::kNoError) | 461 if (last_state_.error != gpu::error::kNoError) |
| 571 return; | 462 return; |
| 572 | 463 |
| (...skipping 202 matching lines...) |
| 775 | 666 |
| 776 #if defined(OS_ANDROID) | 667 #if defined(OS_ANDROID) |
| 777 scoped_refptr<gfx::SurfaceTexture> | 668 scoped_refptr<gfx::SurfaceTexture> |
| 778 InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) { | 669 InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) { |
| 779 DCHECK(stream_texture_manager_); | 670 DCHECK(stream_texture_manager_); |
| 780 return stream_texture_manager_->GetSurfaceTexture(stream_id); | 671 return stream_texture_manager_->GetSurfaceTexture(stream_id); |
| 781 } | 672 } |
| 782 #endif | 673 #endif |
| 783 | 674 |
| 784 // static | 675 // static |
| 785 void InProcessCommandBuffer::EnableVirtualizedContext() { | |
| 786 g_use_virtualized_gl_context = true; | |
| 787 } | |
| 788 | |
| 789 // static | |
| 790 void InProcessCommandBuffer::SetScheduleCallback( | |
| 791 const base::Closure& callback) { | |
| 792 DCHECK(!g_uses_explicit_scheduling); | |
| 793 DCHECK(!SchedulerClientBase::HasClients()); | |
| 794 g_uses_explicit_scheduling = true; | |
| 795 g_gpu_queue.Get().SetScheduleCallback(callback); | |
| 796 } | |
| 797 | |
| 798 // static | |
| 799 void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() { | |
| 800 g_gpu_queue.Get().RunTasks(); | |
| 801 } | |
| 802 | |
| 803 // static | |
| 804 void InProcessCommandBuffer::SetGpuMemoryBufferFactory( | 676 void InProcessCommandBuffer::SetGpuMemoryBufferFactory( |
| 805 GpuMemoryBufferFactory* factory) { | 677 GpuMemoryBufferFactory* factory) { |
| 806 g_gpu_memory_buffer_factory = factory; | 678 g_gpu_memory_buffer_factory = factory; |
| 807 } | 679 } |
| 808 | 680 |
| 809 } // namespace gpu | 681 } // namespace gpu |
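
Example (not part of this change): a minimal sketch of how a client could pass its own Service implementation to the new InProcessCommandBuffer constructor instead of relying on GetDefaultService() and the shared GpuInProcessThread. The overridden methods (AddRef, Release, ScheduleTask, ScheduleIdleWork, UseVirtualizedGLContexts) mirror the interface used in this diff; the MyGpuService class, its task_runner_ member, the helper function, and the include paths are illustrative assumptions, not part of the CL.

#include "base/callback.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/time/time.h"
#include "gpu/command_buffer/service/in_process_command_buffer.h"

// Hypothetical Service that runs GPU work on a caller-supplied task runner
// rather than on the default, shared GpuInProcessThread.
class MyGpuService : public gpu::InProcessCommandBuffer::Service,
                     public base::RefCountedThreadSafe<MyGpuService> {
 public:
  explicit MyGpuService(
      const scoped_refptr<base::MessageLoopProxy>& task_runner)
      : task_runner_(task_runner) {}

  virtual void AddRef() const OVERRIDE {
    base::RefCountedThreadSafe<MyGpuService>::AddRef();
  }
  virtual void Release() const OVERRIDE {
    base::RefCountedThreadSafe<MyGpuService>::Release();
  }

  // Post GPU tasks to the caller's message loop.
  virtual void ScheduleTask(const base::Closure& task) OVERRIDE {
    task_runner_->PostTask(FROM_HERE, task);
  }
  // Defer idle work slightly, mirroring GpuInProcessThread::ScheduleIdleWork.
  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE {
    task_runner_->PostDelayedTask(
        FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
  }
  virtual bool UseVirtualizedGLContexts() OVERRIDE { return true; }

 private:
  friend class base::RefCountedThreadSafe<MyGpuService>;
  virtual ~MyGpuService() {}

  scoped_refptr<base::MessageLoopProxy> task_runner_;
};

// Passing an empty scoped_refptr instead would make the command buffer fall
// back to GetDefaultService(), which shares one GpuInProcessThread among all
// default clients.
void CreateCommandBufferWithCustomService(
    const scoped_refptr<base::MessageLoopProxy>& task_runner) {
  scoped_refptr<MyGpuService> service(new MyGpuService(task_runner));
  scoped_ptr<gpu::InProcessCommandBuffer> command_buffer(
      new gpu::InProcessCommandBuffer(service));
  // command_buffer->Initialize(...) would follow, with the arguments shown
  // earlier in this diff.
}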