OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/common/gpu/client/command_buffer_proxy_impl.h" | 5 #include "content/common/gpu/client/command_buffer_proxy_impl.h" |
6 | 6 |
7 #include "base/callback.h" | 7 #include "base/callback.h" |
8 #include "base/logging.h" | 8 #include "base/logging.h" |
9 #include "base/memory/shared_memory.h" | 9 #include "base/memory/shared_memory.h" |
10 #include "base/stl_util.h" | 10 #include "base/stl_util.h" |
(...skipping 329 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
340 if (last_state_.error != gpu::error::kNoError) | 340 if (last_state_.error != gpu::error::kNoError) |
341 return; | 341 return; |
342 | 342 |
343 Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id)); | 343 Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id)); |
344 } | 344 } |
345 | 345 |
346 gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() { | 346 gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() { |
347 return capabilities_; | 347 return capabilities_; |
348 } | 348 } |
349 | 349 |
350 int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer, | 350 int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer* const buffers, |
351 size_t width, | 351 size_t width, |
352 size_t height, | 352 size_t height, |
353 unsigned internalformat) { | 353 unsigned internalformat) { |
354 CheckLock(); | 354 CheckLock(); |
355 if (last_state_.error != gpu::error::kNoError) | 355 if (last_state_.error != gpu::error::kNoError) |
356 return -1; | 356 return -1; |
357 | 357 |
358 int32 new_id = channel_->ReserveImageId(); | 358 int32 new_id = channel_->ReserveImageId(); |
359 | 359 |
360 gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager = | 360 gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager = |
361 channel_->gpu_memory_buffer_manager(); | 361 channel_->gpu_memory_buffer_manager(); |
362 gfx::GpuMemoryBuffer* gpu_memory_buffer = | |
363 gpu_memory_buffer_manager->GpuMemoryBufferFromClientBuffer(buffer); | |
364 DCHECK(gpu_memory_buffer); | |
365 | 362 |
366 // This handle is owned by the GPU process and must be passed to it or it | 363 // Check the buffer count for the given |internalformat| and initialize the |
367 // will leak. In otherwords, do not early out on error between here and the | 364 // vectors where data will be passed. Log and return if the |internalformat| |
368 // sending of the CreateImage IPC below. | 365 // isn't supported. |
366 int num_buffers = | |
367 gpu::ImageFactory::GpuMemoryBufferCountForImageFormat(internalformat); | |
368 if (num_buffers < 1) { | |
369 LOG(ERROR) << "Internalformat is not supported."; | |
370 return -1; | |
371 } | |
372 std::vector<gfx::GpuMemoryBufferHandle> handles; | |
373 handles.reserve(num_buffers); | |
reveman
2015/03/12 19:37:22
Don't bother with this micro optimization. Just le
emircan
2015/03/12 22:34:26
Done.
I still think it might be worth it in the
| |
374 std::vector<gfx::GpuMemoryBuffer::Format> formats; | |
375 formats.reserve(num_buffers); | |
reveman
2015/03/12 19:37:22
Ditto.
emircan
2015/03/12 22:34:26
Done.
| |
369 bool requires_sync_point = false; | 376 bool requires_sync_point = false; |
370 gfx::GpuMemoryBufferHandle handle = | |
371 channel_->ShareGpuMemoryBufferToGpuProcess(gpu_memory_buffer->GetHandle(), | |
372 &requires_sync_point); | |
373 | 377 |
374 DCHECK(gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat( | 378 for (int i = 0; i < num_buffers; ++i) { |
375 gfx::Size(width, height), gpu_memory_buffer->GetFormat())); | 379 gfx::GpuMemoryBuffer* gpu_memory_buffer = |
376 DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat( | 380 gpu_memory_buffer_manager->GpuMemoryBufferFromClientBuffer(buffers[i]); |
377 internalformat, gpu_memory_buffer->GetFormat())); | 381 DCHECK(gpu_memory_buffer); |
382 | |
383 formats.push_back(gpu_memory_buffer->GetFormat()); | |
384 DCHECK(gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat( | |
385 gfx::Size(width, height), formats[i])); | |
386 DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat( | |
387 internalformat, i, formats[i])); | |
388 | |
389 bool buffer_requires_sync_point = false; | |
390 // This handle is owned by the GPU process and must be passed to it or it | |
391 // will leak. In other words, do not early out on error between here and the | |
392 // sending of the CreateImage IPC below. | |
393 handles.push_back(channel_->ShareGpuMemoryBufferToGpuProcess( | |
394 gpu_memory_buffer->GetHandle(), &buffer_requires_sync_point)); | |
395 | |
396 // We want to set a destruction sync point on all buffers if one happens to | 
reveman
2015/03/12 19:37:22
s/want to// as "want to" makes it sound like we ha
emircan
2015/03/12 22:34:26
Done.
| |
397 // require one. | |
398 requires_sync_point |= buffer_requires_sync_point; | |
399 } | |
400 | |
378 if (!Send(new GpuCommandBufferMsg_CreateImage(route_id_, | 401 if (!Send(new GpuCommandBufferMsg_CreateImage(route_id_, |
379 new_id, | 402 new_id, |
380 handle, | 403 handles, |
381 gfx::Size(width, height), | 404 gfx::Size(width, height), |
382 gpu_memory_buffer->GetFormat(), | 405 formats, |
383 internalformat))) { | 406 internalformat))) { |
384 return -1; | 407 return -1; |
385 } | 408 } |
386 | 409 |
387 if (requires_sync_point) { | 410 if (requires_sync_point) { |
388 gpu_memory_buffer_manager->SetDestructionSyncPoint(gpu_memory_buffer, | 411 for (int i = 0; i < num_buffers; ++i) { |
389 InsertSyncPoint()); | 412 gfx::GpuMemoryBuffer* gpu_memory_buffer = |
413 gpu_memory_buffer_manager->GpuMemoryBufferFromClientBuffer( | |
414 buffers[i]); | |
415 gpu_memory_buffer_manager->SetDestructionSyncPoint(gpu_memory_buffer, | |
416 InsertSyncPoint()); | |
reveman
2015/03/12 19:37:22
InsertSyncPoint() is a relatively expensive call.
emircan
2015/03/12 22:34:26
Done.
| |
417 } | |
390 } | 418 } |
391 | 419 |
392 return new_id; | 420 return new_id; |
393 } | 421 } |
394 | 422 |
395 void CommandBufferProxyImpl::DestroyImage(int32 id) { | 423 void CommandBufferProxyImpl::DestroyImage(int32 id) { |
396 CheckLock(); | 424 CheckLock(); |
397 if (last_state_.error != gpu::error::kNoError) | 425 if (last_state_.error != gpu::error::kNoError) |
398 return; | 426 return; |
399 | 427 |
400 Send(new GpuCommandBufferMsg_DestroyImage(route_id_, id)); | 428 Send(new GpuCommandBufferMsg_DestroyImage(route_id_, id)); |
401 } | 429 } |
402 | 430 |
403 int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage( | 431 int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage( |
404 size_t width, | 432 size_t width, |
405 size_t height, | 433 size_t height, |
406 unsigned internalformat, | 434 unsigned internalformat, |
407 unsigned usage) { | 435 unsigned usage) { |
408 CheckLock(); | 436 CheckLock(); |
409 scoped_ptr<gfx::GpuMemoryBuffer> buffer( | 437 |
410 channel_->gpu_memory_buffer_manager()->AllocateGpuMemoryBuffer( | 438 int num_buffers = |
411 gfx::Size(width, height), | 439 gpu::ImageFactory::GpuMemoryBufferCountForImageFormat(internalformat); |
412 gpu::ImageFactory::ImageFormatToGpuMemoryBufferFormat(internalformat), | 440 if (num_buffers < 1) { |
413 gpu::ImageFactory::ImageUsageToGpuMemoryBufferUsage(usage))); | 441 LOG(ERROR) << "Internalformat is not supported."; |
414 if (!buffer) | |
415 return -1; | 442 return -1; |
443 } | |
416 | 444 |
417 return CreateImage(buffer->AsClientBuffer(), width, height, internalformat); | 445 ScopedVector<gfx::GpuMemoryBuffer> buffers; |
446 buffers.reserve(num_buffers); | |
reveman
2015/03/12 19:37:22
Skip this optimization.
emircan
2015/03/12 22:34:26
Done.
| |
447 std::vector<ClientBuffer> client_buffers; | |
448 client_buffers.reserve(num_buffers); | |
reveman
2015/03/12 19:37:22
Ditto.
emircan
2015/03/12 22:34:26
Done.
| |
449 for (int i = 0; i < num_buffers; ++i) { | |
450 gfx::GpuMemoryBuffer::Format format = | |
451 gpu::ImageFactory::ImageFormatToGpuMemoryBufferFormat(internalformat, | |
452 i); | |
453 buffers.push_back( | |
454 channel_->gpu_memory_buffer_manager()->AllocateGpuMemoryBuffer( | |
455 gfx::Size(width, height), format, | |
456 gpu::ImageFactory::ImageUsageToGpuMemoryBufferUsage(usage))); | |
457 if (!buffers[i]) | |
458 return -1; | |
459 | |
460 client_buffers[i] = buffers[i]->AsClientBuffer(); | |
461 } | |
462 return CreateImage(client_buffers.data(), width, height, internalformat); | |
418 } | 463 } |
419 | 464 |
420 int CommandBufferProxyImpl::GetRouteID() const { | 465 int CommandBufferProxyImpl::GetRouteID() const { |
421 return route_id_; | 466 return route_id_; |
422 } | 467 } |
423 | 468 |
424 uint32 CommandBufferProxyImpl::CreateStreamTexture(uint32 texture_id) { | 469 uint32 CommandBufferProxyImpl::CreateStreamTexture(uint32 texture_id) { |
425 CheckLock(); | 470 CheckLock(); |
426 if (last_state_.error != gpu::error::kNoError) | 471 if (last_state_.error != gpu::error::kNoError) |
427 return 0; | 472 return 0; |
(...skipping 176 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
604 } | 649 } |
605 } | 650 } |
606 | 651 |
607 void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase, | 652 void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase, |
608 base::TimeDelta interval) { | 653 base::TimeDelta interval) { |
609 if (!update_vsync_parameters_completion_callback_.is_null()) | 654 if (!update_vsync_parameters_completion_callback_.is_null()) |
610 update_vsync_parameters_completion_callback_.Run(timebase, interval); | 655 update_vsync_parameters_completion_callback_.Run(timebase, interval); |
611 } | 656 } |
612 | 657 |
613 } // namespace content | 658 } // namespace content |
OLD | NEW |