Index: cc/trees/layer_tree_host_impl.cc
diff --git a/cc/trees/layer_tree_host_impl.cc b/cc/trees/layer_tree_host_impl.cc
index f7e5bf6a368b93a47f257483afe58d5b78e99933..e06039c41e834ac3f7a7c3ef585fe72597b29b88 100644
--- a/cc/trees/layer_tree_host_impl.cc
+++ b/cc/trees/layer_tree_host_impl.cc
@@ -46,6 +46,7 @@
 #include "cc/quads/shared_quad_state.h"
 #include "cc/quads/solid_color_draw_quad.h"
 #include "cc/quads/texture_draw_quad.h"
+#include "cc/resources/bitmap_raster_worker_pool.h"
 #include "cc/resources/eviction_tile_priority_queue.h"
 #include "cc/resources/gpu_raster_worker_pool.h"
 #include "cc/resources/memory_history.h"
@@ -91,11 +92,6 @@ void DidVisibilityChange(cc::LayerTreeHostImpl* id, bool visible) {

 size_t GetMaxTransferBufferUsageBytes(cc::ContextProvider* context_provider,
                                       double refresh_rate) {
-  // Software compositing should not use this value in production. Just use a
-  // default value when testing uploads with the software compositor.
-  if (!context_provider)
vmpstr 2014/09/24 16:31:51
Can you instead DCHECK(context_provider)
reveman 2014/09/24 17:25:27
I think it was always awkward that we could pass N
-    return std::numeric_limits<size_t>::max();
-
   // We want to make sure the default transfer buffer size is equal to the
   // amount of data that can be uploaded by the compositor to avoid stalling
   // the pipeline.
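
For reference, vmpstr's suggestion above would turn the deleted null-check into an assertion. A minimal sketch of what the top of GetMaxTransferBufferUsageBytes() could look like under that suggestion; this follow-up is not part of the hunk shown here, and the rest of the function body is elided:

size_t GetMaxTransferBufferUsageBytes(cc::ContextProvider* context_provider,
                                      double refresh_rate) {
  // The software-compositor fallback is gone, so a valid context provider can
  // be asserted rather than special-cased.
  DCHECK(context_provider);
  // ... transfer buffer size computation as in the existing code ...
}
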
@@ -118,9 +114,6 @@ size_t GetMaxTransferBufferUsageBytes(cc::ContextProvider* context_provider,
 }

 unsigned GetMapImageTextureTarget(cc::ContextProvider* context_provider) {
-  if (!context_provider)
vmpstr 2014/09/24 16:31:51
Same here.
reveman 2014/09/24 17:25:27
Same reply as above.
-    return GL_TEXTURE_2D;
-
   if (context_provider->ContextCapabilities().gpu.egl_image_external)
     return GL_TEXTURE_EXTERNAL_OES;
   if (context_provider->ContextCapabilities().gpu.texture_rectangle)
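
The same DCHECK suggestion applies to this helper. A minimal sketch follows; note the hunk cuts off after the texture_rectangle check, so the last two return statements are assumptions about the remainder of the function:

unsigned GetMapImageTextureTarget(cc::ContextProvider* context_provider) {
  // No software fallback remains, so assert a valid context provider instead
  // of defaulting to GL_TEXTURE_2D.
  DCHECK(context_provider);
  if (context_provider->ContextCapabilities().gpu.egl_image_external)
    return GL_TEXTURE_EXTERNAL_OES;
  if (context_provider->ContextCapabilities().gpu.texture_rectangle)
    return GL_TEXTURE_RECTANGLE_ARB;  // Assumed continuation of the function.
  return GL_TEXTURE_2D;               // Assumed default when neither is supported.
}
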
@@ -1993,7 +1986,17 @@ void LayerTreeHostImpl::CreateAndSetTileManager() {
         context_provider,
         resource_provider_.get(),
         staging_resource_pool_.get());
-  } else if (!UseZeroCopyRasterizer() && context_provider) {
+  } else if (UseZeroCopyRasterizer() && context_provider) {
vmpstr 2014/09/24 16:31:51
I think this whole if/else if sequence is better w
reveman 2014/09/24 17:25:27
Yes, much better. Done.
+    resource_pool_ =
+        ResourcePool::Create(resource_provider_.get(),
+                             GetMapImageTextureTarget(context_provider),
+                             resource_provider_->best_texture_format());
+
+    raster_worker_pool_ =
+        ZeroCopyRasterWorkerPool::Create(proxy_->ImplThreadTaskRunner(),
+                                         RasterWorkerPool::GetTaskGraphRunner(),
+                                         resource_provider_.get());
+  } else if (context_provider) {
     resource_pool_ = ResourcePool::Create(
         resource_provider_.get(),
         GL_TEXTURE_2D,
@@ -2009,13 +2012,13 @@ void LayerTreeHostImpl::CreateAndSetTileManager() {
   } else {
     resource_pool_ =
         ResourcePool::Create(resource_provider_.get(),
-                             GetMapImageTextureTarget(context_provider),
+                             GL_TEXTURE_2D,
                              resource_provider_->best_texture_format());

     raster_worker_pool_ =
-        ZeroCopyRasterWorkerPool::Create(proxy_->ImplThreadTaskRunner(),
-                                         RasterWorkerPool::GetTaskGraphRunner(),
-                                         resource_provider_.get());
+        BitmapRasterWorkerPool::Create(proxy_->ImplThreadTaskRunner(),
+                                       RasterWorkerPool::GetTaskGraphRunner(),
+                                       resource_provider_.get());
   }

   tile_manager_ =
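
vmpstr's structural comment above is what the new code follows: each rasterizer is selected explicitly, and only the final else (no context provider at all) falls back to software compositing. A rough sketch of the resulting branch order in CreateAndSetTileManager(); the use_gpu_rasterization_ flag name is an assumption, while UseOneCopyRasterizer() and UseZeroCopyRasterizer() appear elsewhere in this diff:

if (use_gpu_rasterization_ && context_provider) {
  // GPU rasterization path (GpuRasterWorkerPool, included at the top of the file).
} else if (UseOneCopyRasterizer() && context_provider) {
  // One-copy path, using staging_resource_pool_ as in the context lines above.
} else if (UseZeroCopyRasterizer() && context_provider) {
  // Zero-copy path added in this hunk: ResourcePool with the map-image texture
  // target plus ZeroCopyRasterWorkerPool.
} else if (context_provider) {
  // Default GL upload path: ResourcePool backed by GL_TEXTURE_2D.
} else {
  // Software compositing: BitmapRasterWorkerPool, no context provider needed.
}
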
@@ -2043,11 +2046,7 @@ bool LayerTreeHostImpl::UsePendingTreeForSync() const {
 }

 bool LayerTreeHostImpl::UseZeroCopyRasterizer() const {
-  // Note: we use zero-copy by default when the renderer is using
-  // shared memory resources.
-  return (settings_.use_zero_copy ||
-          GetRendererCapabilities().using_shared_memory_resources) &&
-         GetRendererCapabilities().using_map_image;
+  return settings_.use_zero_copy && GetRendererCapabilities().using_map_image;
 }

 bool LayerTreeHostImpl::UseOneCopyRasterizer() const {
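
With the simplified UseZeroCopyRasterizer() above, zero-copy is now strictly opt-in: it is no longer implied by shared-memory renderer resources and requires both the explicit setting and map-image support. A minimal sketch of the settings side of that change (the surrounding host setup is omitted):

cc::LayerTreeSettings settings;
// After this patch, zero-copy must be requested explicitly; it is no longer
// turned on automatically for renderers using shared memory resources.
settings.use_zero_copy = true;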