Index: cc/layers/picture_layer_impl.cc
diff --git a/cc/layers/picture_layer_impl.cc b/cc/layers/picture_layer_impl.cc
index 37c83289700ab9fef4cd1fb356d0f9c354d29b0f..7f78821c46cf5496a38f293897a20d719e04d823 100644
--- a/cc/layers/picture_layer_impl.cc
+++ b/cc/layers/picture_layer_impl.cc
@@ -44,6 +44,11 @@ const float kCpuSkewportTargetTimeInFrames = 60.0f;
 // TileManager::BinFromTilePriority).
 const float kGpuSkewportTargetTimeInFrames = 0.0f;
 
+// Even for really wide viewports, at some point GPU raster should use
+// less than 4 tiles to fill the viewport. This is set to 128 as a
+// sane minimum for now, but we might want to increase with tuning.
+const int kMinHeightForGpuRasteredTile = 128;
+
 }  // namespace
 
 namespace cc {
@@ -675,33 +680,44 @@ gfx::Size PictureLayerImpl::CalculateTileSize(
   }
 
   gfx::Size default_tile_size = layer_tree_impl()->settings().default_tile_size;
-  if (layer_tree_impl()->use_gpu_rasterization()) {
-    // TODO(ernstm) crbug.com/365877: We need a unified way to override the
-    // default-tile-size.
-    default_tile_size =
-        gfx::Size(layer_tree_impl()->device_viewport_size().width(),
-                  layer_tree_impl()->device_viewport_size().height() / 4);
-  }
-  default_tile_size.SetToMin(gfx::Size(max_texture_size, max_texture_size));
-
   gfx::Size max_untiled_content_size =
       layer_tree_impl()->settings().max_untiled_layer_size;
+
+  // For GPU rasterization, we pick an ideal tile size using the viewport,
+  // so we don't need the above settings.
+  bool use_gpu = layer_tree_impl()->use_gpu_rasterization();
+  if (use_gpu) {
vmpstr (2014/10/09 16:35:43): For complexity thing, maybe just have the two vari
epenner (2014/10/09 18:36:33): I dunno, I think once you see the code you might n
vmpstr (2014/10/09 18:38:18): Fair enough. That's why I'm not really sure how to
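A minimal standalone sketch of the viewport-based sizing computed in the new branch, using plain ints in place of gfx::Size; the PrintGpuTileSize helper and the example viewport sizes are illustrative, not part of the patch:

    // Sketch only: width = viewport width, height = viewport height / 4,
    // clamped to the minimum tile height introduced by the patch.
    #include <algorithm>
    #include <cstdio>

    namespace {
    const int kMinHeightForGpuRasteredTile = 128;  // same value as the patch
    }  // namespace

    void PrintGpuTileSize(int viewport_width, int viewport_height) {
      int width = viewport_width;
      int height = std::max(viewport_height / 4, kMinHeightForGpuRasteredTile);
      std::printf("%dx%d viewport -> %dx%d tile\n",
                  viewport_width, viewport_height, width, height);
    }

    int main() {
      PrintGpuTileSize(1080, 1920);  // tall phone: 1080x480 tile
      PrintGpuTileSize(1920, 400);   // short, wide window: height clamps to 128
      return 0;
    }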
+    int width = layer_tree_impl()->device_viewport_size().width();
+    int height = layer_tree_impl()->device_viewport_size().height() / 4;
+    height = std::max(height, kMinHeightForGpuRasteredTile);
+    default_tile_size = gfx::Size(width, height);
+    // Since the width is already expanded to viewport width, we use
+    // double the height as our max untiled size.
+    max_untiled_content_size = gfx::Size(height * 2, height * 2);
+  }
+
+  default_tile_size.SetToMin(gfx::Size(max_texture_size, max_texture_size));
   max_untiled_content_size.SetToMin(
       gfx::Size(max_texture_size, max_texture_size));
 
-  bool any_dimension_too_large =
-      content_bounds.width() > max_untiled_content_size.width() ||
-      content_bounds.height() > max_untiled_content_size.height();
+  bool both_dimensions_are_small =
+      content_bounds.width() <= max_untiled_content_size.width() &&
+      content_bounds.height() <= max_untiled_content_size.height();
 
-  bool any_dimension_one_tile =
+  bool long_and_skinny =
       content_bounds.width() <= default_tile_size.width() ||
       content_bounds.height() <= default_tile_size.height();
+  // Using GPU raster the width is already expanded to the viewport, so we just
+  // use the height to determine if the layer is skinny horizontally.
+  if (use_gpu)
+    long_and_skinny = content_bounds.width() <= default_tile_size.height();
vmpstr (2014/10/09 16:35:43): typo? This should probably compare height with hei
epenner (2014/10/09 18:36:32): This is what I intended. But perhaps I need to imp
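To make the comparison under discussion concrete, a small sketch of the GPU-raster "long and skinny" test; the 480-pixel tile height and the helper name are assumptions for illustration (the author states the comparison of the layer width against the tile height is intended):

    #include <cstdio>

    // Under GPU raster the tile width already spans the viewport, so only the
    // layer's width relative to the tile *height* decides skinniness here.
    bool LongAndSkinnyForGpu(int content_width, int tile_height) {
      return content_width <= tile_height;
    }

    int main() {
      const int tile_height = 480;  // e.g. a 1920x480 viewport-derived tile
      std::printf("%d\n", LongAndSkinnyForGpu(300, tile_height));   // 1: skinny
      std::printf("%d\n", LongAndSkinnyForGpu(1200, tile_height));  // 0: not skinny
      return 0;
    }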
+
   // If long and skinny, tile at the max untiled content size, and clamp
   // the smaller dimension to the content size, e.g. 1000x12 layer with
   // 500x500 max untiled size would get 500x12 tiles. Also do this
   // if the layer is small.
-  if (any_dimension_one_tile || !any_dimension_too_large) {
+  if (long_and_skinny || both_dimensions_are_small) {
     int width = std::min(
         std::max(max_untiled_content_size.width(), default_tile_size.width()),
         content_bounds.width());
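As a worked check of the 1000x12 example in the comment above, a minimal sketch of the width clamp; ClampedTileWidth and the 256 default tile width are assumptions for illustration only:

    #include <algorithm>
    #include <cstdio>

    // Reproduces just the width computation: clamp the larger of the max
    // untiled width and the default tile width to the content width.
    int ClampedTileWidth(int max_untiled_width, int default_tile_width,
                         int content_width) {
      return std::min(std::max(max_untiled_width, default_tile_width),
                      content_width);
    }

    int main() {
      // 1000x12 layer with a 500x500 max untiled size: the tile width comes
      // out as 500, matching the "500x12 tiles" example (the height is
      // clamped analogously by the code that follows in the file).
      std::printf("%d\n", ClampedTileWidth(500, 256, 1000));  // prints 500
      return 0;
    }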