Chromium Code Reviews

Side by Side Diff: cc/trees/layer_tree_host_impl.cc

Issue 1230203007: Re-land: cc: Use worker context for one-copy tile initialization. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: address review feedback Created 5 years, 5 months ago
1 // Copyright 2011 The Chromium Authors. All rights reserved. 1 // Copyright 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "cc/trees/layer_tree_host_impl.h" 5 #include "cc/trees/layer_tree_host_impl.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <limits> 8 #include <limits>
9 #include <map> 9 #include <map>
10 #include <set> 10 #include <set>
(...skipping 134 matching lines...)
145 // that time interval, and then uploads should have a chance to be processed. 145 // that time interval, and then uploads should have a chance to be processed.
146 size_t ms_per_frame = std::floor(1000.0 / refresh_rate); 146 size_t ms_per_frame = std::floor(1000.0 / refresh_rate);
147 size_t max_transfer_buffer_usage_bytes = 147 size_t max_transfer_buffer_usage_bytes =
148 ms_per_frame * kMaxBytesUploadedPerMs; 148 ms_per_frame * kMaxBytesUploadedPerMs;
149 149
150 // The context may request a lower limit based on the device capabilities. 150 // The context may request a lower limit based on the device capabilities.
151 return std::min(context_capabilities.max_transfer_buffer_usage_bytes, 151 return std::min(context_capabilities.max_transfer_buffer_usage_bytes,
152 max_transfer_buffer_usage_bytes); 152 max_transfer_buffer_usage_bytes);
153 } 153 }
154 154
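
For context, the limit computed in GetMaxTransferBufferUsageBytes above works out to roughly one frame's worth of upload budget. A minimal standalone sketch of the same arithmetic follows; kMaxBytesUploadedPerMs is defined earlier in this file outside the visible hunk, so the value used below is an assumption for illustration only.

#include <algorithm>
#include <cmath>
#include <cstddef>

// Assumed stand-in for the kMaxBytesUploadedPerMs constant defined earlier
// in layer_tree_host_impl.cc (not shown in this hunk).
const size_t kAssumedMaxBytesUploadedPerMs = 1024 * 1024;  // 1 MB per ms.

size_t MaxTransferBufferUsageBytesSketch(size_t context_limit_bytes,
                                         double refresh_rate) {
  // At 60 Hz: floor(1000 / 60) = 16 ms per frame, so ~16 MB of uploads can
  // be queued per frame under the assumed 1 MB/ms rate.
  size_t ms_per_frame = std::floor(1000.0 / refresh_rate);
  size_t max_transfer_buffer_usage_bytes =
      ms_per_frame * kAssumedMaxBytesUploadedPerMs;
  // The context may request a lower limit based on device capabilities.
  return std::min(context_limit_bytes, max_transfer_buffer_usage_bytes);
}
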
155 size_t GetMaxStagingResourceCount() {
156 // Upper bound for number of staging resource to allow.
157 return 32;
158 }
159
160 size_t GetDefaultMemoryAllocationLimit() { 155 size_t GetDefaultMemoryAllocationLimit() {
161 // TODO(ccameron): (http://crbug.com/137094) This 64MB default is a straggler 156 // TODO(ccameron): (http://crbug.com/137094) This 64MB default is a straggler
162 // from the old texture manager and is just to give us a default memory 157 // from the old texture manager and is just to give us a default memory
163 // allocation before we get a callback from the GPU memory manager. We 158 // allocation before we get a callback from the GPU memory manager. We
 164 // should probably either: 159 // should probably either:
165 // - wait for the callback before rendering anything instead 160 // - wait for the callback before rendering anything instead
166 // - push this into the GPU memory manager somehow. 161 // - push this into the GPU memory manager somehow.
167 return 64 * 1024 * 1024; 162 return 64 * 1024 * 1024;
168 } 163 }
169 164
(...skipping 1057 matching lines...)
1227 // TODO(reveman): We should avoid keeping around unused resources if 1222 // TODO(reveman): We should avoid keeping around unused resources if
1228 // possible. crbug.com/224475 1223 // possible. crbug.com/224475
1229 // Unused limit is calculated from soft-limit, as hard-limit may 1224 // Unused limit is calculated from soft-limit, as hard-limit may
1230 // be very high and shouldn't typically be exceeded. 1225 // be very high and shouldn't typically be exceeded.
1231 size_t unused_memory_limit_in_bytes = static_cast<size_t>( 1226 size_t unused_memory_limit_in_bytes = static_cast<size_t>(
1232 (static_cast<int64>(global_tile_state_.soft_memory_limit_in_bytes) * 1227 (static_cast<int64>(global_tile_state_.soft_memory_limit_in_bytes) *
1233 settings_.max_unused_resource_memory_percentage) / 1228 settings_.max_unused_resource_memory_percentage) /
1234 100); 1229 100);
1235 1230
1236 DCHECK(resource_pool_); 1231 DCHECK(resource_pool_);
1237 resource_pool_->CheckBusyResources(false); 1232 resource_pool_->CheckBusyResources();
1238 // Soft limit is used for resource pool such that memory returns to soft 1233 // Soft limit is used for resource pool such that memory returns to soft
1239 // limit after going over. 1234 // limit after going over.
1240 resource_pool_->SetResourceUsageLimits( 1235 resource_pool_->SetResourceUsageLimits(
1241 global_tile_state_.soft_memory_limit_in_bytes, 1236 global_tile_state_.soft_memory_limit_in_bytes,
1242 unused_memory_limit_in_bytes, 1237 unused_memory_limit_in_bytes,
1243 global_tile_state_.num_resources_limit); 1238 global_tile_state_.num_resources_limit);
1244 1239
1245 // Release all staging resources when invisible.
1246 if (staging_resource_pool_) {
1247 staging_resource_pool_->CheckBusyResources(false);
1248 staging_resource_pool_->SetResourceUsageLimits(
1249 std::numeric_limits<size_t>::max(),
1250 std::numeric_limits<size_t>::max(),
1251 visible_ ? GetMaxStagingResourceCount() : 0);
1252 }
1253
1254 DidModifyTilePriorities(); 1240 DidModifyTilePriorities();
1255 } 1241 }
1256 1242
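
A note on the unused-resource limit computed in the hunk above: it is a percentage of the soft memory limit, so a 128 MB soft limit with a 25% allowance, for example, would keep at most 32 MB of unused resources in the pool. A minimal sketch of that derivation, with illustrative values rather than the actual policy defaults:

#include <cstddef>
#include <cstdint>

size_t UnusedMemoryLimitSketch(size_t soft_memory_limit_in_bytes,
                               size_t max_unused_resource_memory_percentage) {
  // E.g. soft limit of 128 MB with a 25% allowance -> 32 MB unused limit.
  return static_cast<size_t>(
      (static_cast<int64_t>(soft_memory_limit_in_bytes) *
       max_unused_resource_memory_percentage) /
      100);
}
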
1257 void LayerTreeHostImpl::DidModifyTilePriorities() { 1243 void LayerTreeHostImpl::DidModifyTilePriorities() {
1258 // Mark priorities as dirty and schedule a PrepareTiles(). 1244 // Mark priorities as dirty and schedule a PrepareTiles().
1259 tile_priorities_dirty_ = true; 1245 tile_priorities_dirty_ = true;
1260 client_->SetNeedsPrepareTilesOnImplThread(); 1246 client_->SetNeedsPrepareTilesOnImplThread();
1261 } 1247 }
1262 1248
1263 scoped_ptr<RasterTilePriorityQueue> LayerTreeHostImpl::BuildRasterQueue( 1249 scoped_ptr<RasterTilePriorityQueue> LayerTreeHostImpl::BuildRasterQueue(
(...skipping 38 matching lines...)
1302 // don't need to schedule a draw here. Just stop WillBeginImplFrame() from 1288 // don't need to schedule a draw here. Just stop WillBeginImplFrame() from
1303 // causing optimistic requests to draw a frame. 1289 // causing optimistic requests to draw a frame.
1304 is_likely_to_require_a_draw_ = false; 1290 is_likely_to_require_a_draw_ = false;
1305 1291
1306 client_->NotifyReadyToDraw(); 1292 client_->NotifyReadyToDraw();
1307 } 1293 }
1308 1294
1309 void LayerTreeHostImpl::NotifyAllTileTasksCompleted() { 1295 void LayerTreeHostImpl::NotifyAllTileTasksCompleted() {
1310 // The tile tasks started by the most recent call to PrepareTiles have 1296 // The tile tasks started by the most recent call to PrepareTiles have
1311 // completed. Now is a good time to free resources if necessary. 1297 // completed. Now is a good time to free resources if necessary.
1312 if (output_surface_ && global_tile_state_.hard_memory_limit_in_bytes == 0) { 1298 if (global_tile_state_.hard_memory_limit_in_bytes == 0) {
vmpstr 2015/07/23 17:51:07 nit: This is a no-op change? :P
reveman 2015/07/23 18:40:40 Good catch. This was a leftover from some changes
1313 output_surface_->SetWorkerContextShouldAggressivelyFreeResources( 1299 if (output_surface_) {
1314 true /* aggressively_free_resources */); 1300 output_surface_->SetWorkerContextShouldAggressivelyFreeResources(
1301 true /* aggressively_free_resources */);
1302 }
1315 } 1303 }
1316 } 1304 }
1317 1305
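
As vmpstr's comment above points out, the reshuffled condition in NotifyAllTileTasksCompleted is behaviorally a no-op. A reduced sketch of the equivalence, with the surrounding types stubbed out for illustration:

#include <cstddef>

// Both forms free resources only when an output surface exists and the hard
// memory limit is zero; only the nesting of the checks differs.
bool ShouldFreeOld(bool has_output_surface, size_t hard_limit_bytes) {
  return has_output_surface && hard_limit_bytes == 0;
}

bool ShouldFreeNew(bool has_output_surface, size_t hard_limit_bytes) {
  if (hard_limit_bytes == 0) {
    if (has_output_surface)
      return true;
  }
  return false;
}
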
1318 void LayerTreeHostImpl::NotifyTileStateChanged(const Tile* tile) { 1306 void LayerTreeHostImpl::NotifyTileStateChanged(const Tile* tile) {
1319 TRACE_EVENT0("cc", "LayerTreeHostImpl::NotifyTileStateChanged"); 1307 TRACE_EVENT0("cc", "LayerTreeHostImpl::NotifyTileStateChanged");
1320 1308
1321 if (active_tree_) { 1309 if (active_tree_) {
1322 LayerImpl* layer_impl = 1310 LayerImpl* layer_impl =
1323 active_tree_->FindActiveTreeLayerById(tile->layer_id()); 1311 active_tree_->FindActiveTreeLayerById(tile->layer_id());
1324 if (layer_impl) 1312 if (layer_impl)
(...skipping 132 matching lines...)
1457 1445
1458 void LayerTreeHostImpl::ReclaimResources(const CompositorFrameAck* ack) { 1446 void LayerTreeHostImpl::ReclaimResources(const CompositorFrameAck* ack) {
1459 // TODO(piman): We may need to do some validation on this ack before 1447 // TODO(piman): We may need to do some validation on this ack before
1460 // processing it. 1448 // processing it.
1461 if (renderer_) 1449 if (renderer_)
1462 renderer_->ReceiveSwapBuffersAck(*ack); 1450 renderer_->ReceiveSwapBuffersAck(*ack);
1463 1451
1464 // In OOM, we now might be able to release more resources that were held 1452 // In OOM, we now might be able to release more resources that were held
1465 // because they were exported. 1453 // because they were exported.
1466 if (resource_pool_) { 1454 if (resource_pool_) {
1467 resource_pool_->CheckBusyResources(false); 1455 resource_pool_->CheckBusyResources();
1468 resource_pool_->ReduceResourceUsage(); 1456 resource_pool_->ReduceResourceUsage();
1469 } 1457 }
1470 // If we're not visible, we likely released resources, so we want to 1458 // If we're not visible, we likely released resources, so we want to
1471 // aggressively flush here to make sure those DeleteTextures make it to the 1459 // aggressively flush here to make sure those DeleteTextures make it to the
1472 // GPU process to free up the memory. 1460 // GPU process to free up the memory.
1473 if (output_surface_->context_provider() && !visible_) { 1461 if (output_surface_->context_provider() && !visible_) {
1474 output_surface_->context_provider()->ContextGL()->ShallowFlushCHROMIUM(); 1462 output_surface_->context_provider()->ContextGL()->ShallowFlushCHROMIUM();
1475 } 1463 }
1476 } 1464 }
1477 1465
(...skipping 597 matching lines...)
2075 // See note in LayerTreeImpl::UpdateDrawProperties. Renderer needs to be 2063 // See note in LayerTreeImpl::UpdateDrawProperties. Renderer needs to be
2076 // initialized to get max texture size. Also, after releasing resources, 2064 // initialized to get max texture size. Also, after releasing resources,
2077 // trees need another update to generate new ones. 2065 // trees need another update to generate new ones.
2078 active_tree_->set_needs_update_draw_properties(); 2066 active_tree_->set_needs_update_draw_properties();
2079 if (pending_tree_) 2067 if (pending_tree_)
2080 pending_tree_->set_needs_update_draw_properties(); 2068 pending_tree_->set_needs_update_draw_properties();
2081 client_->UpdateRendererCapabilitiesOnImplThread(); 2069 client_->UpdateRendererCapabilitiesOnImplThread();
2082 } 2070 }
2083 2071
2084 void LayerTreeHostImpl::CreateTileManagerResources() { 2072 void LayerTreeHostImpl::CreateTileManagerResources() {
2085 CreateResourceAndTileTaskWorkerPool(&tile_task_worker_pool_, &resource_pool_, 2073 CreateResourceAndTileTaskWorkerPool(&tile_task_worker_pool_, &resource_pool_);
2086 &staging_resource_pool_);
2087 // TODO(vmpstr): Initialize tile task limit at ctor time. 2074 // TODO(vmpstr): Initialize tile task limit at ctor time.
2088 tile_manager_->SetResources( 2075 tile_manager_->SetResources(
2089 resource_pool_.get(), tile_task_worker_pool_->AsTileTaskRunner(), 2076 resource_pool_.get(), tile_task_worker_pool_->AsTileTaskRunner(),
2090 is_synchronous_single_threaded_ ? std::numeric_limits<size_t>::max() 2077 is_synchronous_single_threaded_ ? std::numeric_limits<size_t>::max()
2091 : settings_.scheduled_raster_task_limit); 2078 : settings_.scheduled_raster_task_limit);
2092 UpdateTileManagerMemoryPolicy(ActualManagedMemoryPolicy()); 2079 UpdateTileManagerMemoryPolicy(ActualManagedMemoryPolicy());
2093 } 2080 }
2094 2081
2095 void LayerTreeHostImpl::CreateResourceAndTileTaskWorkerPool( 2082 void LayerTreeHostImpl::CreateResourceAndTileTaskWorkerPool(
2096 scoped_ptr<TileTaskWorkerPool>* tile_task_worker_pool, 2083 scoped_ptr<TileTaskWorkerPool>* tile_task_worker_pool,
2097 scoped_ptr<ResourcePool>* resource_pool, 2084 scoped_ptr<ResourcePool>* resource_pool) {
2098 scoped_ptr<ResourcePool>* staging_resource_pool) {
2099 DCHECK(GetTaskRunner()); 2085 DCHECK(GetTaskRunner());
2100 // TODO(vmpstr): Make this a DCHECK (or remove) when crbug.com/419086 is 2086 // TODO(vmpstr): Make this a DCHECK (or remove) when crbug.com/419086 is
2101 // resolved. 2087 // resolved.
2102 CHECK(resource_provider_); 2088 CHECK(resource_provider_);
2103 2089
2104 // Pass the single-threaded synchronous task graph runner to the worker pool 2090 // Pass the single-threaded synchronous task graph runner to the worker pool
2105 // if we're in synchronous single-threaded mode. 2091 // if we're in synchronous single-threaded mode.
2106 TaskGraphRunner* task_graph_runner = task_graph_runner_; 2092 TaskGraphRunner* task_graph_runner = task_graph_runner_;
2107 if (is_synchronous_single_threaded_) { 2093 if (is_synchronous_single_threaded_) {
2108 DCHECK(!single_thread_synchronous_task_graph_runner_); 2094 DCHECK(!single_thread_synchronous_task_graph_runner_);
(...skipping 36 matching lines...)
2145 if (settings_.use_zero_copy) { 2131 if (settings_.use_zero_copy) {
2146 *resource_pool = 2132 *resource_pool =
2147 ResourcePool::Create(resource_provider_.get(), image_target); 2133 ResourcePool::Create(resource_provider_.get(), image_target);
2148 2134
2149 *tile_task_worker_pool = ZeroCopyTileTaskWorkerPool::Create( 2135 *tile_task_worker_pool = ZeroCopyTileTaskWorkerPool::Create(
2150 GetTaskRunner(), task_graph_runner, resource_provider_.get()); 2136 GetTaskRunner(), task_graph_runner, resource_provider_.get());
2151 return; 2137 return;
2152 } 2138 }
2153 2139
2154 if (settings_.use_one_copy) { 2140 if (settings_.use_one_copy) {
2155 // Synchronous single-threaded mode depends on tiles being ready to
2156 // draw when raster is complete. Therefore, it must use one of zero
2157 // copy, software raster, or GPU raster.
2158 DCHECK(!is_synchronous_single_threaded_);
2159
2160 // We need to create a staging resource pool when using copy rasterizer.
2161 *staging_resource_pool =
2162 ResourcePool::Create(resource_provider_.get(), image_target);
2163 *resource_pool = 2141 *resource_pool =
2164 ResourcePool::Create(resource_provider_.get(), GL_TEXTURE_2D); 2142 ResourcePool::Create(resource_provider_.get(), GL_TEXTURE_2D);
2165 2143
2166 int max_copy_texture_chromium_size = 2144 int max_copy_texture_chromium_size =
2167 context_provider->ContextCapabilities() 2145 context_provider->ContextCapabilities()
2168 .gpu.max_copy_texture_chromium_size; 2146 .gpu.max_copy_texture_chromium_size;
2169 2147
2170 *tile_task_worker_pool = OneCopyTileTaskWorkerPool::Create( 2148 *tile_task_worker_pool = OneCopyTileTaskWorkerPool::Create(
2171 GetTaskRunner(), task_graph_runner, context_provider, 2149 GetTaskRunner(), task_graph_runner, context_provider,
2172 resource_provider_.get(), staging_resource_pool_.get(), 2150 resource_provider_.get(), max_copy_texture_chromium_size,
2173 max_copy_texture_chromium_size, 2151 settings_.use_persistent_map_for_gpu_memory_buffers, image_target,
2174 settings_.use_persistent_map_for_gpu_memory_buffers); 2152 settings_.max_staging_buffers);
2175 return; 2153 return;
2176 } 2154 }
2177 2155
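
One consequence of the one-copy change above: the hard-coded staging cap from the removed GetMaxStagingResourceCount() helper (32) is gone, and the limit now flows in through settings_.max_staging_buffers passed to OneCopyTileTaskWorkerPool::Create. A hypothetical sketch of the relevant settings slice, purely for illustration; the real LayerTreeSettings definition and its defaults are not part of this diff:

// Hypothetical stand-in for the fields referenced by the new call above.
struct OneCopySettingsSketch {
  bool use_one_copy = true;
  bool use_persistent_map_for_gpu_memory_buffers = false;
  // Plays the role previously filled by GetMaxStagingResourceCount();
  // the default here simply mirrors the removed helper's value.
  int max_staging_buffers = 32;
};
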
2178 // Synchronous single-threaded mode depends on tiles being ready to 2156 // Synchronous single-threaded mode depends on tiles being ready to
2179 // draw when raster is complete. Therefore, it must use one of zero 2157 // draw when raster is complete. Therefore, it must use one of zero
2180 // copy, software raster, or GPU raster (in the branches above). 2158 // copy, software raster, or GPU raster (in the branches above).
2181 DCHECK(!is_synchronous_single_threaded_); 2159 DCHECK(!is_synchronous_single_threaded_);
2182 2160
2183 *resource_pool = ResourcePool::Create( 2161 *resource_pool = ResourcePool::Create(
2184 resource_provider_.get(), GL_TEXTURE_2D); 2162 resource_provider_.get(), GL_TEXTURE_2D);
(...skipping 22 matching lines...)
2207 void LayerTreeHostImpl::PostFrameTimingEvents( 2185 void LayerTreeHostImpl::PostFrameTimingEvents(
2208 scoped_ptr<FrameTimingTracker::CompositeTimingSet> composite_events, 2186 scoped_ptr<FrameTimingTracker::CompositeTimingSet> composite_events,
2209 scoped_ptr<FrameTimingTracker::MainFrameTimingSet> main_frame_events) { 2187 scoped_ptr<FrameTimingTracker::MainFrameTimingSet> main_frame_events) {
2210 client_->PostFrameTimingEventsOnImplThread(composite_events.Pass(), 2188 client_->PostFrameTimingEventsOnImplThread(composite_events.Pass(),
2211 main_frame_events.Pass()); 2189 main_frame_events.Pass());
2212 } 2190 }
2213 2191
2214 void LayerTreeHostImpl::CleanUpTileManager() { 2192 void LayerTreeHostImpl::CleanUpTileManager() {
2215 tile_manager_->FinishTasksAndCleanUp(); 2193 tile_manager_->FinishTasksAndCleanUp();
2216 resource_pool_ = nullptr; 2194 resource_pool_ = nullptr;
2217 staging_resource_pool_ = nullptr;
2218 tile_task_worker_pool_ = nullptr; 2195 tile_task_worker_pool_ = nullptr;
2219 single_thread_synchronous_task_graph_runner_ = nullptr; 2196 single_thread_synchronous_task_graph_runner_ = nullptr;
2220 } 2197 }
2221 2198
2222 bool LayerTreeHostImpl::InitializeRenderer( 2199 bool LayerTreeHostImpl::InitializeRenderer(
2223 scoped_ptr<OutputSurface> output_surface) { 2200 scoped_ptr<OutputSurface> output_surface) {
2224 TRACE_EVENT0("cc", "LayerTreeHostImpl::InitializeRenderer"); 2201 TRACE_EVENT0("cc", "LayerTreeHostImpl::InitializeRenderer");
2225 2202
2226 // Since we will create a new resource provider, we cannot continue to use 2203 // Since we will create a new resource provider, we cannot continue to use
2227 // the old resources (i.e. render_surfaces and texture IDs). Clear them 2204 // the old resources (i.e. render_surfaces and texture IDs). Clear them
(...skipping 12 matching lines...)
2240 // point). 2217 // point).
2241 return false; 2218 return false;
2242 } 2219 }
2243 2220
2244 output_surface_ = output_surface.Pass(); 2221 output_surface_ = output_surface.Pass();
2245 resource_provider_ = ResourceProvider::Create( 2222 resource_provider_ = ResourceProvider::Create(
2246 output_surface_.get(), shared_bitmap_manager_, gpu_memory_buffer_manager_, 2223 output_surface_.get(), shared_bitmap_manager_, gpu_memory_buffer_manager_,
2247 proxy_->blocking_main_thread_task_runner(), 2224 proxy_->blocking_main_thread_task_runner(),
2248 settings_.renderer_settings.highp_threshold_min, 2225 settings_.renderer_settings.highp_threshold_min,
2249 settings_.renderer_settings.use_rgba_4444_textures, 2226 settings_.renderer_settings.use_rgba_4444_textures,
2250 settings_.renderer_settings.texture_id_allocation_chunk_size, 2227 settings_.renderer_settings.texture_id_allocation_chunk_size);
2251 settings_.use_persistent_map_for_gpu_memory_buffers);
2252 2228
2253 CreateAndSetRenderer(); 2229 CreateAndSetRenderer();
2254 2230
2255 // Since the new renderer may be capable of MSAA, update status here. 2231 // Since the new renderer may be capable of MSAA, update status here.
2256 UpdateGpuRasterizationStatus(); 2232 UpdateGpuRasterizationStatus();
2257 2233
2258 CreateTileManagerResources(); 2234 CreateTileManagerResources();
2259 RecreateTreeResources(); 2235 RecreateTreeResources();
2260 2236
2261 // Initialize vsync parameters to sane values. 2237 // Initialize vsync parameters to sane values.
(...skipping 1354 matching lines...)
3616 if (active_tree()) { 3592 if (active_tree()) {
3617 LayerAnimationValueProvider* layer = active_tree()->LayerById(layer_id); 3593 LayerAnimationValueProvider* layer = active_tree()->LayerById(layer_id);
3618 if (layer) 3594 if (layer)
3619 return layer->ScrollOffsetForAnimation(); 3595 return layer->ScrollOffsetForAnimation();
3620 } 3596 }
3621 3597
3622 return gfx::ScrollOffset(); 3598 return gfx::ScrollOffset();
3623 } 3599 }
3624 3600
3625 } // namespace cc 3601 } // namespace cc
