// Copyright 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/trees/layer_tree_host.h"

#include <algorithm>
#include <stack>
#include <string>

#include "base/atomic_sequence_num.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "cc/animation/animation_registrar.h"
#include "cc/animation/layer_animation_controller.h"
#include "cc/base/math_util.h"
#include "cc/debug/devtools_instrumentation.h"
#include "cc/debug/frame_viewer_instrumentation.h"
#include "cc/debug/rendering_stats_instrumentation.h"
#include "cc/input/layer_selection_bound.h"
#include "cc/input/page_scale_animation.h"
#include "cc/input/top_controls_manager.h"
#include "cc/layers/heads_up_display_layer.h"
#include "cc/layers/heads_up_display_layer_impl.h"
#include "cc/layers/layer.h"
#include "cc/layers/layer_iterator.h"
#include "cc/layers/painted_scrollbar_layer.h"
#include "cc/layers/render_surface.h"
#include "cc/resources/prioritized_resource_manager.h"
#include "cc/resources/ui_resource_request.h"
#include "cc/scheduler/begin_frame_source.h"
#include "cc/trees/layer_tree_host_client.h"
#include "cc/trees/layer_tree_host_common.h"
#include "cc/trees/layer_tree_host_impl.h"
#include "cc/trees/layer_tree_impl.h"
#include "cc/trees/occlusion_tracker.h"
#include "cc/trees/single_thread_proxy.h"
#include "cc/trees/thread_proxy.h"
#include "cc/trees/tree_synchronizer.h"
#include "ui/gfx/geometry/size_conversions.h"
#include "ui/gfx/geometry/vector2d_conversions.h"

namespace {
static base::StaticAtomicSequenceNumber s_layer_tree_host_sequence_number;
}

namespace cc {

scoped_ptr<LayerTreeHost> LayerTreeHost::CreateThreaded(
    LayerTreeHostClient* client,
    SharedBitmapManager* shared_bitmap_manager,
    gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager,
    TaskGraphRunner* task_graph_runner,
    const LayerTreeSettings& settings,
    scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
    scoped_refptr<base::SingleThreadTaskRunner> impl_task_runner,
    scoped_ptr<BeginFrameSource> external_begin_frame_source) {
  DCHECK(main_task_runner.get());
  DCHECK(impl_task_runner.get());
  scoped_ptr<LayerTreeHost> layer_tree_host(new LayerTreeHost(
      client, shared_bitmap_manager, gpu_memory_buffer_manager,
      task_graph_runner, settings));
  layer_tree_host->InitializeThreaded(main_task_runner,
                                      impl_task_runner,
                                      external_begin_frame_source.Pass());
  return layer_tree_host.Pass();
}

scoped_ptr<LayerTreeHost> LayerTreeHost::CreateSingleThreaded(
    LayerTreeHostClient* client,
    LayerTreeHostSingleThreadClient* single_thread_client,
    SharedBitmapManager* shared_bitmap_manager,
    gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager,
    TaskGraphRunner* task_graph_runner,
    const LayerTreeSettings& settings,
    scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
    scoped_ptr<BeginFrameSource> external_begin_frame_source) {
  scoped_ptr<LayerTreeHost> layer_tree_host(new LayerTreeHost(
      client, shared_bitmap_manager, gpu_memory_buffer_manager,
      task_graph_runner, settings));
  layer_tree_host->InitializeSingleThreaded(single_thread_client,
                                            main_task_runner,
                                            external_begin_frame_source.Pass());
  return layer_tree_host.Pass();
}
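
// Usage sketch (illustrative only; the names below belong to a hypothetical
// embedder and are not defined in this file): a multi-threaded embedder
// creates the host through the threaded factory and hands it both task
// runners, e.g.
//
//   scoped_ptr<LayerTreeHost> host = LayerTreeHost::CreateThreaded(
//       my_client, my_bitmap_manager, my_gpu_memory_buffer_manager,
//       my_task_graph_runner, my_settings, main_task_runner, impl_task_runner,
//       my_begin_frame_source.Pass());
//
// Single-threaded embedders call CreateSingleThreaded() with an additional
// LayerTreeHostSingleThreadClient and no impl task runner.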

LayerTreeHost::LayerTreeHost(
    LayerTreeHostClient* client,
    SharedBitmapManager* shared_bitmap_manager,
    gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager,
    TaskGraphRunner* task_graph_runner,
    const LayerTreeSettings& settings)
    : micro_benchmark_controller_(this),
      next_ui_resource_id_(1),
      inside_begin_main_frame_(false),
      needs_full_tree_sync_(true),
      client_(client),
      source_frame_number_(0),
      rendering_stats_instrumentation_(RenderingStatsInstrumentation::Create()),
      output_surface_lost_(true),
      settings_(settings),
      debug_state_(settings.initial_debug_state),
      top_controls_shrink_blink_size_(false),
      top_controls_height_(0.f),
      top_controls_shown_ratio_(0.f),
      device_scale_factor_(1.f),
      visible_(true),
      page_scale_factor_(1.f),
      min_page_scale_factor_(1.f),
      max_page_scale_factor_(1.f),
      has_gpu_rasterization_trigger_(false),
      content_is_suitable_for_gpu_rasterization_(true),
      gpu_rasterization_histogram_recorded_(false),
      background_color_(SK_ColorWHITE),
      has_transparent_background_(false),
      partial_texture_update_requests_(0),
      did_complete_scale_animation_(false),
      in_paint_layer_contents_(false),
      id_(s_layer_tree_host_sequence_number.GetNext() + 1),
      next_commit_forces_redraw_(false),
      shared_bitmap_manager_(shared_bitmap_manager),
      gpu_memory_buffer_manager_(gpu_memory_buffer_manager),
      task_graph_runner_(task_graph_runner),
      surface_id_namespace_(0u),
      next_surface_sequence_(1u) {
  if (settings_.accelerated_animation_enabled)
    animation_registrar_ = AnimationRegistrar::Create();
  rendering_stats_instrumentation_->set_record_rendering_stats(
      debug_state_.RecordRenderingStats());
}

void LayerTreeHost::InitializeThreaded(
    scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
    scoped_refptr<base::SingleThreadTaskRunner> impl_task_runner,
    scoped_ptr<BeginFrameSource> external_begin_frame_source) {
  InitializeProxy(ThreadProxy::Create(this,
                                      main_task_runner,
                                      impl_task_runner,
                                      external_begin_frame_source.Pass()));
}

void LayerTreeHost::InitializeSingleThreaded(
    LayerTreeHostSingleThreadClient* single_thread_client,
    scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
    scoped_ptr<BeginFrameSource> external_begin_frame_source) {
  InitializeProxy(
      SingleThreadProxy::Create(this,
                                single_thread_client,
                                main_task_runner,
                                external_begin_frame_source.Pass()));
}

void LayerTreeHost::InitializeForTesting(scoped_ptr<Proxy> proxy_for_testing) {
  InitializeProxy(proxy_for_testing.Pass());
}

void LayerTreeHost::InitializeProxy(scoped_ptr<Proxy> proxy) {
  TRACE_EVENT0("cc", "LayerTreeHost::InitializeForReal");

  proxy_ = proxy.Pass();
  proxy_->Start();
  if (settings_.accelerated_animation_enabled) {
    animation_registrar_->set_supports_scroll_animations(
        proxy_->SupportsImplScrolling());
  }
}

LayerTreeHost::~LayerTreeHost() {
  TRACE_EVENT0("cc", "LayerTreeHost::~LayerTreeHost");

  if (root_layer_.get())
    root_layer_->SetLayerTreeHost(NULL);

  DCHECK(swap_promise_monitor_.empty());

  BreakSwapPromises(SwapPromise::COMMIT_FAILS);

  if (proxy_) {
    DCHECK(proxy_->IsMainThread());
    proxy_->Stop();
  }

  // We must clear any pointers into the layer tree prior to destroying it.
  RegisterViewportLayers(NULL, NULL, NULL, NULL);

  if (root_layer_.get()) {
    // The layer tree must be destroyed before the layer tree host. We've
    // made a contract with our animation controllers that the registrar
    // will outlive them, and we must make good.
    root_layer_ = NULL;
  }
}

void LayerTreeHost::SetLayerTreeHostClientReady() {
  proxy_->SetLayerTreeHostClientReady();
}

void LayerTreeHost::DeleteContentsTexturesOnImplThread(
    ResourceProvider* resource_provider) {
  DCHECK(proxy_->IsImplThread());
  if (contents_texture_manager_)
    contents_texture_manager_->ClearAllMemory(resource_provider);
}

void LayerTreeHost::WillBeginMainFrame() {
  devtools_instrumentation::WillBeginMainThreadFrame(id(),
                                                     source_frame_number());
  client_->WillBeginMainFrame();
}

void LayerTreeHost::DidBeginMainFrame() {
  client_->DidBeginMainFrame();
}

void LayerTreeHost::BeginMainFrameNotExpectedSoon() {
  client_->BeginMainFrameNotExpectedSoon();
}

void LayerTreeHost::BeginMainFrame(const BeginFrameArgs& args) {
  inside_begin_main_frame_ = true;
  client_->BeginMainFrame(args);
  inside_begin_main_frame_ = false;
}

void LayerTreeHost::DidStopFlinging() {
  proxy_->MainThreadHasStoppedFlinging();
}

void LayerTreeHost::Layout() {
  client_->Layout();
}

void LayerTreeHost::BeginCommitOnImplThread(LayerTreeHostImpl* host_impl) {
  DCHECK(proxy_->IsImplThread());
  TRACE_EVENT0("cc", "LayerTreeHost::CommitTo");
}

// This function commits the LayerTreeHost to an impl tree. When modifying
// this function, keep in mind that the function *runs* on the impl thread! Any
// code that is logically a main thread operation, e.g. deletion of a Layer,
// should be delayed until the LayerTreeHost::CommitComplete, which will run
// after the commit, but on the main thread.
void LayerTreeHost::FinishCommitOnImplThread(LayerTreeHostImpl* host_impl) {
  DCHECK(proxy_->IsImplThread());

  // If there are linked evicted backings, these backings' resources may be put
  // into the impl tree, so we can't draw yet. Determine this before clearing
  // all evicted backings.
  bool new_impl_tree_has_no_evicted_resources = false;
  if (contents_texture_manager_) {
    new_impl_tree_has_no_evicted_resources =
        !contents_texture_manager_->LinkedEvictedBackingsExist();

    // If the memory limit has been increased since this now-finishing
    // commit began, and the extra now-available memory would have been used,
    // then request another commit.
    if (contents_texture_manager_->MaxMemoryLimitBytes() <
            host_impl->memory_allocation_limit_bytes() &&
        contents_texture_manager_->MaxMemoryLimitBytes() <
            contents_texture_manager_->MaxMemoryNeededBytes()) {
      host_impl->SetNeedsCommit();
    }

    host_impl->set_max_memory_needed_bytes(
        contents_texture_manager_->MaxMemoryNeededBytes());

    contents_texture_manager_->UpdateBackingsState(
        host_impl->resource_provider());
    contents_texture_manager_->ReduceMemory(host_impl->resource_provider());
  }

  bool is_new_trace;
  TRACE_EVENT_IS_NEW_TRACE(&is_new_trace);
  if (is_new_trace &&
      frame_viewer_instrumentation::IsTracingLayerTreeSnapshots() &&
      root_layer()) {
    LayerTreeHostCommon::CallFunctionForSubtree(
        root_layer(), [](Layer* layer) { layer->DidBeginTracing(); });
  }

  LayerTreeImpl* sync_tree = host_impl->sync_tree();

  if (next_commit_forces_redraw_) {
    sync_tree->ForceRedrawNextActivation();
    next_commit_forces_redraw_ = false;
  }

  sync_tree->set_source_frame_number(source_frame_number());

  if (needs_full_tree_sync_) {
    sync_tree->SetRootLayer(TreeSynchronizer::SynchronizeTrees(
        root_layer(), sync_tree->DetachLayerTree(), sync_tree));
  }
  sync_tree->set_needs_full_tree_sync(needs_full_tree_sync_);
  needs_full_tree_sync_ = false;

  if (hud_layer_.get()) {
    LayerImpl* hud_impl = LayerTreeHostCommon::FindLayerInSubtree(
        sync_tree->root_layer(), hud_layer_->id());
    sync_tree->set_hud_layer(static_cast<HeadsUpDisplayLayerImpl*>(hud_impl));
  } else {
    sync_tree->set_hud_layer(NULL);
  }

  sync_tree->set_background_color(background_color_);
  sync_tree->set_has_transparent_background(has_transparent_background_);

  if (page_scale_layer_.get() && inner_viewport_scroll_layer_.get()) {
    sync_tree->SetViewportLayersFromIds(
        overscroll_elasticity_layer_.get() ? overscroll_elasticity_layer_->id()
                                           : Layer::INVALID_ID,
        page_scale_layer_->id(), inner_viewport_scroll_layer_->id(),
        outer_viewport_scroll_layer_.get() ? outer_viewport_scroll_layer_->id()
                                           : Layer::INVALID_ID);
    DCHECK(inner_viewport_scroll_layer_->IsContainerForFixedPositionLayers());
  } else {
    sync_tree->ClearViewportLayers();
  }

  sync_tree->RegisterSelection(selection_start_, selection_end_);

  sync_tree->PushPageScaleFromMainThread(
      page_scale_factor_, min_page_scale_factor_, max_page_scale_factor_);
  sync_tree->elastic_overscroll()->PushFromMainThread(elastic_overscroll_);
  if (sync_tree->IsActiveTree())
    sync_tree->elastic_overscroll()->PushPendingToActive();

  sync_tree->PassSwapPromises(&swap_promise_list_);

  sync_tree->set_top_controls_shrink_blink_size(
      top_controls_shrink_blink_size_);
  sync_tree->set_top_controls_height(top_controls_height_);
  sync_tree->PushTopControlsFromMainThread(top_controls_shown_ratio_);

  host_impl->SetUseGpuRasterization(UseGpuRasterization());
  host_impl->set_gpu_rasterization_status(GetGpuRasterizationStatus());
  RecordGpuRasterizationHistogram();

  host_impl->SetViewportSize(device_viewport_size_);
  host_impl->SetDeviceScaleFactor(device_scale_factor_);
  host_impl->SetDebugState(debug_state_);
  if (pending_page_scale_animation_) {
    sync_tree->SetPendingPageScaleAnimation(
        pending_page_scale_animation_.Pass());
  }

  if (!ui_resource_request_queue_.empty()) {
    sync_tree->set_ui_resource_request_queue(ui_resource_request_queue_);
    ui_resource_request_queue_.clear();
  }

  DCHECK(!sync_tree->ViewportSizeInvalid());

  if (new_impl_tree_has_no_evicted_resources) {
    if (sync_tree->ContentsTexturesPurged())
      sync_tree->ResetContentsTexturesPurged();
  }

  sync_tree->set_has_ever_been_drawn(false);

  {
    TRACE_EVENT0("cc", "LayerTreeHost::PushProperties");
    TreeSynchronizer::PushProperties(root_layer(), sync_tree->root_layer());
  }

  micro_benchmark_controller_.ScheduleImplBenchmarks(host_impl);
}

void LayerTreeHost::WillCommit() {
  client_->WillCommit();
}

void LayerTreeHost::UpdateHudLayer() {
  if (debug_state_.ShowHudInfo()) {
    if (!hud_layer_.get())
      hud_layer_ = HeadsUpDisplayLayer::Create();

    if (root_layer_.get() && !hud_layer_->parent())
      root_layer_->AddChild(hud_layer_);
  } else if (hud_layer_.get()) {
    hud_layer_->RemoveFromParent();
    hud_layer_ = NULL;
  }
}

void LayerTreeHost::CommitComplete() {
  source_frame_number_++;
  client_->DidCommit();
  if (did_complete_scale_animation_) {
    client_->DidCompletePageScaleAnimation();
    did_complete_scale_animation_ = false;
  }
}

void LayerTreeHost::SetOutputSurface(scoped_ptr<OutputSurface> surface) {
  TRACE_EVENT0("cc", "LayerTreeHost::SetOutputSurface");
  DCHECK(output_surface_lost_);
  DCHECK(surface);

  proxy_->SetOutputSurface(surface.Pass());
}

void LayerTreeHost::RequestNewOutputSurface() {
  client_->RequestNewOutputSurface();
}

void LayerTreeHost::DidInitializeOutputSurface() {
  output_surface_lost_ = false;

  if (!contents_texture_manager_ && !settings_.impl_side_painting) {
    contents_texture_manager_ =
        PrioritizedResourceManager::Create(proxy_.get());
    surface_memory_placeholder_ =
        contents_texture_manager_->CreateTexture(gfx::Size(), RGBA_8888);
  }

  if (root_layer()) {
    LayerTreeHostCommon::CallFunctionForSubtree(
        root_layer(), [](Layer* layer) { layer->OnOutputSurfaceCreated(); });
  }

  client_->DidInitializeOutputSurface();
}

void LayerTreeHost::DidFailToInitializeOutputSurface() {
  DCHECK(output_surface_lost_);
  client_->DidFailToInitializeOutputSurface();
}

scoped_ptr<LayerTreeHostImpl> LayerTreeHost::CreateLayerTreeHostImpl(
    LayerTreeHostImplClient* client) {
  DCHECK(proxy_->IsImplThread());
  scoped_ptr<LayerTreeHostImpl> host_impl = LayerTreeHostImpl::Create(
      settings_, client, proxy_.get(), rendering_stats_instrumentation_.get(),
      shared_bitmap_manager_, gpu_memory_buffer_manager_, task_graph_runner_,
      id_);
  host_impl->SetUseGpuRasterization(UseGpuRasterization());
  shared_bitmap_manager_ = NULL;
  gpu_memory_buffer_manager_ = NULL;
  task_graph_runner_ = NULL;
  top_controls_manager_weak_ptr_ =
      host_impl->top_controls_manager()->AsWeakPtr();
  input_handler_weak_ptr_ = host_impl->AsWeakPtr();
  return host_impl.Pass();
}

void LayerTreeHost::DidLoseOutputSurface() {
  TRACE_EVENT0("cc", "LayerTreeHost::DidLoseOutputSurface");
  DCHECK(proxy_->IsMainThread());

  if (output_surface_lost_)
    return;

  output_surface_lost_ = true;
  SetNeedsCommit();
}

void LayerTreeHost::FinishAllRendering() {
  proxy_->FinishAllRendering();
}

void LayerTreeHost::SetDeferCommits(bool defer_commits) {
  proxy_->SetDeferCommits(defer_commits);
}

void LayerTreeHost::SetNeedsDisplayOnAllLayers() {
  std::stack<Layer*> layer_stack;
  layer_stack.push(root_layer());
  while (!layer_stack.empty()) {
    Layer* current_layer = layer_stack.top();
    layer_stack.pop();
    current_layer->SetNeedsDisplay();
    for (unsigned int i = 0; i < current_layer->children().size(); i++) {
      layer_stack.push(current_layer->child_at(i));
    }
  }
}

const RendererCapabilities& LayerTreeHost::GetRendererCapabilities() const {
  return proxy_->GetRendererCapabilities();
}

void LayerTreeHost::SetNeedsAnimate() {
  proxy_->SetNeedsAnimate();
  NotifySwapPromiseMonitorsOfSetNeedsCommit();
}

void LayerTreeHost::SetNeedsUpdateLayers() {
  proxy_->SetNeedsUpdateLayers();
  NotifySwapPromiseMonitorsOfSetNeedsCommit();
}

void LayerTreeHost::SetNeedsCommit() {
  if (!prepaint_callback_.IsCancelled()) {
    TRACE_EVENT_INSTANT0("cc",
                         "LayerTreeHost::SetNeedsCommit::cancel prepaint",
                         TRACE_EVENT_SCOPE_THREAD);
    prepaint_callback_.Cancel();
  }
  proxy_->SetNeedsCommit();
  NotifySwapPromiseMonitorsOfSetNeedsCommit();
}

void LayerTreeHost::SetNeedsFullTreeSync() {
  needs_full_tree_sync_ = true;
  SetNeedsCommit();
}

void LayerTreeHost::SetNeedsRedraw() {
  SetNeedsRedrawRect(gfx::Rect(device_viewport_size_));
}

void LayerTreeHost::SetNeedsRedrawRect(const gfx::Rect& damage_rect) {
  proxy_->SetNeedsRedraw(damage_rect);
}

bool LayerTreeHost::CommitRequested() const {
  return proxy_->CommitRequested();
}

bool LayerTreeHost::BeginMainFrameRequested() const {
  return proxy_->BeginMainFrameRequested();
}

void LayerTreeHost::SetNextCommitWaitsForActivation() {
  proxy_->SetNextCommitWaitsForActivation();
}

void LayerTreeHost::SetNextCommitForcesRedraw() {
  next_commit_forces_redraw_ = true;
}

void LayerTreeHost::SetAnimationEvents(
    scoped_ptr<AnimationEventsVector> events) {
  DCHECK(proxy_->IsMainThread());
  animation_registrar_->SetAnimationEvents(events.Pass());
}

void LayerTreeHost::SetRootLayer(scoped_refptr<Layer> root_layer) {
  if (root_layer_.get() == root_layer.get())
    return;

  if (root_layer_.get())
    root_layer_->SetLayerTreeHost(NULL);
  root_layer_ = root_layer;
  if (root_layer_.get()) {
    DCHECK(!root_layer_->parent());
    root_layer_->SetLayerTreeHost(this);
  }

  if (hud_layer_.get())
    hud_layer_->RemoveFromParent();

  // Reset gpu rasterization flag.
  // This flag is sticky until a new tree comes along.
  content_is_suitable_for_gpu_rasterization_ = true;
  gpu_rasterization_histogram_recorded_ = false;

  SetNeedsFullTreeSync();
}

void LayerTreeHost::SetDebugState(const LayerTreeDebugState& debug_state) {
  LayerTreeDebugState new_debug_state =
      LayerTreeDebugState::Unite(settings_.initial_debug_state, debug_state);

  if (LayerTreeDebugState::Equal(debug_state_, new_debug_state))
    return;

  debug_state_ = new_debug_state;

  rendering_stats_instrumentation_->set_record_rendering_stats(
      debug_state_.RecordRenderingStats());

  SetNeedsCommit();
  proxy_->SetDebugState(debug_state);
}

bool LayerTreeHost::UseGpuRasterization() const {
  if (settings_.gpu_rasterization_forced) {
    return true;
  } else if (settings_.gpu_rasterization_enabled) {
    return has_gpu_rasterization_trigger_ &&
           content_is_suitable_for_gpu_rasterization_;
  } else {
    return false;
  }
}

GpuRasterizationStatus LayerTreeHost::GetGpuRasterizationStatus() const {
  if (settings_.gpu_rasterization_forced) {
    return GpuRasterizationStatus::ON_FORCED;
  } else if (settings_.gpu_rasterization_enabled) {
    if (!has_gpu_rasterization_trigger_) {
      return GpuRasterizationStatus::OFF_VIEWPORT;
    } else if (!content_is_suitable_for_gpu_rasterization_) {
      return GpuRasterizationStatus::OFF_CONTENT;
    } else {
      return GpuRasterizationStatus::ON;
    }
  }
  return GpuRasterizationStatus::OFF_DEVICE;
}
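
// In summary: gpu_rasterization_forced turns GPU rasterization on
// unconditionally (ON_FORCED); gpu_rasterization_enabled turns it on only
// while both the viewport-based trigger is set and all painted content so far
// has been suitable for GPU rasterization (otherwise the status is
// OFF_VIEWPORT or OFF_CONTENT); with neither setting, the status is
// OFF_DEVICE.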

void LayerTreeHost::SetHasGpuRasterizationTrigger(bool has_trigger) {
  if (has_trigger == has_gpu_rasterization_trigger_)
    return;

  has_gpu_rasterization_trigger_ = has_trigger;
  TRACE_EVENT_INSTANT1("cc",
                       "LayerTreeHost::SetHasGpuRasterizationTrigger",
                       TRACE_EVENT_SCOPE_THREAD,
                       "has_trigger",
                       has_gpu_rasterization_trigger_);
}

void LayerTreeHost::SetViewportSize(const gfx::Size& device_viewport_size) {
  if (device_viewport_size == device_viewport_size_)
    return;

  device_viewport_size_ = device_viewport_size;

  SetNeedsCommit();
}

void LayerTreeHost::SetTopControlsHeight(float height, bool shrink) {
  if (top_controls_height_ == height &&
      top_controls_shrink_blink_size_ == shrink)
    return;

  top_controls_height_ = height;
  top_controls_shrink_blink_size_ = shrink;
  SetNeedsCommit();
}

void LayerTreeHost::SetTopControlsShownRatio(float ratio) {
  if (top_controls_shown_ratio_ == ratio)
    return;

  top_controls_shown_ratio_ = ratio;
  SetNeedsCommit();
}

void LayerTreeHost::ApplyPageScaleDeltaFromImplSide(float page_scale_delta) {
  DCHECK(CommitRequested());
  page_scale_factor_ *= page_scale_delta;
}

void LayerTreeHost::SetPageScaleFactorAndLimits(float page_scale_factor,
                                                float min_page_scale_factor,
                                                float max_page_scale_factor) {
  if (page_scale_factor == page_scale_factor_ &&
      min_page_scale_factor == min_page_scale_factor_ &&
      max_page_scale_factor == max_page_scale_factor_)
    return;

  page_scale_factor_ = page_scale_factor;
  min_page_scale_factor_ = min_page_scale_factor;
  max_page_scale_factor_ = max_page_scale_factor;
  SetNeedsCommit();
}

void LayerTreeHost::SetVisible(bool visible) {
  if (visible_ == visible)
    return;
  visible_ = visible;
  if (!visible)
    ReduceMemoryUsage();
  proxy_->SetVisible(visible);
}

void LayerTreeHost::SetThrottleFrameProduction(bool throttle) {
  proxy_->SetThrottleFrameProduction(throttle);
}

void LayerTreeHost::StartPageScaleAnimation(const gfx::Vector2d& target_offset,
                                            bool use_anchor,
                                            float scale,
                                            base::TimeDelta duration) {
  pending_page_scale_animation_.reset(
      new PendingPageScaleAnimation(
          target_offset,
          use_anchor,
          scale,
          duration));

  SetNeedsCommit();
}

void LayerTreeHost::NotifyInputThrottledUntilCommit() {
  proxy_->NotifyInputThrottledUntilCommit();
}

void LayerTreeHost::Composite(base::TimeTicks frame_begin_time) {
  DCHECK(!proxy_->HasImplThread());
  // This function is only valid when not using the scheduler.
  DCHECK(!settings_.single_thread_proxy_scheduler);
  SingleThreadProxy* proxy = static_cast<SingleThreadProxy*>(proxy_.get());

  SetLayerTreeHostClientReady();
  proxy->CompositeImmediately(frame_begin_time);
}

bool LayerTreeHost::UpdateLayers(ResourceUpdateQueue* queue) {
  DCHECK(!output_surface_lost_);

  if (!root_layer())
    return false;

  DCHECK(!root_layer()->parent());

  bool result = UpdateLayers(root_layer(), queue);

  micro_benchmark_controller_.DidUpdateLayers();

  return result || next_commit_forces_redraw_;
}

void LayerTreeHost::DidCompletePageScaleAnimation() {
  did_complete_scale_animation_ = true;
}

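// Returns the first scrollable layer found in a pre-order (parent before
// children) walk of the subtree rooted at |layer|, or NULL if none exists.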
static Layer* FindFirstScrollableLayer(Layer* layer) {
  if (!layer)
    return NULL;

  if (layer->scrollable())
    return layer;

  for (size_t i = 0; i < layer->children().size(); ++i) {
    Layer* found = FindFirstScrollableLayer(layer->children()[i].get());
    if (found)
      return found;
  }

  return NULL;
}

void LayerTreeHost::RecordGpuRasterizationHistogram() {
  // Gpu rasterization is only supported when impl-side painting is enabled.
  if (gpu_rasterization_histogram_recorded_ || !settings_.impl_side_painting)
    return;

  // Record how widely gpu rasterization is enabled.
  // This number takes device/gpu whitelisting/blacklisting into account.
  // Note that we do not consider the forced gpu rasterization mode, which is
  // mostly used for debugging purposes.
  UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuRasterizationEnabled",
                        settings_.gpu_rasterization_enabled);
  if (settings_.gpu_rasterization_enabled) {
    UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuRasterizationTriggered",
                          has_gpu_rasterization_trigger_);
    UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuRasterizationSuitableContent",
                          content_is_suitable_for_gpu_rasterization_);
    // Record how many pages actually get gpu rasterization when enabled.
    UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuRasterizationUsed",
                          (has_gpu_rasterization_trigger_ &&
                           content_is_suitable_for_gpu_rasterization_));
  }

  gpu_rasterization_histogram_recorded_ = true;
}

bool LayerTreeHost::UsingSharedMemoryResources() {
  return GetRendererCapabilities().using_shared_memory_resources;
}

bool LayerTreeHost::UpdateLayers(Layer* root_layer,
                                 ResourceUpdateQueue* queue) {
  TRACE_EVENT1("cc", "LayerTreeHost::UpdateLayers",
               "source_frame_number", source_frame_number());

  RenderSurfaceLayerList update_list;
  {
    UpdateHudLayer();

    Layer* root_scroll = FindFirstScrollableLayer(root_layer);
    Layer* page_scale_layer = page_scale_layer_.get();
    if (!page_scale_layer && root_scroll)
      page_scale_layer = root_scroll->parent();

    if (hud_layer_.get()) {
      hud_layer_->PrepareForCalculateDrawProperties(
          device_viewport_size(), device_scale_factor_);
    }

    TRACE_EVENT0("cc", "LayerTreeHost::UpdateLayers::CalcDrawProps");
    bool can_render_to_separate_surface = true;
    // TODO(vmpstr): Passing 0 as the current render surface layer list id
    // means that we won't be able to detect if a layer is part of
    // |update_list|. Change this if this information is required.
    int render_surface_layer_list_id = 0;
    LayerTreeHostCommon::CalcDrawPropsMainInputs inputs(
        root_layer, device_viewport_size(), gfx::Transform(),
        device_scale_factor_, page_scale_factor_, page_scale_layer,
        elastic_overscroll_, overscroll_elasticity_layer_.get(),
        GetRendererCapabilities().max_texture_size, settings_.can_use_lcd_text,
        settings_.layers_always_allowed_lcd_text,
        can_render_to_separate_surface,
        settings_.layer_transforms_should_scale_layer_contents,
        settings_.verify_property_trees, &update_list,
        render_surface_layer_list_id);
    LayerTreeHostCommon::CalculateDrawProperties(&inputs);
  }

  // Reset partial texture update requests.
  partial_texture_update_requests_ = 0;

  bool did_paint_content = false;
  bool need_more_updates = false;
  PaintLayerContents(
      update_list, queue, &did_paint_content, &need_more_updates);
  if (need_more_updates) {
    TRACE_EVENT0("cc", "LayerTreeHost::UpdateLayers::posting prepaint task");
    prepaint_callback_.Reset(base::Bind(&LayerTreeHost::TriggerPrepaint,
                                        base::Unretained(this)));
    static base::TimeDelta prepaint_delay =
        base::TimeDelta::FromMilliseconds(100);
    base::MessageLoop::current()->PostDelayedTask(
        FROM_HERE, prepaint_callback_.callback(), prepaint_delay);
  }

  return did_paint_content;
}

void LayerTreeHost::TriggerPrepaint() {
  prepaint_callback_.Cancel();
  TRACE_EVENT0("cc", "LayerTreeHost::TriggerPrepaint");
  SetNeedsCommit();
}

void LayerTreeHost::ReduceMemoryUsage() {
  if (!root_layer())
    return;

  LayerTreeHostCommon::CallFunctionForSubtree(
      root_layer(), [](Layer* layer) { layer->ReduceMemoryUsage(); });
}

void LayerTreeHost::SetPrioritiesForSurfaces(size_t surface_memory_bytes) {
  DCHECK(surface_memory_placeholder_);

  // Surfaces have a placeholder for their memory since they are managed
  // independently but should still be tracked and reduce other memory usage.
  surface_memory_placeholder_->SetTextureManager(
      contents_texture_manager_.get());
  surface_memory_placeholder_->set_request_priority(
      PriorityCalculator::RenderSurfacePriority());
  surface_memory_placeholder_->SetToSelfManagedMemoryPlaceholder(
      surface_memory_bytes);
}

void LayerTreeHost::SetPrioritiesForLayers(
    const RenderSurfaceLayerList& update_list) {
  PriorityCalculator calculator;
  typedef LayerIterator<Layer> LayerIteratorType;
  LayerIteratorType end = LayerIteratorType::End(&update_list);
  for (LayerIteratorType it = LayerIteratorType::Begin(&update_list);
       it != end;
       ++it) {
    if (it.represents_itself()) {
      it->SetTexturePriorities(calculator);
    } else if (it.represents_target_render_surface()) {
      if (it->mask_layer())
        it->mask_layer()->SetTexturePriorities(calculator);
      if (it->replica_layer() && it->replica_layer()->mask_layer())
        it->replica_layer()->mask_layer()->SetTexturePriorities(calculator);
    }
  }
}

void LayerTreeHost::PrioritizeTextures(
    const RenderSurfaceLayerList& render_surface_layer_list) {
  if (!contents_texture_manager_)
    return;

  contents_texture_manager_->ClearPriorities();

  size_t memory_for_render_surfaces_metric =
      CalculateMemoryForRenderSurfaces(render_surface_layer_list);

  SetPrioritiesForLayers(render_surface_layer_list);
  SetPrioritiesForSurfaces(memory_for_render_surfaces_metric);

  contents_texture_manager_->PrioritizeTextures();
}

size_t LayerTreeHost::CalculateMemoryForRenderSurfaces(
    const RenderSurfaceLayerList& update_list) {
  size_t readback_bytes = 0;
  size_t contents_texture_bytes = 0;

  // Start iteration at 1 to skip the root surface as it does not have a
  // texture cost.
  for (size_t i = 1; i < update_list.size(); ++i) {
    Layer* render_surface_layer = update_list.at(i);
    RenderSurface* render_surface = render_surface_layer->render_surface();

    size_t bytes =
        Resource::MemorySizeBytes(render_surface->content_rect().size(),
                                  RGBA_8888);
    contents_texture_bytes += bytes;

    if (render_surface_layer->background_filters().IsEmpty() &&
        render_surface_layer->uses_default_blend_mode())
      continue;

    if (!readback_bytes) {
      readback_bytes = Resource::MemorySizeBytes(device_viewport_size_,
                                                 RGBA_8888);
    }
  }
  return readback_bytes + contents_texture_bytes;
}
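
// Rough arithmetic for the accounting above (illustrative, assuming the usual
// 4 bytes per RGBA_8888 pixel): a 256x256 render surface contributes
// 256 * 256 * 4 = 262,144 bytes to |contents_texture_bytes|, and the first
// surface that has background filters or a non-default blend mode adds one
// viewport-sized allocation to |readback_bytes|.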

void LayerTreeHost::PaintMasksForRenderSurface(Layer* render_surface_layer,
                                               ResourceUpdateQueue* queue,
                                               bool* did_paint_content,
                                               bool* need_more_updates) {
  // Note: Masks and replicas only exist for layers that own render surfaces.
  // If we reach this point in code, we already know that at least something
  // will be drawn into this render surface, so the mask and replica should be
  // painted.

  Layer* mask_layer = render_surface_layer->mask_layer();
  if (mask_layer) {
    *did_paint_content |= mask_layer->Update(queue, NULL);
    *need_more_updates |= mask_layer->NeedMoreUpdates();
  }

  Layer* replica_mask_layer =
      render_surface_layer->replica_layer() ?
      render_surface_layer->replica_layer()->mask_layer() : NULL;
  if (replica_mask_layer) {
    *did_paint_content |= replica_mask_layer->Update(queue, NULL);
    *need_more_updates |= replica_mask_layer->NeedMoreUpdates();
  }
}

void LayerTreeHost::PaintLayerContents(
    const RenderSurfaceLayerList& render_surface_layer_list,
    ResourceUpdateQueue* queue,
    bool* did_paint_content,
    bool* need_more_updates) {
  OcclusionTracker<Layer> occlusion_tracker(
      root_layer_->render_surface()->content_rect());
  occlusion_tracker.set_minimum_tracking_size(
      settings_.minimum_occlusion_tracking_size);

  PrioritizeTextures(render_surface_layer_list);

  in_paint_layer_contents_ = true;

  // Iterates front-to-back to allow for testing occlusion and performing
  // culling during the tree walk.
  typedef LayerIterator<Layer> LayerIteratorType;
  LayerIteratorType end = LayerIteratorType::End(&render_surface_layer_list);
  for (LayerIteratorType it =
           LayerIteratorType::Begin(&render_surface_layer_list);
       it != end;
       ++it) {
    occlusion_tracker.EnterLayer(it);

    if (it.represents_target_render_surface()) {
      PaintMasksForRenderSurface(
          *it, queue, did_paint_content, need_more_updates);
    } else if (it.represents_itself()) {
      DCHECK(!it->paint_properties().bounds.IsEmpty());
      *did_paint_content |= it->Update(queue, &occlusion_tracker);
      *need_more_updates |= it->NeedMoreUpdates();
      // Note the '&&' with previous is-suitable state.
      // This means that once the layer-tree becomes unsuitable for gpu
      // rasterization due to some content, it will continue to be unsuitable
      // even if that content is replaced by gpu-friendly content.
      // This is to avoid switching back-and-forth between gpu and sw
      // rasterization which may be both bad for performance and visually
      // jarring.
      content_is_suitable_for_gpu_rasterization_ &=
          it->IsSuitableForGpuRasterization();
    }

    occlusion_tracker.LeaveLayer(it);
  }

  in_paint_layer_contents_ = false;
}

void LayerTreeHost::ApplyScrollAndScale(ScrollAndScaleSet* info) {
  ScopedPtrVector<SwapPromise>::iterator it = info->swap_promises.begin();
  for (; it != info->swap_promises.end(); ++it) {
    scoped_ptr<SwapPromise> swap_promise(info->swap_promises.take(it));
    TRACE_EVENT_FLOW_STEP0("input",
                           "LatencyInfo.Flow",
                           TRACE_ID_DONT_MANGLE(swap_promise->TraceId()),
                           "Main thread scroll update");
    QueueSwapPromise(swap_promise.Pass());
  }

  gfx::Vector2dF inner_viewport_scroll_delta;
  gfx::Vector2dF outer_viewport_scroll_delta;

  if (root_layer_.get()) {
    for (size_t i = 0; i < info->scrolls.size(); ++i) {
      Layer* layer = LayerTreeHostCommon::FindLayerInSubtree(
          root_layer_.get(), info->scrolls[i].layer_id);
      if (!layer)
        continue;
      if (layer == outer_viewport_scroll_layer_.get()) {
        outer_viewport_scroll_delta += info->scrolls[i].scroll_delta;
      } else if (layer == inner_viewport_scroll_layer_.get()) {
        inner_viewport_scroll_delta += info->scrolls[i].scroll_delta;
      } else {
        layer->SetScrollOffsetFromImplSide(
            gfx::ScrollOffsetWithDelta(layer->scroll_offset(),
                                       info->scrolls[i].scroll_delta));
      }
    }
  }

  if (!inner_viewport_scroll_delta.IsZero() ||
      !outer_viewport_scroll_delta.IsZero() || info->page_scale_delta != 1.f ||
      !info->elastic_overscroll_delta.IsZero() || info->top_controls_delta) {
    // Preemptively apply the scroll offset and scale delta here before sending
    // it to the client. If the client comes back and sets it to the same
    // value, then the layer can early out without needing a full commit.
    if (inner_viewport_scroll_layer_.get()) {
      inner_viewport_scroll_layer_->SetScrollOffsetFromImplSide(
          gfx::ScrollOffsetWithDelta(
              inner_viewport_scroll_layer_->scroll_offset(),
              inner_viewport_scroll_delta));
    }

    if (outer_viewport_scroll_layer_.get()) {
      outer_viewport_scroll_layer_->SetScrollOffsetFromImplSide(
          gfx::ScrollOffsetWithDelta(
              outer_viewport_scroll_layer_->scroll_offset(),
              outer_viewport_scroll_delta));
    }

    ApplyPageScaleDeltaFromImplSide(info->page_scale_delta);
    elastic_overscroll_ += info->elastic_overscroll_delta;
    if (!settings_.use_pinch_virtual_viewport) {
      // TODO(miletus): Make sure either this code path is totally gone,
      // or revisit the flooring here if the old pinch viewport code path
      // is causing problems with fractional scroll offset.
      client_->ApplyViewportDeltas(
          gfx::ToFlooredVector2d(inner_viewport_scroll_delta +
                                 outer_viewport_scroll_delta),
          info->page_scale_delta, info->top_controls_delta);
    } else {
      // TODO(ccameron): pass the elastic overscroll here so that input events
      // may be translated appropriately.
      client_->ApplyViewportDeltas(
          inner_viewport_scroll_delta, outer_viewport_scroll_delta,
          info->elastic_overscroll_delta, info->page_scale_delta,
          info->top_controls_delta);
    }
  }
}

void LayerTreeHost::StartRateLimiter() {
  if (inside_begin_main_frame_)
    return;

  if (!rate_limit_timer_.IsRunning()) {
    rate_limit_timer_.Start(FROM_HERE,
                            base::TimeDelta(),
                            this,
                            &LayerTreeHost::RateLimit);
  }
}

void LayerTreeHost::StopRateLimiter() {
  rate_limit_timer_.Stop();
}

void LayerTreeHost::RateLimit() {
  // Force a no-op command on the compositor context, so that any ratelimiting
  // commands will wait for the compositing context, and therefore for the
  // SwapBuffers.
  proxy_->ForceSerializeOnSwapBuffers();
  client_->RateLimitSharedMainThreadContext();
}

bool LayerTreeHost::AlwaysUsePartialTextureUpdates() {
  if (!proxy_->GetRendererCapabilities().allow_partial_texture_updates)
    return false;
  return !proxy_->HasImplThread();
}

size_t LayerTreeHost::MaxPartialTextureUpdates() const {
  size_t max_partial_texture_updates = 0;
  if (proxy_->GetRendererCapabilities().allow_partial_texture_updates &&
      !settings_.impl_side_painting) {
    max_partial_texture_updates =
        std::min(settings_.max_partial_texture_updates,
                 proxy_->MaxPartialTextureUpdates());
  }
  return max_partial_texture_updates;
}

bool LayerTreeHost::RequestPartialTextureUpdate() {
  if (partial_texture_update_requests_ >= MaxPartialTextureUpdates())
    return false;

  partial_texture_update_requests_++;
  return true;
}

void LayerTreeHost::SetDeviceScaleFactor(float device_scale_factor) {
  if (device_scale_factor == device_scale_factor_)
    return;
  device_scale_factor_ = device_scale_factor;

  SetNeedsCommit();
}

void LayerTreeHost::UpdateTopControlsState(TopControlsState constraints,
                                           TopControlsState current,
                                           bool animate) {
  // Top controls are only used in threaded mode.
  proxy_->ImplThreadTaskRunner()->PostTask(
      FROM_HERE,
      base::Bind(&TopControlsManager::UpdateTopControlsState,
                 top_controls_manager_weak_ptr_,
                 constraints,
                 current,
                 animate));
}

void LayerTreeHost::AnimateLayers(base::TimeTicks monotonic_time) {
  if (!settings_.accelerated_animation_enabled)
    return;

  if (animation_registrar_->AnimateLayers(monotonic_time))
    animation_registrar_->UpdateAnimationState(true, NULL);
}

UIResourceId LayerTreeHost::CreateUIResource(UIResourceClient* client) {
  DCHECK(client);

  UIResourceId next_id = next_ui_resource_id_++;
  DCHECK(ui_resource_client_map_.find(next_id) ==
         ui_resource_client_map_.end());

  bool resource_lost = false;
  UIResourceRequest request(UIResourceRequest::UI_RESOURCE_CREATE, next_id,
                            client->GetBitmap(next_id, resource_lost));
  ui_resource_request_queue_.push_back(request);

  UIResourceClientData data;
  data.client = client;
  data.size = request.GetBitmap().GetSize();

  ui_resource_client_map_[request.GetId()] = data;
  return request.GetId();
}

// Deletes a UI resource. May safely be called more than once.
void LayerTreeHost::DeleteUIResource(UIResourceId uid) {
  UIResourceClientMap::iterator iter = ui_resource_client_map_.find(uid);
  if (iter == ui_resource_client_map_.end())
    return;

  UIResourceRequest request(UIResourceRequest::UI_RESOURCE_DELETE, uid);
  ui_resource_request_queue_.push_back(request);
  ui_resource_client_map_.erase(iter);
}

void LayerTreeHost::RecreateUIResources() {
  for (UIResourceClientMap::iterator iter = ui_resource_client_map_.begin();
       iter != ui_resource_client_map_.end();
       ++iter) {
    UIResourceId uid = iter->first;
    const UIResourceClientData& data = iter->second;
    bool resource_lost = true;
    UIResourceRequest request(UIResourceRequest::UI_RESOURCE_CREATE, uid,
                              data.client->GetBitmap(uid, resource_lost));
    ui_resource_request_queue_.push_back(request);
  }
}
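
// UI resource lifecycle, as implemented above: CreateUIResource() assigns an
// id and enqueues a UI_RESOURCE_CREATE request with the client's bitmap,
// DeleteUIResource() enqueues a UI_RESOURCE_DELETE request and forgets the
// client, and RecreateUIResources() re-enqueues CREATE requests (passing
// |resource_lost| as true to the client) for every live id when resources
// must be regenerated. The queued requests are handed to the sync tree in
// FinishCommitOnImplThread().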

// Returns the size of a resource given its id.
gfx::Size LayerTreeHost::GetUIResourceSize(UIResourceId uid) const {
  UIResourceClientMap::const_iterator iter = ui_resource_client_map_.find(uid);
  if (iter == ui_resource_client_map_.end())
    return gfx::Size();

  const UIResourceClientData& data = iter->second;
  return data.size;
}

void LayerTreeHost::RegisterViewportLayers(
    scoped_refptr<Layer> overscroll_elasticity_layer,
    scoped_refptr<Layer> page_scale_layer,
    scoped_refptr<Layer> inner_viewport_scroll_layer,
    scoped_refptr<Layer> outer_viewport_scroll_layer) {
  overscroll_elasticity_layer_ = overscroll_elasticity_layer;
  page_scale_layer_ = page_scale_layer;
  inner_viewport_scroll_layer_ = inner_viewport_scroll_layer;
  outer_viewport_scroll_layer_ = outer_viewport_scroll_layer;
}

void LayerTreeHost::RegisterSelection(const LayerSelectionBound& start,
                                      const LayerSelectionBound& end) {
  if (selection_start_ == start && selection_end_ == end)
    return;

  selection_start_ = start;
  selection_end_ = end;
  SetNeedsCommit();
}

int LayerTreeHost::ScheduleMicroBenchmark(
    const std::string& benchmark_name,
    scoped_ptr<base::Value> value,
    const MicroBenchmark::DoneCallback& callback) {
  return micro_benchmark_controller_.ScheduleRun(
      benchmark_name, value.Pass(), callback);
}

bool LayerTreeHost::SendMessageToMicroBenchmark(int id,
                                                scoped_ptr<base::Value> value) {
  return micro_benchmark_controller_.SendMessage(id, value.Pass());
}

void LayerTreeHost::InsertSwapPromiseMonitor(SwapPromiseMonitor* monitor) {
  swap_promise_monitor_.insert(monitor);
}

void LayerTreeHost::RemoveSwapPromiseMonitor(SwapPromiseMonitor* monitor) {
  swap_promise_monitor_.erase(monitor);
}

void LayerTreeHost::NotifySwapPromiseMonitorsOfSetNeedsCommit() {
  std::set<SwapPromiseMonitor*>::iterator it = swap_promise_monitor_.begin();
  for (; it != swap_promise_monitor_.end(); it++)
    (*it)->OnSetNeedsCommitOnMain();
}

void LayerTreeHost::QueueSwapPromise(scoped_ptr<SwapPromise> swap_promise) {
  DCHECK(swap_promise);
  swap_promise_list_.push_back(swap_promise.Pass());
}

void LayerTreeHost::BreakSwapPromises(SwapPromise::DidNotSwapReason reason) {
  for (size_t i = 0; i < swap_promise_list_.size(); i++)
    swap_promise_list_[i]->DidNotSwap(reason);
  swap_promise_list_.clear();
}
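
// Swap promise flow within this class: QueueSwapPromise() collects promises on
// the main thread, FinishCommitOnImplThread() passes the accumulated list to
// the sync tree via PassSwapPromises(), and BreakSwapPromises() notifies every
// queued promise with a DidNotSwapReason (as done with COMMIT_FAILS in the
// destructor) before clearing the list.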

void LayerTreeHost::set_surface_id_namespace(uint32_t id_namespace) {
  surface_id_namespace_ = id_namespace;
}

SurfaceSequence LayerTreeHost::CreateSurfaceSequence() {
  return SurfaceSequence(surface_id_namespace_, next_surface_sequence_++);
}

void LayerTreeHost::SetChildrenNeedBeginFrames(
    bool children_need_begin_frames) const {
  proxy_->SetChildrenNeedBeginFrames(children_need_begin_frames);
}

void LayerTreeHost::SendBeginFramesToChildren(
    const BeginFrameArgs& args) const {
  client_->SendBeginFramesToChildren(args);
}

}  // namespace cc