Index: cc/tile_manager.cc
diff --git a/cc/tile_manager.cc b/cc/tile_manager.cc
index 932c2a836d4181fd8c6f099fceaf76c5e223bbf4..424d3ad0f151f55f820d645db91e10ceab15eea3 100644
--- a/cc/tile_manager.cc
+++ b/cc/tile_manager.cc
@@ -66,6 +66,16 @@ class RasterThread : public base::Thread {
         base::Bind(&RasterThread::RunReply, base::Unretained(this), reply));
   }
 
+  void PostImageDecodingTaskAndReply(const tracked_objects::Location& from_here,
+                                     skia::LazyPixelRef* pixel_ref,
+                                     const base::Closure& reply) {
+    ++num_pending_tasks_;
+    message_loop_proxy()->PostTaskAndReply(
+        from_here,
+        base::Bind(&skia::LazyPixelRef::Decode, base::Unretained(pixel_ref)),
+        base::Bind(&RasterThread::RunReply, base::Unretained(this), reply));
+  }
+
  private:
   static void RunRasterTask(PicturePileImpl* picture_pile,
                             uint8_t* mapped_buffer,
@@ -246,6 +256,10 @@ void TileManager::ManageTiles() {
     }
     mts.bin = EVENTUALLY_BIN;
+
+    // Update all the SkPixelRefs this tile intersects.
+    mts.pending_pixel_refs.clear();
+    mts.has_image_decoding_info = false;

reveman 2012/12/11 01:35:42: Why do you clear this here? Shouldn't we just init…
qinmin 2012/12/11 04:30:15: Moved it to the ctor.
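
For reference, the fix qinmin describes (moving the reset into the constructor) would look roughly like the sketch below; the member layout of ManagedTileState is assumed from this diff, not taken from the landed patchset:

    // Hypothetical sketch: establish the decode bookkeeping defaults once in
    // the ctor instead of re-clearing them on every ManageTiles() pass.
    struct ManagedTileState {
      ManagedTileState() : has_image_decoding_info(false) {}

      bool has_image_decoding_info;
      std::list<skia::LazyPixelRef*> pending_pixel_refs;
      // ... other members of the real struct elided ...
    };
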
   }
 
   // Memory limit policy works by mapping some bin states to the NEVER bin.
 
@@ -284,7 +298,7 @@ void TileManager::ManageTiles() {
   AssignGpuMemoryToTiles();
 
   // Finally, kick the rasterizer.
-  DispatchMoreRasterTasks();
+  DispatchMoreTasks();
 }
 
 void TileManager::CheckForCompletedSetPixels() {
@@ -363,6 +377,15 @@ void TileManager::AssignGpuMemoryToTiles() {
                                        tiles_that_need_to_be_rasterized_.end());
 }
 
+void TileManager::GetImageInformationForTile(Tile* tile) {

reveman 2012/12/11 01:35:42: I don't think having this in a separate function i…
qinmin 2012/12/11 04:30:15: Done.
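
Per the "Done." above, the follow-up patchset inlines this helper at its single call site; a minimal sketch of what that inlining could look like, reusing the names from this diff:

    // Inside the caller (e.g. HasImageDecodingTasks): gather pixel refs
    // lazily on first use rather than through a one-off helper.
    ManagedTileState& managed_state = tile->managed_state();
    if (!managed_state.has_image_decoding_info) {
      const_cast<PicturePileImpl*>(tile->picture_pile())->GatherPixelRefs(
          tile->content_rect_, managed_state.pending_pixel_refs);
      managed_state.has_image_decoding_info = true;
    }
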
+  ManagedTileState& managed_state = tile->managed_state();
+  if (!managed_state.has_image_decoding_info) {
+    const_cast<PicturePileImpl*>(tile->picture_pile())->GatherPixelRefs(
+        tile->content_rect_, managed_state.pending_pixel_refs);
+    managed_state.has_image_decoding_info = true;
+  }
+}
+
 void TileManager::FreeResourcesForTile(Tile* tile) {
   ManagedTileState& managed_tile_state = tile->managed_state();
   DCHECK(managed_tile_state.can_be_freed);

@@ -370,28 +393,113 @@ void TileManager::FreeResourcesForTile(Tile* tile) {
   resource_pool_->ReleaseResource(managed_tile_state.resource.Pass());
 }
 
-void TileManager::DispatchMoreRasterTasks() {
-  while (!tiles_that_need_to_be_rasterized_.empty()) {
-    RasterThread* thread = 0;
-
-    for (RasterThreadVector::iterator it = raster_threads_.begin();
-         it != raster_threads_.end(); ++it) {
-      if ((*it)->num_pending_tasks() == kNumPendingRasterTasksPerThread)
-        continue;
-      // Check if this is the best thread we've found so far.
-      if (!thread || (*it)->num_pending_tasks() < thread->num_pending_tasks())
-        thread = *it;
+RasterThread* TileManager::GetFreeRasterThread() {
+  RasterThread* thread = 0;
+  for (RasterThreadVector::iterator it = raster_threads_.begin();
+       it != raster_threads_.end(); ++it) {
+    if ((*it)->num_pending_tasks() == kNumPendingRasterTasksPerThread)
+      continue;
+    // Check if this is the best thread we've found so far.
+    if (!thread || (*it)->num_pending_tasks() < thread->num_pending_tasks())
+      thread = *it;
+  }
+  return thread;
+}
+
+void TileManager::DispatchMoreTasks() {
+  // Because tiles in the image decoding list have higher priorities, we
+  // need to process those tiles first before we start to handle the tiles
+  // in the need_to_be_rasterized queue.
+  for (TileList::iterator it = tiles_have_image_decoding_tasks_.begin();
+       it != tiles_have_image_decoding_tasks_.end(); ++it) {
+    if (!HasImageDecodingTasks(*it)) {
+      RasterThread* thread = GetFreeRasterThread();
+      if (!thread)
+        return;
+      DispatchOneRasterTask(thread, *it);
+      tiles_have_image_decoding_tasks_.erase(it);

reveman 2012/12/11 01:35:42: Do you need a temporary "to be removed" list here?
qinmin 2012/12/11 04:30:15: Changed the code to erase(it++).
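
The erase(it++) idiom qinmin switches to is the standard way to remove std::list elements mid-iteration: the iterator is advanced before the erased node is destroyed, so it never dangles (unlike erase(it) in a loop that still does ++it, as above). A self-contained illustration with hypothetical names, ints standing in for tiles:

    #include <list>

    void RemoveFinishedItems(std::list<int>& items) {
      for (std::list<int>::iterator it = items.begin(); it != items.end();) {
        if (*it == 0)
          items.erase(it++);  // post-increment: advance first, then erase
        else
          ++it;
      }
    }
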
+    }
+  }
 
-    // Stop dispatching tasks when all threads are busy.
+  // Process all tiles in the need_to_be_rasterized queue. If a tile has
+  // image decoding tasks, put it to the back of the image decoding list.
+  while (!tiles_that_need_to_be_rasterized_.empty()) {
+    RasterThread* thread = GetFreeRasterThread();
     if (!thread)
       return;
-    DispatchOneRasterTask(thread, tiles_that_need_to_be_rasterized_.back());
+    Tile* tile = tiles_that_need_to_be_rasterized_.back();
+    if (HasImageDecodingTasks(tile))
+      tiles_have_image_decoding_tasks_.push_back(tile);
+    else
+      DispatchOneRasterTask(thread, tile);

reveman 2012/12/11 01:35:42: Hm, thread might not be the correct thread to disp…
qinmin 2012/12/11 04:30:15: You are right, somehow I missed that when rewritin…
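
The problem reveman flags here: the thread is picked before HasImageDecodingTasks() runs, and that call may itself dispatch decode tasks, so the chosen thread can be stale, or wasted when the tile is deferred to the decoding list. A sketch of one possible reordering, not the code that actually landed:

    while (!tiles_that_need_to_be_rasterized_.empty()) {
      Tile* tile = tiles_that_need_to_be_rasterized_.back();
      if (HasImageDecodingTasks(tile)) {
        tiles_have_image_decoding_tasks_.push_back(tile);
      } else {
        // Pick a thread only once we know a raster task will be posted.
        RasterThread* thread = GetFreeRasterThread();
        if (!thread)
          return;
        DispatchOneRasterTask(thread, tile);
      }
      tiles_that_need_to_be_rasterized_.pop_back();
    }
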
     tiles_that_need_to_be_rasterized_.pop_back();
   }
 }
 
+bool TileManager::HasImageDecodingTasks(Tile* tile) {

reveman 2012/12/11 01:35:42: Can you change this to something like: void Dispat…
qinmin 2012/12/11 04:30:15: Done.
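
reveman's suggested replacement is truncated on this page, so the exact signature is not recoverable; the gist is to make the dispatch side effect explicit instead of hiding it behind a Has...() predicate. One plausible sketch, with a hypothetical name, folding in the erase(it++) fix and eliding the dedup against pending_decode_tasks_ for brevity:

    // Hypothetical name and shape following the truncated suggestion above.
    void TileManager::DispatchImageDecodingTasksForTile(Tile* tile) {
      std::list<skia::LazyPixelRef*>& refs =
          tile->managed_state().pending_pixel_refs;
      for (std::list<skia::LazyPixelRef*>::iterator it = refs.begin();
           it != refs.end();) {
        if ((*it)->PrepareToDecode(skia::LazyPixelRef::PrepareParams())) {
          refs.erase(it++);  // already decoded or cached; no task needed
          continue;
        }
        RasterThread* thread = GetFreeRasterThread();
        if (thread)
          DispatchOneImageDecodingTask(thread, tile, *it);
        ++it;
      }
    }
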
+  if (!tile->managed_state().has_image_decoding_info)
+    GetImageInformationForTile(tile);
+
+  RasterThread* thread = 0;
+  std::list<skia::LazyPixelRef*>& pending_pixel_refs =
+      tile->managed_state().pending_pixel_refs;
+  for (std::list<skia::LazyPixelRef*>::iterator it = pending_pixel_refs.begin();
+       it != pending_pixel_refs.end(); ++it) {
+    if (pending_decode_tasks_.end() != pending_decode_tasks_.find(
+            (*it)->getGenerationID()))
+      continue;
+    if ((*it)->PrepareToDecode(skia::LazyPixelRef::PrepareParams())) {
+      pending_pixel_refs.erase(it);
+    } else {
+      thread = GetFreeRasterThread();
+      if (thread)
+        DispatchOneImageDecodingTask(thread, tile, *it);
+    }
+  }
+
+  return !pending_pixel_refs.empty();
+}
+
+void TileManager::DispatchOneImageDecodingTask(RasterThread* thread,
+                                               scoped_refptr<Tile> tile,
+                                               skia::LazyPixelRef* pixel_ref) {
+  TRACE_EVENT0("cc", "TileManager::DispatchOneImageDecodingTask");
+  uint32_t pixel_ref_id = pixel_ref->getGenerationID();
+  DCHECK(pending_decode_tasks_.end() ==
+         pending_decode_tasks_.find(pixel_ref_id));
+  pending_decode_tasks_[pixel_ref_id] = pixel_ref;
+
+  thread->PostImageDecodingTaskAndReply(
+      FROM_HERE,
+      pixel_ref,
+      base::Bind(&TileManager::OnImageDecodingTaskCompleted,
+                 base::Unretained(this),
+                 tile,
+                 pixel_ref_id));
+}
+
+void TileManager::OnImageDecodingTaskCompleted(scoped_refptr<Tile> tile,
+                                               uint32_t pixel_ref_id) {
+  TRACE_EVENT0("cc", "TileManager::OnImageDecoded");
+  pending_decode_tasks_.erase(pixel_ref_id);
+
+  for (TileList::iterator it = tiles_have_image_decoding_tasks_.begin();
+       it != tiles_have_image_decoding_tasks_.end(); ++it) {
+    std::list<skia::LazyPixelRef*>& pixel_refs =
+        (*it)->managed_state().pending_pixel_refs;
+    for (std::list<skia::LazyPixelRef*>::iterator pixel_it =
+             pixel_refs.begin(); pixel_it != pixel_refs.end(); ++pixel_it) {
+      if (pixel_ref_id == (*pixel_it)->getGenerationID()) {
+        pixel_refs.erase(pixel_it);
+        break;
+      }
+    }
+  }
+
+  DispatchMoreTasks();
+}
+
 void TileManager::DispatchOneRasterTask(
     RasterThread* thread, scoped_refptr<Tile> tile) {
   TRACE_EVENT0("cc", "TileManager::DispatchOneRasterTask");
 
@@ -446,7 +554,7 @@ void TileManager::OnRasterTaskCompleted(
   // tiles. The result of this could be that this tile is no longer
   // allowed to use gpu memory and in that case we need to abort
   // initialization and free all associated resources before calling
-  // DispatchMoreRasterTasks().
+  // DispatchMoreTasks().
   AssignGpuMemoryToTiles();
 
   // Finish resource initialization if |can_use_gpu_memory| is true.
 
@@ -471,7 +579,7 @@ void TileManager::OnRasterTaskCompleted(
     managed_tile_state.resource_is_being_initialized = false;
   }
 
-  DispatchMoreRasterTasks();
+  DispatchMoreTasks();
 }
 
 void TileManager::DidFinishTileInitialization(Tile* tile) {