Index: cc/tiles/gpu_image_decode_controller.cc
diff --git a/cc/tiles/gpu_image_decode_controller.cc b/cc/tiles/gpu_image_decode_controller.cc
index 730ea6fa5764cfc516e6f20387a1d5b9b0fd55cb..0554c69157429dc2a31980b9ae4f00c6854d946a 100644
--- a/cc/tiles/gpu_image_decode_controller.cc
+++ b/cc/tiles/gpu_image_decode_controller.cc
@@ -548,62 +548,74 @@ void GpuImageDecodeController::SetShouldAggressivelyFreeResources(
bool GpuImageDecodeController::OnMemoryDump( |
const base::trace_event::MemoryDumpArgs& args, |
base::trace_event::ProcessMemoryDump* pmd) { |
+ using base::trace_event::MemoryAllocatorDump; |
+ using base::trace_event::MemoryAllocatorDumpGuid; |
+ using base::trace_event::MemoryDumpLevelOfDetail; |
+ |
TRACE_EVENT0("disabled-by-default-cc.debug", |
ssid
2016/10/07 00:07:21
Shouldn't this be using TRACE_DISABLED_BY_DEFAULT
ericrk
2016/10/13 23:58:13
sounds good - I put up a CL to update all of cc. c
|
"GpuImageDecodeController::OnMemoryDump"); |
- for (const auto& image_pair : persistent_cache_) { |
- const ImageData* image_data = image_pair.second.get(); |
- const uint32_t image_id = image_pair.first; |
- |
- // If we have discardable decoded data, dump this here. |
- if (image_data->decode.data()) { |
- std::string discardable_dump_name = base::StringPrintf( |
- "cc/image_memory/controller_0x%" PRIXPTR "/discardable/image_%d", |
- reinterpret_cast<uintptr_t>(this), image_id); |
- base::trace_event::MemoryAllocatorDump* dump = |
- image_data->decode.data()->CreateMemoryAllocatorDump( |
- discardable_dump_name.c_str(), pmd); |
- // If our image is locked, dump the "locked_size" as an additional column. |
- // This lets us see the amount of discardable which is contributing to |
- // memory pressure. |
- if (image_data->decode.is_locked()) { |
- dump->AddScalar("locked_size", |
- base::trace_event::MemoryAllocatorDump::kUnitsBytes, |
- image_data->size); |
+ |
+ if (args.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND) { |
+ std::string dump_name = |
+ base::StringPrintf("cc/image_memory/controller_0x%" PRIXPTR, |
+ reinterpret_cast<uintptr_t>(this)); |
+ MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name); |
+ dump->AddScalar(MemoryAllocatorDump::kNameSize, |
+ MemoryAllocatorDump::kUnitsBytes, bytes_used_); |
ssid
2016/10/07 00:07:21
if you do return true; here and remove else, it re
ericrk
2016/10/13 23:58:13
Done.
|
+ } else { |
+ for (const auto& image_pair : persistent_cache_) { |
+ const ImageData* image_data = image_pair.second.get(); |
+ const uint32_t image_id = image_pair.first; |
+ |
+ // If we have discardable decoded data, dump this here. |
+ if (image_data->decode.data()) { |
+ std::string discardable_dump_name = base::StringPrintf( |
+ "cc/image_memory/controller_0x%" PRIXPTR "/discardable/image_%d", |
+ reinterpret_cast<uintptr_t>(this), image_id); |
+ MemoryAllocatorDump* dump = |
+ image_data->decode.data()->CreateMemoryAllocatorDump( |
+ discardable_dump_name.c_str(), pmd); |
+ // If our image is locked, dump the "locked_size" as an additional |
+ // column. |
+ // This lets us see the amount of discardable which is contributing to |
+ // memory pressure. |
+ if (image_data->decode.is_locked()) { |
+ dump->AddScalar("locked_size", MemoryAllocatorDump::kUnitsBytes, |
+ image_data->size); |
+ } |
} |
- } |
- // If we have an uploaded image (that is actually on the GPU, not just a CPU |
- // wrapper), upload it here. |
- if (image_data->upload.image() && |
- image_data->mode == DecodedDataMode::GPU) { |
- std::string gpu_dump_name = base::StringPrintf( |
- "cc/image_memory/controller_0x%" PRIXPTR "/gpu/image_%d", |
- reinterpret_cast<uintptr_t>(this), image_id); |
- base::trace_event::MemoryAllocatorDump* dump = |
- pmd->CreateAllocatorDump(gpu_dump_name); |
- dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, |
- base::trace_event::MemoryAllocatorDump::kUnitsBytes, |
- image_data->size); |
- |
- // Create a global shred GUID to associate this data with its GPU process |
- // counterpart. |
- GLuint gl_id = skia::GrBackendObjectToGrGLTextureInfo( |
- image_data->upload.image()->getTextureHandle( |
- false /* flushPendingGrContextIO */)) |
- ->fID; |
- base::trace_event::MemoryAllocatorDumpGuid guid = |
- gl::GetGLTextureClientGUIDForTracing( |
- context_->ContextSupport()->ShareGroupTracingGUID(), gl_id); |
- |
- // kImportance is somewhat arbitrary - we chose 3 to be higher than the |
- // value used in the GPU process (1), and Skia (2), causing us to appear |
- // as the owner in memory traces. |
- const int kImportance = 3; |
- pmd->CreateSharedGlobalAllocatorDump(guid); |
- pmd->AddOwnershipEdge(dump->guid(), guid, kImportance); |
+ // If we have an uploaded image (that is actually on the GPU, not just a |
+ // CPU |
+ // wrapper), upload it here. |
+ if (image_data->upload.image() && |
+ image_data->mode == DecodedDataMode::GPU) { |
+ std::string gpu_dump_name = base::StringPrintf( |
+ "cc/image_memory/controller_0x%" PRIXPTR "/gpu/image_%d", |
+ reinterpret_cast<uintptr_t>(this), image_id); |
+ MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(gpu_dump_name); |
+ dump->AddScalar(MemoryAllocatorDump::kNameSize, |
+ MemoryAllocatorDump::kUnitsBytes, image_data->size); |
+ |
+ // Create a global shred GUID to associate this data with its GPU |
+ // process |
+ // counterpart. |
+ GLuint gl_id = skia::GrBackendObjectToGrGLTextureInfo( |
+ image_data->upload.image()->getTextureHandle( |
+ false /* flushPendingGrContextIO */)) |
+ ->fID; |
+ MemoryAllocatorDumpGuid guid = gl::GetGLTextureClientGUIDForTracing( |
+ context_->ContextSupport()->ShareGroupTracingGUID(), gl_id); |
+ |
+ // kImportance is somewhat arbitrary - we chose 3 to be higher than the |
+ // value used in the GPU process (1), and Skia (2), causing us to appear |
+ // as the owner in memory traces. |
+ const int kImportance = 3; |
+ pmd->CreateSharedGlobalAllocatorDump(guid); |
+ pmd->AddOwnershipEdge(dump->guid(), guid, kImportance); |
+ } |
} |
} |
- |
return true; |
} |