Index: content/browser/download/download_stats.cc |
diff --git a/content/browser/download/download_stats.cc b/content/browser/download/download_stats.cc |
index 708615419d511191b4d563bd5c0584f514c90476..d2f1c42b709024eb268c1f2cf5f2be7d8c06fabe 100644 |
--- a/content/browser/download/download_stats.cc |
+++ b/content/browser/download/download_stats.cc |
@@ -339,6 +339,14 @@ const base::FilePath::CharType* kDangerousFileTypes[] = { |
FILE_PATH_LITERAL(".udif"), |
}; |
// Upper bound (inclusive), in kilobytes, of the file sizes each bucket holds.
constexpr int64_t kParallelizableBucketKb[] = {512, 1024, 3072, 10240, 51200};

// Suffixes for parallelizable downloads bucketed by file size. Each suffix
// will generate a new histogram. The final entry is the overflow bucket for
// files larger than the last kParallelizableBucketKb threshold.
constexpr const char* kParallelizableBucketName[] = {"0.5MB", "1MB",   "3MB",
                                                     "10MB",  "50MB", "GT50MB"};

// The suffix array must have exactly one more entry than the threshold array;
// bucket selection indexes the suffix array at arraysize(threshold array) when
// a file exceeds every threshold.
static_assert(
    sizeof(kParallelizableBucketName) / sizeof(kParallelizableBucketName[0]) ==
        sizeof(kParallelizableBucketKb) / sizeof(kParallelizableBucketKb[0]) +
            1,
    "kParallelizableBucketName needs one more entry than "
    "kParallelizableBucketKb");
+ |
// Maps extensions to their matching UMA histogram int value. |
int GetDangerousFileType(const base::FilePath& file_path) { |
for (size_t i = 0; i < arraysize(kDangerousFileTypes); ++i) { |
@@ -796,6 +804,11 @@ void RecordParallelizableDownloadStats( |
size_t bytes_downloaded_without_parallel_streams, |
base::TimeDelta time_without_parallel_streams, |
bool uses_parallel_requests) { |
+ RecordParallelizableDownloadAverageStats( |
+ bytes_downloaded_with_parallel_streams + |
+ bytes_downloaded_without_parallel_streams, |
+ time_with_parallel_streams + time_without_parallel_streams); |
+ |
int64_t bandwidth_without_parallel_streams = 0; |
if (bytes_downloaded_without_parallel_streams > 0) { |
bandwidth_without_parallel_streams = CalculateBandwidthBytesPerSecond( |
@@ -863,6 +876,36 @@ void RecordParallelizableDownloadStats( |
} |
} |
+void RecordParallelizableDownloadAverageStats( |
+ int64_t bytes_downloaded, |
+ const base::TimeDelta& time_span) { |
+ if (time_span.is_zero() || bytes_downloaded <= 0) |
+ return; |
+ |
+ int64_t average_bandwidth = |
+ CalculateBandwidthBytesPerSecond(bytes_downloaded, time_span); |
+ int64_t file_size_kb = bytes_downloaded / 1024; |
+ RecordBandwidthMetric("Download.ParallelizableDownloadBandwidth", |
+ average_bandwidth); |
+ UMA_HISTOGRAM_LONG_TIMES("Download.Parallelizable.DownloadTime", time_span); |
+ int64_t max = 1024 * 1024 * 1024; |
qinmin
2017/05/17 19:43:26
nit: make this a constant
xingliu
2017/05/17 21:20:18
Done.
|
+ UMA_HISTOGRAM_CUSTOM_COUNTS("Download.Parallelizable.FileSize", file_size_kb, |
+ 1, max, /* 1 TB as maximum. */ |
+ 256); |
Ilya Sherman
2017/05/17 19:13:07
It looks like you are recording a histogram with 2
xingliu
2017/05/17 21:20:18
Done. Also lower the maximum size.
|
+ |
+ // Download size suffix for average bandwidth and download time metrics. |
+ size_t i = 0; |
+ for (size_t n = arraysize(kParallelizableBucketKb); i < n; ++i) { |
+ if (file_size_kb <= kParallelizableBucketKb[i]) |
+ break; |
+ } |
+ std::string suffix = kParallelizableBucketName[i]; |
+ RecordBandwidthMetric("Download.ParallelizableDownloadBandwidth." + suffix, |
+ average_bandwidth); |
+ base::UmaHistogramLongTimes("Download.Parallelizable.DownloadTime." + suffix, |
+ time_span); |
+} |
+ |
void RecordParallelDownloadCreationEvent(ParallelDownloadCreationEvent event) { |
UMA_HISTOGRAM_ENUMERATION("Download.ParallelDownload.CreationEvent", event, |
ParallelDownloadCreationEvent::COUNT); |