| Index: tools/android/loading/sandwich_task_builder.py
|
| diff --git a/tools/android/loading/sandwich_task_builder.py b/tools/android/loading/sandwich_task_builder.py
|
| index 26dba8cc9b78b782edb4ed92a8a372baa908c133..64256170a2b02c56426856dd53b7b01cf72948c8 100644
|
| --- a/tools/android/loading/sandwich_task_builder.py
|
| +++ b/tools/android/loading/sandwich_task_builder.py
|
| @@ -4,12 +4,14 @@
|
|
|
| import csv
|
| import json
|
| +import logging
|
| import os
|
| import shutil
|
|
|
| import chrome_cache
|
| import common_util
|
| import emulation
|
| +from loading_trace import LoadingTrace
|
| import sandwich_metrics
|
| import sandwich_misc
|
| import sandwich_runner
|
| @@ -189,6 +191,18 @@ class PrefetchBenchmarkBuilder(task_manager.Builder):
|
| depends on: common/<subresource_discoverer>-setup.json
|
| depends on: some tasks saved by PopulateCommonPipelines()
|
| """
|
| + ADDITIONAL_COLUMN_NAMES = [
|
| + 'url',
|
| + 'repeat_id',
|
| + 'subresource_discoverer',
|
| + 'subresource_count',
|
| + # The amount of subresources detected at SetupBenchmark step.
|
| + 'subresource_count_theoretic',
|
| + # Amount of subresources for caching as suggested by the subresource
|
| + # discoverer.
|
| + 'cached_subresource_count_theoretic',
|
| + 'cached_subresource_count']
|
| +
|
| assert subresource_discoverer in sandwich_misc.SUBRESOURCE_DISCOVERERS
|
| assert 'common' not in sandwich_misc.SUBRESOURCE_DISCOVERERS
|
| shared_task_prefix = os.path.join('common', subresource_discoverer)
|
| @@ -238,17 +252,42 @@ class PrefetchBenchmarkBuilder(task_manager.Builder):
|
| @self.RegisterTask(task_prefix + '-metrics.csv',
|
| dependencies=[RunBenchmark])
|
| def ExtractMetrics():
|
| + # TODO PERF IMPROVEMENT(gabadie): Can load trace only once and use it for
|
| + # validation and metrics extraction.
|
| sandwich_misc.VerifyBenchmarkOutputDirectory(
|
| SetupBenchmark.path, RunBenchmark.path)
|
| - trace_metrics_list = \
|
| - sandwich_metrics.ExtractMetricsFromRunnerOutputDirectory(
|
| - SetupBenchmark.path, RunBenchmark.path)
|
| - trace_metrics_list.sort(key=lambda e: e['repeat_id'])
|
| +
|
| + benchmark_setup = json.load(open(SetupBenchmark.path))
|
| + run_metrics_list = []
|
| + for repeat_id, repeat_dir in sandwich_runner.IterRepeatedRuns(
|
| + RunBenchmark.path):
|
| + trace_path = os.path.join(repeat_dir, sandwich_runner.TRACE_FILENAME)
|
| + logging.info('processing trace \'%s\'' % trace_path)
|
| + trace = LoadingTrace.FromJsonFile(trace_path)
|
| + run_metrics = {
|
| + 'url': trace.url,
|
| + 'repeat_id': repeat_id,
|
| + 'subresource_discoverer': benchmark_setup['subresource_discoverer'],
|
| + 'subresource_count': len(sandwich_misc.ListUrlRequests(
|
| + trace, sandwich_misc.RequestOutcome.All)),
|
| + 'subresource_count_theoretic':
|
| + len(benchmark_setup['url_resources']),
|
| + 'cached_subresource_count': len(sandwich_misc.ListUrlRequests(
|
| + trace, sandwich_misc.RequestOutcome.ServedFromCache)),
|
| + 'cached_subresource_count_theoretic':
|
| + len(benchmark_setup['cache_whitelist']),
|
| + }
|
| + run_metrics.update(
|
| + sandwich_metrics.ExtractCommonMetricsFromRepeatDirectory(
|
| + repeat_dir, trace))
|
| + run_metrics_list.append(run_metrics)
|
| +
|
| + run_metrics_list.sort(key=lambda e: e['repeat_id'])
|
| with open(ExtractMetrics.path, 'w') as csv_file:
|
| - writer = csv.DictWriter(csv_file,
|
| - fieldnames=sandwich_metrics.CSV_FIELD_NAMES)
|
| + writer = csv.DictWriter(csv_file, fieldnames=(ADDITIONAL_COLUMN_NAMES +
|
| + sandwich_metrics.COMMON_CSV_COLUMN_NAMES))
|
| writer.writeheader()
|
| - for trace_metrics in trace_metrics_list:
|
| + for trace_metrics in run_metrics_list:
|
| writer.writerow(trace_metrics)
|
|
|
| self._common_builder.default_final_tasks.append(ExtractMetrics)
|
|
|