OLD | NEW |
---|---|
1 # Copyright 2016 The Chromium Authors. All rights reserved. | 1 # Copyright 2016 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import csv | 5 import csv |
6 import logging | 6 import logging |
7 import json | 7 import json |
8 import logging | |
8 import os | 9 import os |
9 import shutil | 10 import shutil |
10 | 11 |
11 import chrome_cache | 12 import chrome_cache |
12 import common_util | 13 import common_util |
13 import emulation | 14 import emulation |
15 from loading_trace import LoadingTrace | |
pasko (2016/05/31 16:00:01): more consistent: 'import loading_trace'.
gabadie (2016/06/01 12:04:34): Done.
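For context on the import-style comment above: the two forms differ only in how `LoadingTrace` is referenced at the call site. A minimal sketch using names from this CL (`trace_path` stands in for any trace file path):

```python
# Style this patchset uses: import the class directly.
from loading_trace import LoadingTrace
trace = LoadingTrace.FromJsonFile(trace_path)

# Style pasko asks for, consistent with the file's other imports:
# import the module and qualify the class at each use.
import loading_trace
trace = loading_trace.LoadingTrace.FromJsonFile(trace_path)
```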
14 import sandwich_metrics | 16 import sandwich_metrics |
15 import sandwich_misc | 17 import sandwich_misc |
16 import sandwich_runner | 18 import sandwich_runner |
17 import task_manager | 19 import task_manager |
18 | 20 |
19 | 21 |
20 def NetworkSimulationTransformer(network_condition): | 22 def NetworkSimulationTransformer(network_condition): |
21 """Creates a function that accepts a SandwichRunner as a parameter and sets | 23 """Creates a function that accepts a SandwichRunner as a parameter and sets |
22 network emulation options on it. | 24 network emulation options on it. |
23 | 25 |
(...skipping 155 matching lines...) | |
179 SandwichRunner.Run() in the given order. | 181 SandwichRunner.Run() in the given order. |
180 | 182 |
181 Here is the full dependency of the added tree for the returned task: | 183 Here is the full dependency of the added tree for the returned task: |
182 <transformer_list_name>/<subresource_discoverer>-metrics.csv | 184 <transformer_list_name>/<subresource_discoverer>-metrics.csv |
183 depends on: <transformer_list_name>/<subresource_discoverer>-run/ | 185 depends on: <transformer_list_name>/<subresource_discoverer>-run/ |
184 depends on: common/<subresource_discoverer>-cache.zip | 186 depends on: common/<subresource_discoverer>-cache.zip |
185 depends on: some tasks saved by PopulateCommonPipelines() | 187 depends on: some tasks saved by PopulateCommonPipelines() |
186 depends on: common/<subresource_discoverer>-setup.json | 188 depends on: common/<subresource_discoverer>-setup.json |
187 depends on: some tasks saved by PopulateCommonPipelines() | 189 depends on: some tasks saved by PopulateCommonPipelines() |
188 """ | 190 """ |
191 ADDITIONAL_COLUMN_NAMES = [ | |
192 'url', | |
193 'repeat_id', | |
194 'subresource_discoverer', | |
195 'subresource_count', | |
196 # The number of subresources detected at the SetupBenchmark step. | |
197 'subresource_count_theoretic', | |
198 # Number of subresources to cache, as suggested by the subresource | |
199 # discoverer. | |
200 'cached_subresource_count_theoretic', | |
201 'cached_subresource_count'] | |
202 | |
189 assert subresource_discoverer in sandwich_misc.SUBRESOURCE_DISCOVERERS | 203 assert subresource_discoverer in sandwich_misc.SUBRESOURCE_DISCOVERERS |
190 assert 'common' not in sandwich_misc.SUBRESOURCE_DISCOVERERS | 204 assert 'common' not in sandwich_misc.SUBRESOURCE_DISCOVERERS |
191 shared_task_prefix = os.path.join('common', subresource_discoverer) | 205 shared_task_prefix = os.path.join('common', subresource_discoverer) |
192 task_prefix = os.path.join(transformer_list_name, subresource_discoverer) | 206 task_prefix = os.path.join(transformer_list_name, subresource_discoverer) |
193 | 207 |
194 @self.RegisterTask(shared_task_prefix + '-setup.json', merge=True, | 208 @self.RegisterTask(shared_task_prefix + '-setup.json', merge=True, |
195 dependencies=[self._subresources_for_urls_task]) | 209 dependencies=[self._subresources_for_urls_task]) |
196 def SetupBenchmark(): | 210 def SetupBenchmark(): |
197 whitelisted_urls = sandwich_misc.ExtractDiscoverableUrls( | 211 whitelisted_urls = sandwich_misc.ExtractDiscoverableUrls( |
198 self._trace_from_grabbing_reference_cache, subresource_discoverer) | 212 self._trace_from_grabbing_reference_cache, subresource_discoverer) |
(...skipping 27 matching lines...) | |
226 runner.wpr_out_log_path = os.path.join( | 240 runner.wpr_out_log_path = os.path.join( |
227 RunBenchmark.path, sandwich_runner.WPR_LOG_FILENAME) | 241 RunBenchmark.path, sandwich_runner.WPR_LOG_FILENAME) |
228 runner.cache_archive_path = BuildBenchmarkCacheArchive.path | 242 runner.cache_archive_path = BuildBenchmarkCacheArchive.path |
229 runner.cache_operation = sandwich_runner.CacheOperation.PUSH | 243 runner.cache_operation = sandwich_runner.CacheOperation.PUSH |
230 runner.output_dir = RunBenchmark.path | 244 runner.output_dir = RunBenchmark.path |
231 runner.Run() | 245 runner.Run() |
232 | 246 |
233 @self.RegisterTask(task_prefix + '-metrics.csv', | 247 @self.RegisterTask(task_prefix + '-metrics.csv', |
234 dependencies=[RunBenchmark]) | 248 dependencies=[RunBenchmark]) |
235 def ExtractMetrics(): | 249 def ExtractMetrics(): |
250 # TODO PERF IMPROVEMENT(gabadie): Can load the trace only once and use it for | |
pasko (2016/05/31 16:00:01): do you want to make this improvement in a followup?
pasko (2016/05/31 16:00:01): The format of TODO is somewhat strict to allow som…
gabadie (2016/06/01 12:04:34): Separate.
gabadie (2016/06/01 12:04:34): Done.
251 # validation and metrics extraction. | |
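To make the deferred TODO concrete: it suggests that validation currently parses the same trace files that the metrics loop below re-parses with `LoadingTrace.FromJsonFile`. A rough sketch of what the separate follow-up might look like; `VerifyRepeatDirectory` and `ExtractMetricsFromTrace` are hypothetical names for illustration, not APIs in this CL:

```python
# Hypothetical follow-up (not part of this CL): parse each trace once
# and hand the parsed object to both validation and metrics extraction.
for repeat_id, repeat_dir in sandwich_runner.WalkRepeatedRuns(
    RunBenchmark.path):
  trace_path = os.path.join(repeat_dir, sandwich_runner.TRACE_FILENAME)
  trace = LoadingTrace.FromJsonFile(trace_path)
  # Illustrative helpers only; the CL's real helpers take directory
  # paths and parse traces themselves.
  sandwich_misc.VerifyRepeatDirectory(benchmark_setup, repeat_dir, trace)
  run_metrics = sandwich_metrics.ExtractMetricsFromTrace(
      repeat_id, repeat_dir, trace)
  run_metrics_list.append(run_metrics)
```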
236 sandwich_misc.VerifyBenchmarkOutputDirectory( | 252 sandwich_misc.VerifyBenchmarkOutputDirectory( |
237 SetupBenchmark.path, RunBenchmark.path) | 253 SetupBenchmark.path, RunBenchmark.path) |
238 trace_metrics_list = \ | 254 |
239 sandwich_metrics.ExtractMetricsFromRunnerOutputDirectory( | 255 benchmark_setup = json.load(open(SetupBenchmark.path)) |
240 SetupBenchmark.path, RunBenchmark.path) | 256 run_metrics_list = [] |
241 trace_metrics_list.sort(key=lambda e: e['repeat_id']) | 257 for repeat_id, repeat_dir in sandwich_runner.WalkRepeatedRuns( |
258 RunBenchmark.path): | |
259 trace_path = os.path.join(repeat_dir, sandwich_runner.TRACE_FILENAME) | |
260 logging.info('processing trace \'%s\'' % trace_path) | |
pasko (2016/05/31 16:00:01): logging.info('processing trace: %s', trace_path)
gabadie (2016/06/01 12:04:34): Done.
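pasko's suggested form is the standard `logging` idiom: passing `trace_path` as an argument defers string interpolation until the record is actually emitted, instead of pre-formatting the message with `%`. A self-contained comparison:

```python
import logging

trace_path = '/tmp/trace.json'  # placeholder value for this example

# As written in the patchset: the message is built eagerly with %,
# even when INFO records are filtered out.
logging.info('processing trace \'%s\'' % trace_path)

# As suggested: logging interpolates lazily, only if the record is
# actually emitted.
logging.info('processing trace: %s', trace_path)
```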
261 trace = LoadingTrace.FromJsonFile(trace_path) | |
262 run_metrics = { | |
263 'url': trace.url, | |
264 'repeat_id': repeat_id, | |
265 'subresource_discoverer': benchmark_setup['subresource_discoverer'], | |
266 'subresource_count': len(sandwich_misc.ListUrlRequests( | |
267 trace, sandwich_misc.RequestOutcome.All)), | |
268 'subresource_count_theoretic': | |
269 len(benchmark_setup['url_resources']), | |
270 'cached_subresource_count': len(sandwich_misc.ListUrlRequests( | |
271 trace, sandwich_misc.RequestOutcome.ServedFromCache)), | |
272 'cached_subresource_count_theoretic': | |
273 len(benchmark_setup['cache_whitelist']), | |
274 } | |
275 run_metrics.update( | |
276 sandwich_metrics.ExtractCommonMetricsFromRepeatDirectory( | |
277 repeat_dir, trace)) | |
278 run_metrics_list.append(run_metrics) | |
279 | |
280 run_metrics_list.sort(key=lambda e: e['repeat_id']) | |
242 with open(ExtractMetrics.path, 'w') as csv_file: | 281 with open(ExtractMetrics.path, 'w') as csv_file: |
243 writer = csv.DictWriter(csv_file, | 282 writer = csv.DictWriter(csv_file, fieldnames=(ADDITIONAL_COLUMN_NAMES + |
244 fieldnames=sandwich_metrics.CSV_FIELD_NAMES) | 283 sandwich_metrics.COMMON_CSV_COLUMN_NAMES)) |
245 writer.writeheader() | 284 writer.writeheader() |
246 for trace_metrics in trace_metrics_list: | 285 for trace_metrics in run_metrics_list: |
247 writer.writerow(trace_metrics) | 286 writer.writerow(trace_metrics) |
248 | 287 |
249 self._common_builder.default_final_tasks.append(ExtractMetrics) | 288 self._common_builder.default_final_tasks.append(ExtractMetrics) |
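A note on the CSV step above: `csv.DictWriter` writes its header row from `fieldnames` and, by default (`extrasaction='raise'`), raises `ValueError` if a row dict carries a key that is not listed, so `ADDITIONAL_COLUMN_NAMES + sandwich_metrics.COMMON_CSV_COLUMN_NAMES` has to cover every key the loop puts into `run_metrics`. A self-contained sketch of the same pattern with placeholder columns:

```python
import csv

# Placeholder columns standing in for ADDITIONAL_COLUMN_NAMES +
# sandwich_metrics.COMMON_CSV_COLUMN_NAMES.
fieldnames = ['url', 'repeat_id', 'subresource_count']
rows = [
    {'url': 'http://a.test', 'repeat_id': 1, 'subresource_count': 12},
    {'url': 'http://a.test', 'repeat_id': 0, 'subresource_count': 11},
]
rows.sort(key=lambda e: e['repeat_id'])  # same ordering as ExtractMetrics

with open('metrics.csv', 'w') as csv_file:
  writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
  writer.writeheader()
  for row in rows:
    writer.writerow(row)  # ValueError if a row has a key not in fieldnames
```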