| Index: tools/telemetry/telemetry/unittest/page_set_smoke_test.py
| diff --git a/tools/telemetry/telemetry/test_util/page_set_smoke_test.py b/tools/telemetry/telemetry/unittest/page_set_smoke_test.py
| similarity index 69%
| rename from tools/telemetry/telemetry/test_util/page_set_smoke_test.py
| rename to tools/telemetry/telemetry/unittest/page_set_smoke_test.py
| index 00d1a4bef05dab168680f57f34a6803c387f31fb..aa30415223b817c1e9baae8cfdefbd97c7cf6405 100644
| --- a/tools/telemetry/telemetry/test_util/page_set_smoke_test.py
| +++ b/tools/telemetry/telemetry/unittest/page_set_smoke_test.py
| @@ -22,23 +22,23 @@ class PageSetSmokeTest(unittest.TestCase):
|      # Instantiate all page sets and verify that all URLs have an associated
|      # archive.
|      page_sets = discover.GetAllPageSetFilenames(page_sets_dir)
| -    for path in page_sets:
| -      page_set = page_set_module.PageSet.FromFile(path)
| +    for page_set_path in page_sets:
| +      page_set = page_set_module.PageSet.FromFile(page_set_path)
|
|        # TODO: Eventually these should be fatal.
|        if not page_set.archive_data_file:
| -        logging.warning('Skipping %s: missing archive data file', path)
| -        continue
| -      if not os.path.exists(os.path.join(page_sets_dir,
| -                                         page_set.archive_data_file)):
| -        logging.warning('Skipping %s: archive data file not found', path)
| +        logging.warning('Skipping %s: no archive data file', page_set_path)
|          continue
|
| -      wpr_archive_info = page_set_archive_info.PageSetArchiveInfo.FromFile(
| -          os.path.join(page_sets_dir, page_set.archive_data_file),
| -          ignore_archive=True)
| +      logging.info('Testing %s', page_set_path)
|
| -      logging.info('Testing %s', path)
| +      archive_data_file_path = os.path.join(page_set.base_dir,
| +                                            page_set.archive_data_file)
| +      self.assertTrue(os.path.exists(archive_data_file_path),
| +                      msg='Archive data file not found for %s' % page_set_path)
| +
| +      wpr_archive_info = page_set_archive_info.PageSetArchiveInfo.FromFile(
| +          archive_data_file_path, ignore_archive=True)
|        for page in page_set.pages:
|          if not page.url.startswith('http'):
|            continue
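
For readability, here is how the patched loop reads once this change is applied, assembled from the '+' and context lines of the hunk above. The imports and the enclosing method signature are not visible in the hunk, so they are assumptions added only to make the sketch self-contained; they are not part of this patch.

# Sketch of the patched test body. The imports and the method signature are
# assumed (they fall outside the hunk above); the loop body is taken from
# the '+' and context lines of this patch.
import logging
import os
import unittest

from telemetry.core import discover  # assumed import path
from telemetry.page import page_set as page_set_module  # assumed import path
from telemetry.page import page_set_archive_info  # assumed import path


class PageSetSmokeTest(unittest.TestCase):

  def RunSmokeTest(self, page_sets_dir):  # method name assumed from context
    # Instantiate all page sets and verify that all URLs have an associated
    # archive.
    page_sets = discover.GetAllPageSetFilenames(page_sets_dir)
    for page_set_path in page_sets:
      page_set = page_set_module.PageSet.FromFile(page_set_path)

      # TODO: Eventually these should be fatal.
      if not page_set.archive_data_file:
        logging.warning('Skipping %s: no archive data file', page_set_path)
        continue

      logging.info('Testing %s', page_set_path)

      # The archive data file is now resolved against page_set.base_dir
      # (not page_sets_dir), and a missing file fails the test outright.
      archive_data_file_path = os.path.join(page_set.base_dir,
                                            page_set.archive_data_file)
      self.assertTrue(os.path.exists(archive_data_file_path),
                      msg='Archive data file not found for %s' % page_set_path)

      wpr_archive_info = page_set_archive_info.PageSetArchiveInfo.FromFile(
          archive_data_file_path, ignore_archive=True)
      for page in page_set.pages:
        if not page.url.startswith('http'):
          continue
        # The hunk ends here; the per-page checks against wpr_archive_info
        # continue beyond the quoted context.

The net effect, visible in the hunk itself: the archive data file path is now built from page_set.base_dir instead of page_sets_dir, and a missing archive data file fails the test via assertTrue rather than being skipped with a warning, while a page set with no archive_data_file attribute is still skipped.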