Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(76)

Side by Side Diff: mozdownload/factory.py

Issue 1451373002: Updating mozdownload (excluding tests) (Closed) Base URL: https://chromium.googlesource.com/chromium/deps/mozdownload@master
Patch Set: Updated README.md Created 5 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « mozdownload/errors.py ('k') | mozdownload/parser.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
4
5 from . import errors
6 from . import scraper
7
8
# Registry mapping a scraper-type name to the class that implements it.
scraper_types = dict(
    candidate=scraper.ReleaseCandidateScraper,
    daily=scraper.DailyScraper,
    direct=scraper.DirectScraper,
    release=scraper.ReleaseScraper,
    tinderbox=scraper.TinderboxScraper,
    try_=scraper.TryScraper,
)
# 'try' is a Python keyword, so it cannot be passed as a keyword argument above.
scraper_types['try'] = scraper_types.pop('try_')
17
18
class FactoryScraper(scraper.Scraper):
    """Factory that builds the concrete scraper matching a given type name."""

    def __init__(self, scraper_type, **kwargs):
        """Creates an instance of a scraper class based on the given type.

        :param scraper_type: The type of scraper to use.

        Scraper:
        :param application: The name of the application to download.
        :param base_url: The base url to be used.
        :param build_number: Number of the build (for candidate, daily, and tinderbox builds).
        :param destination: Directory or file name to download the file to.
        :param extension: File extension of the build (e.g. ".zip").
        :param is_stub_installer: Stub installer (only applicable to Windows builds).
        :param locale: Locale of the application.
        :param log_level: Threshold for log output.
        :param password: Password for basic HTTP authentication.
        :param platform: Platform of the application.
        :param retry_attempts: Number of times the download will be attempted
            in the event of a failure.
        :param retry_delay: Amount of time (in seconds) to wait between retry attempts.
        :param timeout: Amount of time (in seconds) until a download times out.
        :param url: URL to download.
        :param username: Username for basic HTTP authentication.
        :param version: Version of the application to be downloaded.

        Daily builds:
        :param branch: Name of the branch.
        :param build_id: ID of the build to download.
        :param date: Date of the build.

        Tinderbox:
        :param debug_build: Download a debug build.

        Try:
        :param changeset: Changeset of the try build to download.

        :raises ValueError: If a version is required for the scraper type but missing.
        :raises errors.NotSupportedError: If the application does not support the
            requested scraper type.
        """
        # Check for valid arguments
        if scraper_type in ('candidate', 'release') and not kwargs.get('version'):
            raise ValueError('The version to download has to be specified.')

        if kwargs.get('application') == 'b2g' and scraper_type in ('candidate', 'release'):
            error_msg = '%s build is not yet supported for B2G' % scraper_type
            raise errors.NotSupportedError(error_msg)

        # BUGFIX: the original used `not in ('daily')`, which is a *string*
        # membership (substring) test, not a one-element tuple. It only worked
        # by accident because no other type name is a substring of 'daily'.
        if kwargs.get('application') == 'fennec' and scraper_type not in ('daily',):
            error_msg = '%s build is not yet supported for fennec' % scraper_type
            raise errors.NotSupportedError(error_msg)

        # Keyword arguments accepted by every scraper type, with the factory's
        # defaults filled in for values the caller did not supply.
        scraper_keywords = {'application': kwargs.get('application', 'firefox'),
                            'base_url': kwargs.get('base_url', scraper.BASE_URL),
                            'destination': kwargs.get('destination'),
                            'extension': kwargs.get('extension'),
                            'is_stub_installer': kwargs.get('is_stub_installer'),
                            'locale': kwargs.get('locale'),
                            'log_level': kwargs.get('log_level', 'INFO'),
                            'password': kwargs.get('password'),
                            'platform': kwargs.get('platform'),
                            'retry_attempts': kwargs.get('retry_attempts', 0),
                            'retry_delay': kwargs.get('retry_delay', 10),
                            'timeout': kwargs.get('timeout'),
                            'username': kwargs.get('username'),
                            }

        # Extra keyword arguments specific to each scraper type; only the
        # entry matching `scraper_type` is merged in below.
        scraper_type_keywords = {
            'release': {
                'version': kwargs.get('version'),
            },
            'candidate': {
                'build_number': kwargs.get('build_number'),
                'version': kwargs.get('version'),
            },
            'daily': {
                'branch': kwargs.get('branch', 'mozilla-central'),
                'build_number': kwargs.get('build_number'),
                'build_id': kwargs.get('build_id'),
                'date': kwargs.get('date'),
            },
            'direct': {
                'url': kwargs.get('url'),
            },
            'tinderbox': {
                'branch': kwargs.get('branch', 'mozilla-central'),
                'build_number': kwargs.get('build_number'),
                'date': kwargs.get('date'),
                'debug_build': kwargs.get('debug_build', False),
            },
            'try': {
                'changeset': kwargs.get('changeset'),
                'debug_build': kwargs.get('debug_build', False),
            },
        }

        kwargs = scraper_keywords.copy()
        kwargs.update(scraper_type_keywords.get(scraper_type, {}))

        # Rebind this instance's class to the concrete scraper type and run
        # that class's initializer, so the factory call returns an object
        # that behaves exactly like a directly-constructed scraper.
        self.__class__ = scraper_types[scraper_type]
        scraper_types[scraper_type].__init__(self, **kwargs)
OLDNEW
« no previous file with comments | « mozdownload/errors.py ('k') | mozdownload/parser.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698