OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright 2016 The Chromium Authors. All rights reserved. | 2 # Copyright 2016 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 import argparse | 6 import argparse |
7 import itertools | 7 import itertools |
| 8 import json |
8 import os | 9 import os |
9 import platform | 10 import platform |
10 import re | 11 import re |
| 12 import shutil |
11 import sys | 13 import sys |
12 import tempfile | 14 import tempfile |
13 | 15 |
14 | 16 |
_HERE_PATH = os.path.dirname(__file__)
# Presumably the src/ root of the checkout (three levels up from this script);
# used below to locate third_party/ -- TODO confirm against the script's
# actual location in the tree.
_SRC_PATH = os.path.normpath(os.path.join(_HERE_PATH, '..', '..', '..'))
_CWD = os.getcwd()  # NOTE(dbeam): this is typically out/<gn_name>/.

# Make the node wrapper module importable before importing it.
sys.path.append(os.path.join(_SRC_PATH, 'third_party', 'node'))
import node
(...skipping 15 matching lines...) Expand all Loading... |
_JS_RESOURCES_PATH = os.path.join(_RESOURCES_PATH, 'js')


# Source directory backing the chrome://resources/polymer/v1_0/ URLs (paired
# with that URL prefix in _URL_MAPPINGS below).
_POLYMER_PATH = os.path.join(
    _SRC_PATH, 'third_party', 'polymer', 'v1_0', 'components-chromium')
41 | 43 |
42 | 44 |
# Arguments passed to polymer-bundler on every invocation (see _vulcanize()).
_VULCANIZE_BASE_ARGS = [
    # These files are already combined and minified.
    '--exclude', 'chrome://resources/html/polymer.html',
    '--exclude', 'chrome://resources/polymer/v1_0/polymer/polymer.html',
    '--exclude', 'chrome://resources/polymer/v1_0/polymer/polymer-micro.html',
    '--exclude', 'chrome://resources/polymer/v1_0/polymer/polymer-mini.html',
    '--exclude', 'chrome://resources/polymer/v1_0/web-animations-js/' +
        'web-animations-next-lite.min.js',

    # These resources are served by the browser at run time (per the
    # pre-existing exclusions, some are dynamically created by C++), so they
    # must not be inlined into the bundle.
    '--exclude', 'chrome://resources/css/roboto.css',
    '--exclude', 'chrome://resources/css/text_defaults.css',
    '--exclude', 'chrome://resources/css/text_defaults_md.css',
    '--exclude', 'chrome://resources/js/load_time_data.js',

    '--inline-css',
    '--inline-scripts',
    '--rewrite-urls-in-templates',
    '--strip-comments',
]
58 | 64 |
59 | 65 |
# (URL prefix, local source directory) pairs. Used both to build the
# '--redirect' flags passed to the bundler (_VULCANIZE_REDIRECT_ARGS) and to
# map bundled URLs back to file paths when writing the depfile
# (_undo_mapping()).
_URL_MAPPINGS = [
    ('chrome://resources/cr_elements/', _CR_ELEMENTS_PATH),
    ('chrome://resources/css/', _CSS_RESOURCES_PATH),
    ('chrome://resources/html/', _HTML_RESOURCES_PATH),
    ('chrome://resources/js/', _JS_RESOURCES_PATH),
    ('chrome://resources/polymer/v1_0/', _POLYMER_PATH)
]
67 | 73 |
68 | 74 |
# Flatten _URL_MAPPINGS into repeated ['--redirect', '"url|path"'] argument
# pairs for the bundler's command line.
_VULCANIZE_REDIRECT_ARGS = [
    arg
    for (url, path) in _URL_MAPPINGS
    for arg in ('--redirect', '"%s|%s"' % (url, path))
]
71 | 77 |
72 | 78 |
73 def _undo_mapping(mappings, url): | 79 def _undo_mapping(mappings, url): |
74 for (redirect_url, file_path) in mappings: | 80 for (redirect_url, file_path) in mappings: |
75 if url.startswith(redirect_url): | 81 if url.startswith(redirect_url): |
76 return url.replace(redirect_url, file_path + os.sep) | 82 return url.replace(redirect_url, file_path + os.sep) |
77 # TODO(dbeam): can we make this stricter? | 83 # TODO(dbeam): can we make this stricter? |
78 return url | 84 return url |
79 | 85 |
80 def _request_list_path(out_path, html_out_file): | 86 def _request_list_path(out_path, host): |
81 return os.path.join(out_path, html_out_file + '_requestlist.txt') | 87 return os.path.join(out_path, host + '_requestlist.txt') |
82 | 88 |
def _update_dep_file(in_folder, args, manifest):
  """Writes a Ninja depfile listing every file bundled by polymer-bundler, so
  Ninja knows when to re-trigger the bundling step.

  Args:
    in_folder: Folder (relative to the current working directory) holding the
        input HTML files.
    args: Parsed command-line arguments; depfile, host, input, out_folder and
        html_out_files are used here.
    manifest: polymer-bundler's output manifest -- a dict mapping each bundled
        root HTML file to the list of URLs inlined into it.
  """
  in_path = os.path.join(_CWD, in_folder)

  # Gather the dependencies of all bundled root HTML files.
  request_list = []
  for html_file in manifest:
    request_list += manifest[html_file]

  # Add a slash in front of every dependency that is not a chrome:// URL, so
  # that we can map it to the correct source file path below.
  # NOTE: list comprehensions (rather than map/lambda) keep this eager and
  # identical on Python 2 and 3; map() returns a lazy iterator on Python 3.
  request_list = ['/' + dep if not dep.startswith('chrome://') else dep
                  for dep in request_list]

  # Undo the URL mappings applied by the bundler to get file paths relative
  # to the current working directory.
  url_mappings = _URL_MAPPINGS + [
      ('/', os.path.relpath(in_path, _CWD)),
      ('chrome://%s/' % args.host, os.path.relpath(in_path, _CWD)),
  ]

  deps = [os.path.normpath(_undo_mapping(url_mappings, u))
          for u in request_list]

  # If the input was a folder holding an unpacked .pak file, the generated
  # depfile should not list files already in the .pak file.
  if args.input.endswith('.unpak'):
    deps = [d for d in deps if not d.startswith(args.input)]

  with open(os.path.join(_CWD, args.depfile), 'w') as f:
    # Ninja's depfile format: "<target>: <dep> <dep> ...". The first output
    # HTML file stands in for the whole bundling action.
    deps_file_header = os.path.join(args.out_folder, args.html_out_files[0])
    f.write(deps_file_header + ': ' + ' '.join(deps))
126 | 124 |
127 | 125 |
def _vulcanize(in_folder, args):
  """Bundles the input HTML files with polymer-bundler, splits the JS out of
  each bundle with crisper, minifies the JS with uglify, and copies the final
  HTML/JS files to args.out_folder.

  Args:
    in_folder: Folder (relative to the current working directory) holding the
        input HTML files.
    args: Parsed command-line arguments.

  Returns:
    The path of the JSON manifest file written by polymer-bundler (via
    --manifest-out), mapping each bundled root HTML file to its inlined
    dependencies.
  """
  in_path = os.path.normpath(os.path.join(_CWD, in_folder))
  out_path = os.path.join(_CWD, args.out_folder)
  manifest_out_path = _request_list_path(out_path, args.host)

  exclude_args = []
  for f in args.exclude or []:
    exclude_args += ['--exclude', f]

  in_html_args = []
  for f in args.html_in_files:
    in_html_args += ['--in-html', f]

  # Bundle into a temporary directory first; intermediate files are
  # post-processed below and deleted at the end.
  tmp_out_dir = os.path.join(out_path, 'bundled')
  node.RunNode(
      [node_modules.PathToBundler()] +
      _VULCANIZE_BASE_ARGS + _VULCANIZE_REDIRECT_ARGS + exclude_args +
      [# This file is dynamically created by C++. Need to specify an exclusion
       # URL for both the relative URL and chrome:// URL syntax.
       '--exclude', 'strings.js',
       '--exclude', 'chrome://%s/strings.js' % args.host,

       '--manifest-out', manifest_out_path,
       '--root', in_path,
       '--redirect', '"chrome://%s/|%s"' % (args.host, in_path),
       '--out-dir', os.path.relpath(tmp_out_dir, _CWD),
       '--shell', args.html_in_files[0],
      ] + in_html_args)

  # Post-process each bundled HTML file in place.
  for html_file in args.html_in_files:
    bundled_html_path = os.path.join(
        os.path.relpath(tmp_out_dir, _CWD), html_file)
    with open(bundled_html_path, 'r') as f:
      output = f.read()

    # Grit includes are not supported, use HTML imports instead.
    output = output.replace('<include src="', '<include src-disabled="')

    if args.insert_in_head:
      assert '<head>' in output
      # NOTE(dbeam): polymer-bundler eats <base> tags after processing. This
      # undoes that by adding a <base> tag to the (post-processed) generated
      # output.
      output = output.replace('<head>', '<head>' + args.insert_in_head)

    # Open the file again with 'w' so that the previous contents are
    # overwritten. ('with' closes the file; no explicit close() needed.)
    with open(bundled_html_path, 'w') as f:
      f.write(output)

  try:
    # The three *_files lists are parallel; walk them in lockstep.
    for html_in_file, html_out_file, js_out_file in zip(
        args.html_in_files, args.html_out_files, args.js_out_files):
      # Run crisper to separate the JS from the HTML file.
      node.RunNode([node_modules.PathToCrisper(),
                    '--source', os.path.join(tmp_out_dir, html_in_file),
                    '--script-in-head', 'false',
                    '--html', os.path.join(tmp_out_dir, html_out_file),
                    '--js', os.path.join(tmp_out_dir, js_out_file)])

      # Move the HTML file to its final destination.
      shutil.copy(os.path.join(tmp_out_dir, html_out_file), out_path)

      # Pass the JS file through Uglify and write the output to its final
      # destination.
      node.RunNode([node_modules.PathToUglify(),
                    os.path.join(tmp_out_dir, js_out_file),
                    '--comments', '"/Copyright|license|LICENSE|\<\/?if/"',
                    '--output', os.path.join(out_path, js_out_file)])
  finally:
    # Always remove the intermediate bundle directory, even on failure.
    shutil.rmtree(tmp_out_dir)
  return manifest_out_path
199 | 202 |
200 | 203 |
def main(argv):
  """Parses arguments, bundles the page, validates the bundler's manifest and
  writes the Ninja depfile."""
  parser = argparse.ArgumentParser()
  parser.add_argument('--depfile', required=True)
  parser.add_argument('--exclude', nargs='*')
  parser.add_argument('--host', required=True)
  parser.add_argument('--html_in_files', nargs='*', required=True)
  parser.add_argument('--html_out_files', nargs='*', required=True)
  parser.add_argument('--input', required=True)
  parser.add_argument('--insert_in_head')
  parser.add_argument('--js_out_files', nargs='*', required=True)
  parser.add_argument('--out_folder', required=True)
  args = parser.parse_args(argv)

  # NOTE(dbeam): on Windows, GN can send dirs/like/this. When joined, you might
  # get dirs/like/this\file.txt. This looks odd to windows. Normalize to right
  # the slashes.
  args.depfile = os.path.normpath(args.depfile)
  args.input = os.path.normpath(args.input)
  args.out_folder = os.path.normpath(args.out_folder)

  manifest_out_path = _vulcanize(args.input, args)

  # Prior call to _vulcanize() generated an output manifest file, containing
  # information about all files that were bundled. Grab it from there.
  # Use a context manager so the file handle is closed promptly (the previous
  # open(...).read() leaked it).
  with open(manifest_out_path, 'r') as manifest_file:
    manifest = json.load(manifest_file)

  # polymer-bundler reports any missing files in the output manifest, instead
  # of directly failing. Ensure that no such files were encountered.
  if '_missing' in manifest:
    raise Exception(
        'polymer-bundler could not find files for the following URLs:\n' +
        '\n'.join(manifest['_missing']))

  _update_dep_file(args.input, args, manifest)
223 | 238 |
224 | 239 |
if __name__ == '__main__':
  # Skip argv[0] (the script path); pass only the flags to main().
  main(sys.argv[1:])
OLD | NEW |