# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

#
# Most of this file was ported over from Blink's
# Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
# Tools/Scripts/webkitpy/common/net/file_uploader.py
#

import json
import logging
import mimetypes
import os
import subprocess
import sys
import time
import urllib2
import xml.dom.minidom

_log = logging.getLogger(__name__)
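

# _get_archived_json_results() below calls strip_json_wrapper(), which the
# upstream webkitpy module defines but this port does not include. The helpers
# below are a minimal sketch of that upstream code, assuming the test-results
# server still wraps its JSON payload in an "ADD_RESULTS(...);" JSONP-style
# wrapper.
_JSON_PREFIX = "ADD_RESULTS("
_JSON_SUFFIX = ");"


def has_json_wrapper(string):
    return string.startswith(_JSON_PREFIX) and string.endswith(_JSON_SUFFIX)


def strip_json_wrapper(json_content):
    # FIXME: Kill this code once the server returns json instead of jsonp.
    assert has_json_wrapper(json_content)
    return json_content[len(_JSON_PREFIX):len(json_content) - len(_JSON_SUFFIX)]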


def write_json(json_object, file_path, callback=None):
    # Specify separators in order to get compact encoding.
    json_string = json.dumps(json_object, separators=(',', ':'))
    if callback:
        json_string = callback + "(" + json_string + ");"
    with open(file_path, 'w') as fp:
        fp.write(json_string)


def convert_trie_to_flat_paths(trie, prefix=None):
    """Converts the directory structure in the given trie to flat paths,
    prepending a prefix to each."""
    result = {}
    for name, data in trie.iteritems():
        if prefix:
            name = prefix + "/" + name

        if len(data) and "results" not in data:
            result.update(convert_trie_to_flat_paths(data, name))
        else:
            result[name] = data

    return result


def add_path_to_trie(path, value, trie):
    """Inserts a single flat directory path and associated value into a
    directory trie structure."""
    if "/" not in path:
        trie[path] = value
        return

    directory, slash, rest = path.partition("/")
    if directory not in trie:
        trie[directory] = {}
    add_path_to_trie(rest, value, trie[directory])


def test_timings_trie(individual_test_timings):
    """Breaks a test name into chunks by directory and puts the test time as
    a value in the lowest part, e.g.
    foo/bar/baz.html: 1ms
    foo/bar/baz1.html: 3ms

    becomes
    foo: {
        bar: {
            baz.html: 1,
            baz1.html: 3
        }
    }
    """
    trie = {}
    for test_result in individual_test_timings:
        test = test_result.test_name

        add_path_to_trie(test, int(1000 * test_result.test_run_time), trie)

    return trie
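
# A short usage sketch of the trie helpers above (hypothetical values, shown
# only to illustrate the data shapes involved):
#
#   timings = [TestResult('suite.Foo', elapsed_time=0.001),
#              TestResult('suite.Bar', failed=True, elapsed_time=0.003)]
#   test_timings_trie(timings)  => {'suite.Foo': 1, 'suite.Bar': 3}
#
# Because these names contain no "/" separators the trie stays flat; a name
# such as 'foo/bar/baz.html' nests as {'foo': {'bar': {'baz.html': ...}}},
# and convert_trie_to_flat_paths() restores the 'foo/bar/baz.html' form.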


class TestResult(object):
    """A simple class that represents a single test result."""

    # Test modifier constants.
    (NONE, FAILS, FLAKY, DISABLED) = range(4)

    def __init__(self, test, failed=False, elapsed_time=0):
        self.test_name = test
        self.failed = failed
        self.test_run_time = elapsed_time

        test_name = test
        try:
            test_name = test.split('.')[1]
        except IndexError:
            _log.warn("Invalid test name: %s.", test)

        if test_name.startswith('FAILS_'):
            self.modifier = self.FAILS
        elif test_name.startswith('FLAKY_'):
            self.modifier = self.FLAKY
        elif test_name.startswith('DISABLED_'):
            self.modifier = self.DISABLED
        else:
            self.modifier = self.NONE

    def fixable(self):
        return self.failed or self.modifier == self.DISABLED


class JSONResultsGeneratorBase(object):
    """A JSON results generator for generic tests."""

    MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750
    # Min time (seconds) that will be added to the JSON.
    MIN_TIME = 1

    # Note that in non-chromium tests those chars are used to indicate
    # test modifiers (FAILS, FLAKY, etc) but not actual test results.
    PASS_RESULT = "P"
    SKIP_RESULT = "X"
    FAIL_RESULT = "F"
    FLAKY_RESULT = "L"
    NO_DATA_RESULT = "N"

    MODIFIER_TO_CHAR = {TestResult.NONE: PASS_RESULT,
                        TestResult.DISABLED: SKIP_RESULT,
                        TestResult.FAILS: FAIL_RESULT,
                        TestResult.FLAKY: FLAKY_RESULT}

    VERSION = 4
    VERSION_KEY = "version"
    RESULTS = "results"
    TIMES = "times"
    BUILD_NUMBERS = "buildNumbers"
    TIME = "secondsSinceEpoch"
    TESTS = "tests"

    FIXABLE_COUNT = "fixableCount"
    FIXABLE = "fixableCounts"
    ALL_FIXABLE_COUNT = "allFixableCount"
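
    # For reference, the aggregated results.json produced by get_json() has
    # roughly this shape (field names come from the constants above; the
    # values are illustrative only):
    #
    #   {
    #     "version": 4,
    #     "<builder name>": {
    #       "buildNumbers": ["1235", "1234", ...],
    #       "secondsSinceEpoch": [1403222400, ...],
    #       "chromeRevision": [280000, ...],
    #       "fixableCount": [2, ...],
    #       "fixableCounts": [{"F": 2, "P": 148}, ...],
    #       "allFixableCount": [150, ...],
    #       "tests": {
    #         "suite": {
    #           "Foo": {"results": [[1, "F"], [3, "P"]], "times": [[4, 0]]}
    #         }
    #       }
    #     }
    #   }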

    RESULTS_FILENAME = "results.json"
    TIMES_MS_FILENAME = "times_ms.json"
    INCREMENTAL_RESULTS_FILENAME = "incremental_results.json"

    URL_FOR_TEST_LIST_JSON = ("http://%s/testfile?builder=%s&name=%s"
                              "&testlistjson=1&testtype=%s&master=%s")

    # FIXME: Remove generate_incremental_results once the reference to it in
    # http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/slave/gtest_slave_utils.py
    # has been removed.
    def __init__(self, builder_name, build_name, build_number,
                 results_file_base_path, builder_base_url,
                 test_results_map, svn_repositories=None,
                 test_results_server=None,
                 test_type="",
                 master_name="",
                 generate_incremental_results=None):
        """Modifies the results.json file. Grabs it off the archive directory
        if it is not found locally.

        Args:
            builder_name: the builder name (e.g. Webkit).
            build_name: the build name (e.g. webkit-rel).
            build_number: the build number.
            results_file_base_path: Absolute path to the directory containing
                the results json file.
            builder_base_url: the URL where we have the archived test results.
                If this is None no archived results will be retrieved.
            test_results_map: A dictionary that maps test_name to TestResult.
            svn_repositories: A (json_field_name, svn_path) pair for SVN
                repositories that tests rely on. The SVN revision will be
                included in the JSON with the given json_field_name.
            test_results_server: server that hosts test results json.
            test_type: test type string (e.g. 'layout-tests').
            master_name: the name of the buildbot master.
        """
        self._builder_name = builder_name
        self._build_name = build_name
        self._build_number = build_number
        self._builder_base_url = builder_base_url
        self._results_directory = results_file_base_path

        self._test_results_map = test_results_map
        self._test_results = test_results_map.values()

        self._svn_repositories = svn_repositories
        if not self._svn_repositories:
            self._svn_repositories = {}

        self._test_results_server = test_results_server
        self._test_type = test_type
        self._master_name = master_name

        self._archived_results = None

    def generate_json_output(self):
        json_object = self.get_json()
        if json_object:
            file_path = os.path.join(self._results_directory,
                                     self.INCREMENTAL_RESULTS_FILENAME)
            write_json(json_object, file_path)

    def generate_times_ms_file(self):
        times = test_timings_trie(self._test_results_map.values())
        file_path = os.path.join(self._results_directory,
                                 self.TIMES_MS_FILENAME)
        write_json(times, file_path)

    def get_json(self):
        """Gets the results for the results.json file."""
        results_json, error = self._get_archived_json_results()
        if error:
            # If there was an error don't write a results.json
            # file at all as it would lose all the information on the
            # bot.
            _log.error("Archive directory is inaccessible. Not "
                       "modifying or clobbering the results.json "
                       "file: " + str(error))
            return None

        builder_name = self._builder_name
        if results_json and builder_name not in results_json:
            _log.debug("Builder name (%s) is not in the results.json file."
                       % builder_name)

        self._convert_json_to_current_version(results_json)

        if builder_name not in results_json:
            results_json[builder_name] = (
                self._create_results_for_builder_json())

        results_for_builder = results_json[builder_name]

        if builder_name:
            self._insert_generic_metadata(results_for_builder)

        self._insert_failure_summaries(results_for_builder)

        # Update all the failing tests with result type and time.
        tests = results_for_builder[self.TESTS]
        all_failing_tests = self._get_failed_test_names()
        all_failing_tests.update(convert_trie_to_flat_paths(tests))

        for test in all_failing_tests:
            self._insert_test_time_and_result(test, tests)

        return results_json

    def set_archived_results(self, archived_results):
        self._archived_results = archived_results

    def upload_json_files(self, json_files):
        """Uploads the given json_files to the test_results_server (if the
        test_results_server is given)."""
        if not self._test_results_server:
            return

        if not self._master_name:
            _log.error("--test-results-server was set, but --master-name was "
                       "not. Not uploading JSON files.")
            return

        _log.info("Uploading JSON files for builder: %s", self._builder_name)
        attrs = [("builder", self._builder_name),
                 ("testtype", self._test_type),
                 ("master", self._master_name)]

        files = [(json_file, os.path.join(self._results_directory, json_file))
                 for json_file in json_files]

        url = "http://%s/testfile/upload" % self._test_results_server
        # Set uploading timeout in case appengine server is having problems.
        # 120 seconds are more than enough to upload test results.
        uploader = _FileUploader(url, 120)
        try:
            response = uploader.upload_as_multipart_form_data(files, attrs)
            if response:
                if response.code == 200:
                    _log.info("JSON uploaded.")
                else:
                    _log.debug("JSON upload failed, %d: '%s'" %
                               (response.code, response.read()))
            else:
                _log.error("JSON upload failed; no response returned")
        except Exception, err:
            _log.error("Upload failed: %s" % err)
            return

    def _get_test_timing(self, test_name):
        """Returns test timing data (elapsed time) in seconds
        for the given test_name."""
        if test_name in self._test_results_map:
            # Floor for now to get time in seconds.
            return int(self._test_results_map[test_name].test_run_time)
        return 0

    def _get_failed_test_names(self):
        """Returns a set of failed test names."""
        return set([r.test_name for r in self._test_results if r.failed])

    def _get_modifier_char(self, test_name):
        """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
        PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier
        for the given test_name.
        """
        if test_name not in self._test_results_map:
            return self.__class__.NO_DATA_RESULT

        test_result = self._test_results_map[test_name]
        if test_result.modifier in self.MODIFIER_TO_CHAR:
            return self.MODIFIER_TO_CHAR[test_result.modifier]

        return self.__class__.PASS_RESULT

    def _get_result_char(self, test_name):
        """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
        PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result
        for the given test_name.
        """
        if test_name not in self._test_results_map:
            return self.__class__.NO_DATA_RESULT

        test_result = self._test_results_map[test_name]
        if test_result.modifier == TestResult.DISABLED:
            return self.__class__.SKIP_RESULT

        if test_result.failed:
            return self.__class__.FAIL_RESULT

        return self.__class__.PASS_RESULT

    def _get_svn_revision(self, in_directory):
        """Returns the svn revision for the given directory.

        Args:
            in_directory: The directory where svn is to be run.
        """
        # This is overridden in flakiness_dashboard_results_uploader.py.
        raise NotImplementedError()

    def _get_archived_json_results(self):
        """Downloads a JSON file that contains only the test name list from
        the test-results server. This is used to generate incremental JSON so
        that the generated file has info for tests that failed before but
        pass or are skipped in the current run.

        Returns (archived_results, error) tuple where error is None if results
        were successfully read.
        """
        results_json = {}
        old_results = None
        error = None

        if not self._test_results_server:
            return {}, None

        results_file_url = (self.URL_FOR_TEST_LIST_JSON %
                            (urllib2.quote(self._test_results_server),
                             urllib2.quote(self._builder_name),
                             self.RESULTS_FILENAME,
                             urllib2.quote(self._test_type),
                             urllib2.quote(self._master_name)))

        try:
            # FIXME: We should talk to the network via a Host object.
            results_file = urllib2.urlopen(results_file_url)
            old_results = results_file.read()
        except urllib2.HTTPError, http_error:
            # A non-4xx status code means the bot is hosed for some reason
            # and we can't grab the results.json file off of it.
            if http_error.code < 400 or http_error.code >= 500:
                error = http_error
        except urllib2.URLError, url_error:
            error = url_error

        if old_results:
            # Strip the prefix and suffix so we can get the actual JSON object.
            old_results = strip_json_wrapper(old_results)

            try:
                results_json = json.loads(old_results)
            except ValueError:
                # The JSON file is not valid JSON. Just clobber the results.
                _log.debug("results.json was not valid JSON. Clobbering.")
                results_json = {}
        else:
            _log.debug('Old JSON results do not exist. Starting fresh.')
            results_json = {}

        return results_json, error

    def _insert_failure_summaries(self, results_for_builder):
        """Inserts aggregate pass/failure statistics into the JSON.
        This method reads self._test_results and generates
        FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT entries.

        Args:
            results_for_builder: Dictionary containing the test results for a
                single builder.
        """
        # Insert the number of tests that failed or were skipped.
        fixable_count = len([r for r in self._test_results if r.fixable()])
        self._insert_item_into_raw_list(results_for_builder,
                                        fixable_count, self.FIXABLE_COUNT)

        # Create a test modifiers (FAILS, FLAKY etc) summary dictionary.
        entry = {}
        for test_name in self._test_results_map.iterkeys():
            result_char = self._get_modifier_char(test_name)
            entry[result_char] = entry.get(result_char, 0) + 1

        # Insert the pass/skip/failure summary dictionary.
        self._insert_item_into_raw_list(results_for_builder, entry,
                                        self.FIXABLE)

        # Insert the number of all the tests that are supposed to pass.
        all_test_count = len(self._test_results)
        self._insert_item_into_raw_list(results_for_builder,
                                        all_test_count, self.ALL_FIXABLE_COUNT)

    def _insert_item_into_raw_list(self, results_for_builder, item, key):
        """Inserts the item into the list with the given key in the results
        for this builder. Creates the list if no such list exists.

        Args:
            results_for_builder: Dictionary containing the test results for a
                single builder.
            item: Number or string to insert into the list.
            key: Key in results_for_builder for the list to insert into.
        """
        if key in results_for_builder:
            raw_list = results_for_builder[key]
        else:
            raw_list = []

        raw_list.insert(0, item)
        raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG]
        results_for_builder[key] = raw_list

    def _insert_item_run_length_encoded(self, item, encoded_results):
        """Inserts the item into the run-length encoded results.

        Args:
            item: String or number to insert.
            encoded_results: run-length encoded results. An array of arrays,
                e.g. [[3,'A'],[1,'Q']] encodes AAAQ.
        """
        if len(encoded_results) and item == encoded_results[0][1]:
            num_results = encoded_results[0][0]
            if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
                encoded_results[0][0] = num_results + 1
        else:
            # Use a list instead of a class for the run-length encoding since
            # we want the serialized form to be concise.
            encoded_results.insert(0, [1, item])
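
    # A short example of the run-length encoding above (illustrative only):
    # inserting 'P' into [[3, 'P'], [1, 'F']] bumps the leading run to
    # [[4, 'P'], [1, 'F']], while inserting 'F' prepends a new run, giving
    # [[1, 'F'], [3, 'P'], [1, 'F']].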

    def _insert_generic_metadata(self, results_for_builder):
        """ Inserts generic metadata (such as version number, current time etc)
        into the JSON.

        Args:
            results_for_builder: Dictionary containing the test results for
                a single builder.
        """
        self._insert_item_into_raw_list(results_for_builder,
                                        self._build_number, self.BUILD_NUMBERS)

        # Include SVN revisions for the given repositories.
        for (name, path) in self._svn_repositories:
            # Note: for JSON file's backward-compatibility we use 'chrome'
            # rather than 'chromium' here.
            lowercase_name = name.lower()
            if lowercase_name == 'chromium':
                lowercase_name = 'chrome'
            self._insert_item_into_raw_list(results_for_builder,
                                            self._get_svn_revision(path),
                                            lowercase_name + 'Revision')

        self._insert_item_into_raw_list(results_for_builder,
                                        int(time.time()),
                                        self.TIME)

    def _insert_test_time_and_result(self, test_name, tests):
        """ Inserts a test item with its results into the given tests
        dictionary.

        Args:
            test_name: Name of the test.
            tests: Dictionary containing test result entries.
        """
        result = self._get_result_char(test_name)
        test_time = self._get_test_timing(test_name)

        this_test = tests
        for segment in test_name.split("/"):
            if segment not in this_test:
                this_test[segment] = {}
            this_test = this_test[segment]

        if not len(this_test):
            self._populate_results_and_times_json(this_test)

        if self.RESULTS in this_test:
            self._insert_item_run_length_encoded(result,
                                                 this_test[self.RESULTS])
        else:
            this_test[self.RESULTS] = [[1, result]]

        if self.TIMES in this_test:
            self._insert_item_run_length_encoded(test_time,
                                                 this_test[self.TIMES])
        else:
            this_test[self.TIMES] = [[1, test_time]]

    def _convert_json_to_current_version(self, results_json):
        """If the JSON does not match the current version, converts it to the
        current version and adds in the new version number.
        """
        if self.VERSION_KEY in results_json:
            archive_version = results_json[self.VERSION_KEY]
            if archive_version == self.VERSION:
                return
        else:
            archive_version = 3

        # version 3->4
        if archive_version == 3:
            for results in results_json.itervalues():
                self._convert_tests_to_trie(results)

        results_json[self.VERSION_KEY] = self.VERSION

    def _convert_tests_to_trie(self, results):
        if self.TESTS not in results:
            return

        test_results = results[self.TESTS]
        test_results_trie = {}
        for test in test_results.iterkeys():
            single_test_result = test_results[test]
            add_path_to_trie(test, single_test_result, test_results_trie)

        results[self.TESTS] = test_results_trie
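
    # Illustrative sketch of the version 3 -> 4 conversion above: a flat
    # version-3 "tests" dictionary such as
    #   {"foo/bar/baz.html": {"results": [[1, "P"]], "times": [[1, 0]]}}
    # becomes the nested version-4 trie
    #   {"foo": {"bar": {"baz.html": {"results": [[1, "P"]],
    #                                 "times": [[1, 0]]}}}}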

    def _populate_results_and_times_json(self, results_and_times):
        results_and_times[self.RESULTS] = []
        results_and_times[self.TIMES] = []
        return results_and_times

    def _create_results_for_builder_json(self):
        results_for_builder = {}
        results_for_builder[self.TESTS] = {}
        return results_for_builder

    def _remove_items_over_max_number_of_builds(self, encoded_list):
        """Removes items from the run-length encoded list after the final
        item that exceeds the max number of builds to track.

        Args:
            encoded_list: run-length encoded results. An array of arrays, e.g.
                [[3,'A'],[1,'Q']] encodes AAAQ.
        """
        num_builds = 0
        index = 0
        for result in encoded_list:
            num_builds = num_builds + result[0]
            index = index + 1
            if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
                return encoded_list[:index]
        return encoded_list

    def _normalize_results_json(self, test, test_name, tests):
        """ Prune tests where all runs pass or tests that no longer exist and
        truncate all results to maxNumberOfBuilds.

        Args:
            test: ResultsAndTimes object for this test.
            test_name: Name of the test.
            tests: The JSON object with all the test results for this builder.
        """
        test[self.RESULTS] = self._remove_items_over_max_number_of_builds(
            test[self.RESULTS])
        test[self.TIMES] = self._remove_items_over_max_number_of_builds(
            test[self.TIMES])

        is_all_pass = self._is_results_all_of_type(test[self.RESULTS],
                                                   self.PASS_RESULT)
        is_all_no_data = self._is_results_all_of_type(test[self.RESULTS],
                                                      self.NO_DATA_RESULT)
        max_time = max([run[1] for run in test[self.TIMES]])

        # Remove all passes/no-data from the results to reduce noise and
        # filesize. If a test passes every run, but takes > MIN_TIME to run,
        # don't throw away the data.
        if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME):
            del tests[test_name]

    def _is_results_all_of_type(self, results, result_type):
        """Returns whether all the results are of the given type
        (e.g. all passes)."""
        return len(results) == 1 and results[0][1] == result_type


# Left here not to break anything.
class JSONResultsGenerator(JSONResultsGeneratorBase):
    pass
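
# A minimal usage sketch (the builder/master names and paths below are made
# up; a real caller would subclass JSONResultsGeneratorBase if it needs
# _get_svn_revision()):
#
#   results_map = {'suite.Foo': TestResult('suite.Foo', failed=True,
#                                          elapsed_time=0.2)}
#   generator = JSONResultsGenerator(
#       builder_name='Android Tests', build_name='android-rel',
#       build_number='1234', results_file_base_path='/tmp/results',
#       builder_base_url=None, test_results_map=results_map,
#       test_results_server='test-results.example.com',
#       test_type='unit-tests', master_name='chromium.android')
#   generator.generate_json_output()     # writes incremental_results.json
#   generator.generate_times_ms_file()   # writes times_ms.json
#   generator.upload_json_files(['incremental_results.json', 'times_ms.json'])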


class _FileUploader(object):
    def __init__(self, url, timeout_seconds):
        self._url = url
        self._timeout_seconds = timeout_seconds

    def upload_as_multipart_form_data(self, files, attrs):
        # Read each file from disk; upload_json_files() passes (files, attrs).
        file_objs = []
        for filename, path in files:
            with open(path, 'rb') as fp:
                file_objs.append(('file', filename, fp.read()))

        # FIXME: We should use the same variable names for the formal and
        # actual parameters.
        content_type, data = _encode_multipart_form_data(attrs, file_objs)
        return self._upload_data(content_type, data)

    def _upload_data(self, content_type, data):
        start = time.time()
        end = start + self._timeout_seconds
        while time.time() < end:
            try:
                request = urllib2.Request(self._url, data,
                                          {"Content-Type": content_type})
                return urllib2.urlopen(request)
            except urllib2.HTTPError as e:
                _log.warn("Received HTTP status %s loading \"%s\". "
                          "Retrying in 10 seconds..." % (e.code, e.filename))
                time.sleep(10)


def _get_mime_type(filename):
    return mimetypes.guess_type(filename)[0] or 'application/octet-stream'


# FIXME: Rather than taking tuples, this function should take more
# structured data.
def _encode_multipart_form_data(fields, files):
    """Encode form fields for multipart/form-data.

    Args:
        fields: A sequence of (name, value) elements for regular form fields.
        files: A sequence of (name, filename, value) elements for data to be
            uploaded as files.
    Returns:
        (content_type, body) ready for httplib.HTTP instance.

    Source:
        http://code.google.com/p/rietveld/source/browse/trunk/upload.py
    """
    BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
    CRLF = '\r\n'
    lines = []

    for key, value in fields:
        lines.append('--' + BOUNDARY)
        lines.append('Content-Disposition: form-data; name="%s"' % key)
        lines.append('')
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        lines.append(value)

    for key, filename, value in files:
        lines.append('--' + BOUNDARY)
        lines.append('Content-Disposition: form-data; name="%s"; '
                     'filename="%s"' % (key, filename))
        lines.append('Content-Type: %s' % _get_mime_type(filename))
        lines.append('')
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        lines.append(value)

    lines.append('--' + BOUNDARY + '--')
    lines.append('')
    body = CRLF.join(lines)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, body
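
# For example (illustrative only), _encode_multipart_form_data(
#     [('builder', 'Android Tests')], [('file', 'results.json', '{}')])
# returns the content type
#     'multipart/form-data; boundary=-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
# and a CRLF-delimited body with one form-data part per field and file.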