OLD | NEW |
---|---|
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright 2013 The Chromium Authors. All rights reserved. | 2 # Copyright 2013 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Uploads a ton of stuff to isolateserver to test its handling. | 6 """Uploads a ton of stuff to isolateserver to test its handling. |
7 | 7 |
8 Generates a histogram of the latencies to download a just-uploaded file. | 8 Generates a histogram of the latencies to download a just-uploaded file. |
9 | 9 |
10 Note that it only looks at uploading and downloading and does not test | 10 Note that it only looks at uploading and downloading and does not test |
(...skipping 34 matching lines...)
45 def gen(self, size): | 45 def gen(self, size): |
46 """Returns a str containing random data from the pool of size |size|.""" | 46 """Returns a str containing random data from the pool of size |size|.""" |
47 chunks = int(size / 1024) | 47 chunks = int(size / 1024) |
48 rest = size - (chunks*1024) | 48 rest = size - (chunks*1024) |
49 data = ''.join(random.choice(self.pool) for _ in xrange(chunks)) | 49 data = ''.join(random.choice(self.pool) for _ in xrange(chunks)) |
50 data += random.choice(self.pool)[:rest] | 50 data += random.choice(self.pool)[:rest] |
51 return data | 51 return data |
52 | 52 |
53 | 53 |
54 class Progress(threading_utils.Progress): | 54 class Progress(threading_utils.Progress): |
55 def __init__(self, *args, **kwargs): | 55 def _render_columns(self): |
56 super(Progress, self).__init__(*args, **kwargs) | 56 """Prints the size data as 'units'.""" |
57 self.total = 0 | 57 columns_as_str = [ |
58 | 58 str(self._columns[0]), |
59 def increment_index(self, name): | 59 graph.to_units(self._columns[1]).rjust(6), |
60 self.update_item(name, index=1) | 60 str(self._columns[2]), |
61 | 61 ] |
62 def increment_count(self): | 62 max_len = max((len(columns_as_str[0]), len(columns_as_str[2]))) |
63 self.update_item('', size=1) | 63 return '/'.join(i.rjust(max_len) for i in columns_as_str) |
64 | |
65 def gen_line(self, name): | |
66 """Generates the line to be printed. | |
67 | |
68 |name| is actually the item size. | |
69 """ | |
70 if name: | |
71 self.total += name | |
72 name = graph.to_units(name) | |
73 | |
74 next_line = ('[%*d/%d] %6.2fs %8s %8s') % ( | |
75 len(str(self.size)), self.index, | |
76 self.size, | |
77 time.time() - self.start, | |
78 name, | |
79 graph.to_units(self.total)) | |
80 # Fill it with whitespace only if self.use_cr_only is set. | |
81 prefix = '' | |
82 if self.use_cr_only and self.last_printed_line: | |
83 prefix = '\r' | |
84 if self.use_cr_only: | |
85 suffix = ' ' * max(0, len(self.last_printed_line) - len(next_line)) | |
86 else: | |
87 suffix = '\n' | |
88 return '%s%s%s' % (prefix, next_line, suffix), next_line | |
89 | 64 |
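Note: the new _render_columns() above replaces the old gen_line() bookkeeping. Judging from this diff, column 0 counts completed items, column 1 accumulates transferred bytes (pretty-printed by graph.to_units()), and column 2 holds the number of items queued (initialized to options.items in main() below), so the indicator reads as a single 'done/bytes/queued' cell. A rough stand-alone sketch of the padding logic, with a simple kb formatter standing in for graph.to_units(), whose exact output format lives in graph.py:

# Stand-alone mirror of the CL's _render_columns(); the kb formatter is only a
# stand-in for graph.to_units().
def render_columns_demo(columns, to_units=lambda n: '%dkb' % (n // 1024)):
  columns_as_str = [
      str(columns[0]),                # items completed
      to_units(columns[1]).rjust(6),  # bytes transferred, pretty-printed
      str(columns[2]),                # items queued
  ]
  max_len = max((len(columns_as_str[0]), len(columns_as_str[2])))
  return '/'.join(i.rjust(max_len) for i in columns_as_str)


print(render_columns_demo([12, 345678, 500]))  # ' 12/ 337kb/500'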
90 | 65 |
91 def print_results(results, columns, buckets): | 66 def print_results(results, columns, buckets): |
92 delays = [i[0] for i in results if isinstance(i[0], float)] | 67 delays = [i[0] for i in results if isinstance(i[0], float)] |
93 failures = [i for i in results if not isinstance(i[0], float)] | 68 failures = [i for i in results if not isinstance(i[0], float)] |
94 sizes = [i[1] for i in results] | 69 sizes = [i[1] for i in results] |
95 | 70 |
96 print('%sSIZES%s (bytes):' % (colorama.Fore.RED, colorama.Fore.RESET)) | 71 print('%sSIZES%s (bytes):' % (colorama.Fore.RED, colorama.Fore.RESET)) |
97 graph.print_histogram( | 72 graph.print_histogram( |
98 graph.generate_histogram(sizes, buckets), columns, '%d') | 73 graph.generate_histogram(sizes, buckets), columns, '%d') |
(...skipping 48 matching lines...)
147 logging.info('download') | 122 logging.info('download') |
148 # Only count download time when not in dry run mode. | 123 # Only count download time when not in dry run mode. |
149 # TODO(maruel): Count the number of retries! | 124 # TODO(maruel): Count the number of retries! |
150 start = time.time() | 125 start = time.time() |
151 assert content == ''.join(api.fetch(hash_value, size)) | 126 assert content == ''.join(api.fetch(hash_value, size)) |
152 else: | 127 else: |
153 time.sleep(size / 10.) | 128 time.sleep(size / 10.) |
154 duration = max(0, time.time() - start) | 129 duration = max(0, time.time() - start) |
155 except isolateserver.MappingError as e: | 130 except isolateserver.MappingError as e: |
156 duration = str(e) | 131 duration = str(e) |
157 progress.increment_index(size if isinstance(duration, float) else 0) | 132 if isinstance(duration, float): |
133 progress.update_item('', col0=1, col1=size) | |
Vadim Sh. 2013/10/02 18:41:41: I find 'colN' thing very confusing. Alternative:
134 else: | |
135 progress.update_item('', col0=1) | |
158 return (duration, size) | 136 return (duration, size) |
159 | 137 |
160 | 138 |
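Following up on the 'colN' comment above: the CL drops the Progress.increment_index()/increment_count() helpers and calls threading_utils.Progress.update_item() with positional colN keywords directly (col0 = items completed, col1 = bytes, col2 = items queued, per this diff). One possible shape for a named alternative, sketched here purely as an illustration and not part of the CL:

# Hypothetical wrapper around the CL's Progress class, giving the positional
# colN counters descriptive entry points. Not part of this change.
class NamedProgress(Progress):
  def item_queued(self):
    # One more task scheduled on the thread pool.
    self.update_item('', col2=1)

  def item_done(self, size):
    # One round-trip finished; record its payload size.
    self.update_item('', col0=1, col1=size)

  def item_failed(self):
    # One round-trip finished without a measurable transfer.
    self.update_item('', col0=1)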
161 def main(): | 139 def main(): |
162 colorama.init() | 140 colorama.init() |
163 | 141 |
164 parser = optparse.OptionParser(description=sys.modules[__name__].__doc__) | 142 parser = optparse.OptionParser(description=sys.modules[__name__].__doc__) |
165 parser.add_option( | 143 parser.add_option( |
166 '-I', '--isolate-server', | 144 '-I', '--isolate-server', |
167 metavar='URL', default='', | 145 metavar='URL', default='', |
(...skipping 44 matching lines...)
212 ' - Using %d threads, items=%d, max-size=%d, mid-size=%d' % ( | 190 ' - Using %d threads, items=%d, max-size=%d, mid-size=%d' % ( |
213 options.threads, options.items, options.max_size, options.mid_size)) | 191 options.threads, options.items, options.max_size, options.mid_size)) |
214 if options.dry_run: | 192 if options.dry_run: |
215 print(' - %sDRY RUN MODE%s' % (colorama.Fore.GREEN, colorama.Fore.RESET)) | 193 print(' - %sDRY RUN MODE%s' % (colorama.Fore.GREEN, colorama.Fore.RESET)) |
216 | 194 |
217 start = time.time() | 195 start = time.time() |
218 | 196 |
219 random_pool = Randomness() | 197 random_pool = Randomness() |
220 print(' - Generated pool after %.1fs' % (time.time() - start)) | 198 print(' - Generated pool after %.1fs' % (time.time() - start)) |
221 | 199 |
222 progress = Progress(options.items) | 200 progress = Progress([0, 0, options.items]) |
223 api = isolateserver.get_storage_api(options.isolate_server, options.namespace) | 201 api = isolateserver.get_storage_api(options.isolate_server, options.namespace) |
224 do_item = functools.partial( | 202 do_item = functools.partial( |
225 send_and_receive, | 203 send_and_receive, |
226 random_pool, | 204 random_pool, |
227 options.dry_run, | 205 options.dry_run, |
228 api, | 206 api, |
229 progress) | 207 progress) |
230 | 208 |
231 # TODO(maruel): Handle Ctrl-C should: | 209 # TODO(maruel): Handle Ctrl-C should: |
232 # - Stop adding tasks. | 210 # - Stop adding tasks. |
233 # - Stop scheduling tasks in ThreadPool. | 211 # - Stop scheduling tasks in ThreadPool. |
234 # - Wait for the remaining ongoing tasks to complete. | 212 # - Wait for the remaining ongoing tasks to complete. |
235 # - Still print details and write the json file. | 213 # - Still print details and write the json file. |
236 with threading_utils.ThreadPoolWithProgress( | 214 with threading_utils.ThreadPoolWithProgress( |
237 progress, options.threads, options.threads, 0) as pool: | 215 progress, options.threads, options.threads, 0) as pool: |
238 if options.items: | 216 if options.items: |
239 for _ in xrange(options.items): | 217 for _ in xrange(options.items): |
240 pool.add_task(0, do_item, gen_size(options.mid_size)) | 218 pool.add_task(0, do_item, gen_size(options.mid_size)) |
219 progress.print_update() | |
241 elif options.max_size: | 220 elif options.max_size: |
242 # This one is approximate. | 221 # This one is approximate. |
243 total = 0 | 222 total = 0 |
244 while True: | 223 while True: |
245 size = gen_size(options.mid_size) | 224 size = gen_size(options.mid_size) |
246 progress.increment_count() | 225 progress.update_item('', col2=1) |
247 progress.print_update() | 226 progress.print_update() |
248 pool.add_task(0, do_item, size) | 227 pool.add_task(0, do_item, size) |
249 total += size | 228 total += size |
250 if total >= options.max_size: | 229 if total >= options.max_size: |
251 break | 230 break |
252 results = sorted(pool.join()) | 231 results = sorted(pool.join()) |
253 | 232 |
254 print('') | 233 print('') |
255 print(' - Took %.1fs.' % (time.time() - start)) | 234 print(' - Took %.1fs.' % (time.time() - start)) |
256 print('') | 235 print('') |
257 print_results(results, options.columns, options.buckets) | 236 print_results(results, options.columns, options.buckets) |
258 if options.dump: | 237 if options.dump: |
259 with open(options.dump, 'w') as f: | 238 with open(options.dump, 'w') as f: |
260 json.dump(results, f, separators=(',',':')) | 239 json.dump(results, f, separators=(',',':')) |
261 return 0 | 240 return 0 |
262 | 241 |
263 | 242 |
264 if __name__ == '__main__': | 243 if __name__ == '__main__': |
265 sys.exit(main()) | 244 sys.exit(main()) |
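The --dump option writes the sorted results list as JSON: each entry is a (duration, size) pair, where duration is a float in seconds for a successful round-trip and the MappingError message string for a failure, matching the filtering in print_results(). A small post-processing sketch, assuming the dump was written to a hypothetical results.json:

import json

# Load the list written by --dump; each entry is [duration, size].
with open('results.json') as f:
  results = json.load(f)

# Successful round-trips carry a float duration in seconds; failures carry the
# MappingError message instead, mirroring print_results() above.
delays = [d for d, _ in results if isinstance(d, float)]
failures = [(d, s) for d, s in results if not isinstance(d, float)]
total_bytes = sum(s for _, s in results)
print('%d ok, %d failed, %d bytes round-tripped' % (
    len(delays), len(failures), total_bytes))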