Chromium Code Reviews

Side by Side Diff: tools/isolateserver_load_test.py

Issue 25478012: Make Progress support an arbitrary number of columns. (Closed) Base URL: https://chromium.googlesource.com/a/chromium/tools/swarm_client@1_progress
Patch Set: Kept Progress.start public | Created 7 years, 2 months ago
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright 2013 The Chromium Authors. All rights reserved. 2 # Copyright 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Uploads a ton of stuff to isolateserver to test its handling. 6 """Uploads a ton of stuff to isolateserver to test its handling.
7 7
8 Generates a histogram of the latencies to download a just-uploaded file. 8 Generates a histogram of the latencies to download a just-uploaded file.
9 9
10 Note that it only looks at uploading and downloading and does not test 10 Note that it only looks at uploading and downloading and does not test
(...skipping 34 matching lines...)
45 def gen(self, size): 45 def gen(self, size):
46 """Returns a str containing random data from the pool of size |size|.""" 46 """Returns a str containing random data from the pool of size |size|."""
47 chunks = int(size / 1024) 47 chunks = int(size / 1024)
48 rest = size - (chunks*1024) 48 rest = size - (chunks*1024)
49 data = ''.join(random.choice(self.pool) for _ in xrange(chunks)) 49 data = ''.join(random.choice(self.pool) for _ in xrange(chunks))
50 data += random.choice(self.pool)[:rest] 50 data += random.choice(self.pool)[:rest]
51 return data 51 return data
52 52
53 53
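Note on the chunked generator above: Randomness.gen() assembles a payload from pre-generated 1 KiB blocks instead of producing fresh random bytes, so a large item costs one random.choice() per kilobyte. A self-contained sketch of the same idea follows; the pool size of 1024 blocks and the use of ASCII letters are assumptions, the real pool setup is in the elided part of the file.

import random
import string

class RandomPool(object):
  """Builds payloads from pre-generated 1 KiB blocks (illustrative sketch)."""

  def __init__(self, pool_size=1024):
    # One-time cost: 1024 blocks of 1024 random letters each (~1 MiB).
    self.pool = [
        ''.join(random.choice(string.ascii_letters) for _ in range(1024))
        for _ in range(pool_size)
    ]

  def gen(self, size):
    """Returns |size| characters stitched from whole blocks plus a remainder."""
    chunks, rest = divmod(size, 1024)
    data = ''.join(random.choice(self.pool) for _ in range(chunks))
    return data + random.choice(self.pool)[:rest]

random.choice() then picks whole blocks, so a 10 MiB payload needs roughly ten thousand choices rather than ten million random characters.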
54 class Progress(threading_utils.Progress): 54 class Progress(threading_utils.Progress):
55 def __init__(self, *args, **kwargs): 55 def _render_columns(self):
56 super(Progress, self).__init__(*args, **kwargs) 56 """Prints the size data as 'units'."""
57 self.total = 0 57 columns_as_str = [
58 58 str(self._columns[0]),
59 def increment_index(self, name): 59 graph.to_units(self._columns[1]).rjust(6),
60 self.update_item(name, index=1) 60 str(self._columns[2]),
61 61 ]
62 def increment_count(self): 62 max_len = max((len(columns_as_str[0]), len(columns_as_str[2])))
63 self.update_item('', size=1) 63 return '/'.join(i.rjust(max_len) for i in columns_as_str)
64
65 def gen_line(self, name):
66 """Generates the line to be printed.
67
68 |name| is actually the item size.
69 """
70 if name:
71 self.total += name
72 name = graph.to_units(name)
73
74 next_line = ('[%*d/%d] %6.2fs %8s %8s') % (
75 len(str(self.size)), self.index,
76 self.size,
77 time.time() - self.start,
78 name,
79 graph.to_units(self.total))
80 # Fill it with whitespace only if self.use_cr_only is set.
81 prefix = ''
82 if self.use_cr_only and self.last_printed_line:
83 prefix = '\r'
84 if self.use_cr_only:
85 suffix = ' ' * max(0, len(self.last_printed_line) - len(next_line))
86 else:
87 suffix = '\n'
88 return '%s%s%s' % (prefix, next_line, suffix), next_line
89 64
90 65
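The chunk above is the point of this CL: the old subclass had to override gen_line() and keep its own self.total counter, while the new one only overrides _render_columns() and lets the base threading_utils.Progress track a list of columns. The base class is not part of this diff, so the following is only a sketch of the column-tracking shape implied by the calls visible in this file; the class and method internals here are illustrative.

import threading
import time

class ColumnProgress(object):
  """Tracks named counters in a fixed order and renders them on one line.

  Sketch only; the real threading_utils.Progress API may differ in details.
  """

  def __init__(self, columns):
    # |columns| is a list of (name, initial_value) pairs, e.g.
    # [('index', 0), ('data', 0), ('size', 10)].
    self._names = [name for name, _ in columns]
    self._columns = [value for _, value in columns]
    self._lock = threading.Lock()
    self.start = time.time()

  def update_item(self, _name, **kwargs):
    """Adds deltas to named columns, e.g. update_item('', index=1, data=4096)."""
    with self._lock:
      for name, delta in kwargs.items():
        self._columns[self._names.index(name)] += delta

  def _render_columns(self):
    """Default rendering is 'col0/col1/col2'; subclasses override this."""
    return '/'.join(str(value) for value in self._columns)

  def print_update(self):
    print('[%s] %6.2fs' % (self._render_columns(), time.time() - self.start))

With that split, the subclass in this file only reformats column 1 (the byte count) through graph.to_units() and right-justifies the index and size columns, instead of rebuilding the whole status line the way the removed gen_line() did.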
91 def print_results(results, columns, buckets): 66 def print_results(results, columns, buckets):
92 delays = [i[0] for i in results if isinstance(i[0], float)] 67 delays = [i[0] for i in results if isinstance(i[0], float)]
93 failures = [i for i in results if not isinstance(i[0], float)] 68 failures = [i for i in results if not isinstance(i[0], float)]
94 sizes = [i[1] for i in results] 69 sizes = [i[1] for i in results]
95 70
96 print('%sSIZES%s (bytes):' % (colorama.Fore.RED, colorama.Fore.RESET)) 71 print('%sSIZES%s (bytes):' % (colorama.Fore.RED, colorama.Fore.RESET))
97 graph.print_histogram( 72 graph.print_histogram(
98 graph.generate_histogram(sizes, buckets), columns, '%d') 73 graph.generate_histogram(sizes, buckets), columns, '%d')
(...skipping 48 matching lines...)
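print_results() above separates float durations (successes) from string durations (failures), then feeds the raw lists to graph.generate_histogram() and graph.print_histogram(). graph.py is not part of this diff; as a rough idea of what bucketing a list of values into a fixed number of bins involves, here is a plain-Python sketch, not the actual graph.py code.

def generate_histogram(values, buckets):
  """Maps each value to one of |buckets| equal-width bins.

  Returns {bin_lower_bound: count}. Illustrative sketch only.
  """
  if not values:
    return {}
  minimum, maximum = min(values), max(values)
  span = float(maximum - minimum) or 1.0
  out = {}
  for value in values:
    # The last bin is closed so the maximum does not overflow the range.
    index = min(int((value - minimum) * buckets / span), buckets - 1)
    lower_bound = minimum + index * span / buckets
    out[lower_bound] = out.get(lower_bound, 0) + 1
  return out

print(generate_histogram([1, 2, 2, 3, 9], 3))  # {1.0: 4, 6.333...: 1}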
147 logging.info('download') 122 logging.info('download')
148 # Only count download time when not in dry run mode. 123 # Only count download time when not in dry run mode.
149 # TODO(maruel): Count the number of retries! 124 # TODO(maruel): Count the number of retries!
150 start = time.time() 125 start = time.time()
151 assert content == ''.join(api.fetch(hash_value, size)) 126 assert content == ''.join(api.fetch(hash_value, size))
152 else: 127 else:
153 time.sleep(size / 10.) 128 time.sleep(size / 10.)
154 duration = max(0, time.time() - start) 129 duration = max(0, time.time() - start)
155 except isolateserver.MappingError as e: 130 except isolateserver.MappingError as e:
156 duration = str(e) 131 duration = str(e)
157 progress.increment_index(size if isinstance(duration, float) else 0) 132 if isinstance(duration, float):
133 progress.update_item('', index=1, data=size)
134 else:
135 progress.update_item('', index=1)
158 return (duration, size) 136 return (duration, size)
159 137
160 138
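send_and_receive() above folds success and failure into one return value: the elapsed time as a float when the round-trip worked, str(e) when isolateserver.MappingError was raised, which is what lets print_results() split the tuples with isinstance() afterwards. Stripped of the isolateserver details, the pattern looks like this (timed_call is a made-up name for illustration):

import time

def timed_call(func, *args):
  """Returns elapsed seconds on success, the error text on failure.

  Sketch of the convention used above; the real code only catches
  isolateserver.MappingError rather than every exception.
  """
  start = time.time()
  try:
    func(*args)
    return max(0., time.time() - start)
  except Exception as e:
    return str(e)

results = [timed_call(time.sleep, 0.01), timed_call(int, 'not a number')]
delays = [r for r in results if isinstance(r, float)]        # [~0.01]
failures = [r for r in results if not isinstance(r, float)]  # [ValueError text]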
161 def main(): 139 def main():
162 colorama.init() 140 colorama.init()
163 141
164 parser = optparse.OptionParser(description=sys.modules[__name__].__doc__) 142 parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
165 parser.add_option( 143 parser.add_option(
166 '-I', '--isolate-server', 144 '-I', '--isolate-server',
167 metavar='URL', default='', 145 metavar='URL', default='',
(...skipping 44 matching lines...)
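The flags are declared with optparse; the module docstring doubles as the --help description, and each add_option() call becomes an attribute on the parsed options object. Only the --isolate-server declaration is visible above, so the sketch below keeps just that flag plus an invented --items flag to show where the attributes used later (options.items, options.threads, options.dry_run, and so on) come from; the help strings are placeholders, not copied from the file.

import optparse
import sys

parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
parser.add_option(
    '-I', '--isolate-server', metavar='URL', default='',
    help='Isolate server to load test')   # Placeholder help text.
parser.add_option(
    '--items', type='int', default=0,
    help='Number of items to upload')     # Invented flag for illustration.
options, args = parser.parse_args()
# optparse derives the attribute name from the long flag: --isolate-server
# becomes options.isolate_server, --items becomes options.items.
print('%s %d' % (options.isolate_server, options.items))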
212 ' - Using %d thread, items=%d, max-size=%d, mid-size=%d' % ( 190 ' - Using %d thread, items=%d, max-size=%d, mid-size=%d' % (
213 options.threads, options.items, options.max_size, options.mid_size)) 191 options.threads, options.items, options.max_size, options.mid_size))
214 if options.dry_run: 192 if options.dry_run:
215 print(' - %sDRY RUN MODE%s' % (colorama.Fore.GREEN, colorama.Fore.RESET)) 193 print(' - %sDRY RUN MODE%s' % (colorama.Fore.GREEN, colorama.Fore.RESET))
216 194
217 start = time.time() 195 start = time.time()
218 196
219 random_pool = Randomness() 197 random_pool = Randomness()
220 print(' - Generated pool after %.1fs' % (time.time() - start)) 198 print(' - Generated pool after %.1fs' % (time.time() - start))
221 199
222 progress = Progress(options.items) 200 columns = [('index', 0), ('data', 0), ('size', options.items)]
201 progress = Progress(columns)
223 api = isolateserver.get_storage_api(options.isolate_server, options.namespace) 202 api = isolateserver.get_storage_api(options.isolate_server, options.namespace)
224 do_item = functools.partial( 203 do_item = functools.partial(
225 send_and_receive, 204 send_and_receive,
226 random_pool, 205 random_pool,
227 options.dry_run, 206 options.dry_run,
228 api, 207 api,
229 progress) 208 progress)
230 209
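The functools.partial() call above pre-binds the four arguments that are the same for every task (random pool, dry-run flag, storage API, progress tracker), so the thread pool only has to pass the per-item size. A minimal illustration of the pattern with stand-in names (upload here is not a function from this file):

import functools

def upload(pool, dry_run, api, progress, size):
  # Everything except |size| is shared state bound once below.
  return '%s: %d bytes (dry_run=%s)' % (api, size, dry_run)

do_item = functools.partial(upload, object(), False, 'https://example.test', None)

# Callers now supply only the varying argument:
print(do_item(1024))   # https://example.test: 1024 bytes (dry_run=False)
print(do_item(4096))   # https://example.test: 4096 bytes (dry_run=False)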
231 # TODO(maruel): Handle Ctrl-C should: 210 # TODO(maruel): Handle Ctrl-C should:
232 # - Stop adding tasks. 211 # - Stop adding tasks.
233 # - Stop scheduling tasks in ThreadPool. 212 # - Stop scheduling tasks in ThreadPool.
234 # - Wait for the remaining ongoing tasks to complete. 213 # - Wait for the remaining ongoing tasks to complete.
235 # - Still print details and write the json file. 214 # - Still print details and write the json file.
236 with threading_utils.ThreadPoolWithProgress( 215 with threading_utils.ThreadPoolWithProgress(
237 progress, options.threads, options.threads, 0) as pool: 216 progress, options.threads, options.threads, 0) as pool:
238 if options.items: 217 if options.items:
239 for _ in xrange(options.items): 218 for _ in xrange(options.items):
240 pool.add_task(0, do_item, gen_size(options.mid_size)) 219 pool.add_task(0, do_item, gen_size(options.mid_size))
220 progress.print_update()
241 elif options.max_size: 221 elif options.max_size:
242 # This one is approximate. 222 # This one is approximate.
243 total = 0 223 total = 0
244 while True: 224 while True:
245 size = gen_size(options.mid_size) 225 size = gen_size(options.mid_size)
246 progress.increment_count() 226 progress.update_item('', size=1)
247 progress.print_update() 227 progress.print_update()
248 pool.add_task(0, do_item, size) 228 pool.add_task(0, do_item, size)
249 total += size 229 total += size
250 if total >= options.max_size: 230 if total >= options.max_size:
251 break 231 break
252 results = sorted(pool.join()) 232 results = sorted(pool.join())
253 233
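The queuing logic above either schedules exactly options.items tasks or keeps scheduling random sizes until their running total crosses options.max_size, and pool.join() then returns every (duration, size) tuple for sorting. ThreadPoolWithProgress lives in threading_utils and is not shown here; the same fan-out shape with the standard library looks roughly like this (stand-in functions and values, not the swarm_client API):

import random
from concurrent.futures import ThreadPoolExecutor

def do_item(size):
  return (size / 1000.0, size)        # Stand-in for send_and_receive().

max_size = 50000                      # Stand-in for options.max_size.
futures = []
with ThreadPoolExecutor(max_workers=16) as pool:
  total = 0
  while total < max_size:
    size = random.randint(1, 10000)   # Stand-in for gen_size(mid_size).
    futures.append(pool.submit(do_item, size))
    total += size
results = sorted(f.result() for f in futures)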
254 print('') 234 print('')
255 print(' - Took %.1fs.' % (time.time() - start)) 235 print(' - Took %.1fs.' % (time.time() - start))
256 print('') 236 print('')
257 print_results(results, options.columns, options.buckets) 237 print_results(results, options.columns, options.buckets)
258 if options.dump: 238 if options.dump:
259 with open(options.dump, 'w') as f: 239 with open(options.dump, 'w') as f:
260 json.dump(results, f, separators=(',',':')) 240 json.dump(results, f, separators=(',',':'))
261 return 0 241 return 0
262 242
263 243
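One small detail in the --dump branch: passing separators=(',', ':') to json.dump() drops the spaces json inserts after commas and colons by default, which adds up when the results list holds thousands of (duration, size) pairs. For example:

import json

data = [(0.12, 4096), (0.5, 1024)]
print(json.dumps(data))                         # [[0.12, 4096], [0.5, 1024]]
print(json.dumps(data, separators=(',', ':')))  # [[0.12,4096],[0.5,1024]]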
264 if __name__ == '__main__': 244 if __name__ == '__main__':
265 sys.exit(main()) 245 sys.exit(main())