Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(508)

Side by Side Diff: tools/deep_memory_profiler/dmprof

Issue 9812010: Breakdown nonprofiled memory regions (f.k.a. 'unknown'), and add new policy files. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: reflected the comments. Created 8 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « tools/deep_memory_profiler/dmpolicy ('k') | tools/deep_memory_profiler/dmprof.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """The deep heap profiler script for Chrome.""" 6 """The deep heap profiler script for Chrome."""
7 7
8 from datetime import datetime 8 from datetime import datetime
9 import json 9 import json
10 import os 10 import os
(...skipping 27 matching lines...) Expand all
38 # DUMP_DEEP_2 DOES distinguish mmap regions and malloc chunks. 38 # DUMP_DEEP_2 DOES distinguish mmap regions and malloc chunks.
39 # Their stacktraces still DO contain mmap* or tc-*. 39 # Their stacktraces still DO contain mmap* or tc-*.
40 # They should be processed by POLICY_DEEP_1. 40 # They should be processed by POLICY_DEEP_1.
41 DUMP_DEEP_2 = 'DUMP_DEEP_2' 41 DUMP_DEEP_2 = 'DUMP_DEEP_2'
42 42
43 # DUMP_DEEP_3 DOES distinguish mmap regions and malloc chunks. 43 # DUMP_DEEP_3 DOES distinguish mmap regions and malloc chunks.
44 # Their stacktraces DO NOT contain mmap* or tc-*. 44 # Their stacktraces DO NOT contain mmap* or tc-*.
45 # They should be processed by POLICY_DEEP_2. 45 # They should be processed by POLICY_DEEP_2.
46 DUMP_DEEP_3 = 'DUMP_DEEP_3' 46 DUMP_DEEP_3 = 'DUMP_DEEP_3'
47 47
48 # DUMP_DEEP_4 adds some features to DUMP_DEEP_3:
49 # 1. Support comments starting with '#'
50 # 2. Support additional global stats: e.g. nonprofiled-*.
51 DUMP_DEEP_4 = 'DUMP_DEEP_4'
52
48 # Heap Profile Policy versions 53 # Heap Profile Policy versions
49 54
50 # POLICY_DEEP_1 DOES NOT include allocation_type columns. 55 # POLICY_DEEP_1 DOES NOT include allocation_type columns.
51 # mmap regions are distinguished w/ mmap frames in the pattern column. 56 # mmap regions are distinguished w/ mmap frames in the pattern column.
52 POLICY_DEEP_1 = 'POLICY_DEEP_1' 57 POLICY_DEEP_1 = 'POLICY_DEEP_1'
53 58
54 # POLICY_DEEP_2 DOES include allocation_type columns. 59 # POLICY_DEEP_2 DOES include allocation_type columns.
55 # mmap regions are distinguished w/ the allocation_type column. 60 # mmap regions are distinguished w/ the allocation_type column.
56 POLICY_DEEP_2 = 'POLICY_DEEP_2' 61 POLICY_DEEP_2 = 'POLICY_DEEP_2'
57 62
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
102 def __init__(self, stacktrace): 107 def __init__(self, stacktrace):
103 self.stacktrace = stacktrace 108 self.stacktrace = stacktrace
104 self.component = '' 109 self.component = ''
105 110
106 111
107 class Log(object): 112 class Log(object):
108 113
109 """A class representing one dumped log data.""" 114 """A class representing one dumped log data."""
110 def __init__(self, log_path, buckets): 115 def __init__(self, log_path, buckets):
111 self.log_path = log_path 116 self.log_path = log_path
112 with open(self.log_path, mode='r') as log_f: 117 self.log_lines = [
113 self.log_lines = log_f.readlines() 118 l for l in open(self.log_path, 'r') if l and not l.startswith('#')]
114 self.log_version = '' 119 self.log_version = ''
115 sys.stderr.write('parsing a log file:%s\n' % log_path) 120 sys.stderr.write('parsing a log file:%s\n' % log_path)
116 self.mmap_stacktrace_lines = [] 121 self.mmap_stacktrace_lines = []
117 self.malloc_stacktrace_lines = [] 122 self.malloc_stacktrace_lines = []
118 self.counters = {} 123 self.counters = {}
119 self.log_time = os.stat(self.log_path).st_mtime 124 self.log_time = os.stat(self.log_path).st_mtime
120 self.parse_log(buckets) 125 self.parse_log(buckets)
121 126
122 @staticmethod 127 @staticmethod
123 def dump_stacktrace_lines(stacktrace_lines, buckets): 128 def dump_stacktrace_lines(stacktrace_lines, buckets):
(...skipping 151 matching lines...) Expand 10 before | Expand all | Expand 10 after
275 def skip_lines_while(line_number, max_line_number, skipping_condition): 280 def skip_lines_while(line_number, max_line_number, skipping_condition):
276 """Increments line_number until skipping_condition(line_number) is false. 281 """Increments line_number until skipping_condition(line_number) is false.
277 """ 282 """
278 while skipping_condition(line_number): 283 while skipping_condition(line_number):
279 line_number += 1 284 line_number += 1
280 if line_number >= max_line_number: 285 if line_number >= max_line_number:
281 sys.stderr.write('invalid heap profile dump.') 286 sys.stderr.write('invalid heap profile dump.')
282 return line_number 287 return line_number
283 return line_number 288 return line_number
284 289
285 def parse_stacktraces_while_valid(self, buckets, log_lines, ln): 290 def parse_stacktraces_while_valid(self, buckets, log_lines, line_number):
286 """Parses stacktrace lines while the lines are valid. 291 """Parses stacktrace lines while the lines are valid.
287 292
288 Args: 293 Args:
289 buckets: A dict mapping bucket ids and their corresponding Bucket 294 buckets: A dict mapping bucket ids and their corresponding Bucket
290 objects. 295 objects.
291 log_lines: A list of lines to be parsed. 296 log_lines: A list of lines to be parsed.
292 ln: An integer representing the starting line number in log_lines. 297 line_number: An integer representing the starting line number in
298 log_lines.
293 299
294 Returns: 300 Returns:
295 A pair of a list of valid lines and an integer representing the last 301 A pair of a list of valid lines and an integer representing the last
296 line number in log_lines. 302 line number in log_lines.
297 """ 303 """
298 ln = self.skip_lines_while( 304 line_number = self.skip_lines_while(
299 ln, len(log_lines), lambda n: not log_lines[n].split()[0].isdigit()) 305 line_number, len(log_lines),
300 stacktrace_lines_start = ln 306 lambda n: not log_lines[n].split()[0].isdigit())
301 ln = self.skip_lines_while( 307 stacktrace_lines_start = line_number
302 ln, len(log_lines), 308 line_number = self.skip_lines_while(
309 line_number, len(log_lines),
303 lambda n: self.check_stacktrace_line(log_lines[n], buckets)) 310 lambda n: self.check_stacktrace_line(log_lines[n], buckets))
304 return (log_lines[stacktrace_lines_start:ln], ln) 311 return (log_lines[stacktrace_lines_start:line_number], line_number)
305 312
306 def parse_stacktraces(self, buckets): 313 def parse_stacktraces(self, buckets, line_number):
307 """Parses lines in self.log_lines as stacktrace. 314 """Parses lines in self.log_lines as stacktrace.
308 315
309 Valid stacktrace lines are stored into self.mmap_stacktrace_lines and 316 Valid stacktrace lines are stored into self.mmap_stacktrace_lines and
310 self.malloc_stacktrace_lines. 317 self.malloc_stacktrace_lines.
311 318
312 Args: 319 Args:
313 buckets: A dict mapping bucket ids and their corresponding Bucket 320 buckets: A dict mapping bucket ids and their corresponding Bucket
314 objects. 321 objects.
322 line_number: An integer representing the starting line number in
323 log_lines.
324
325 Raises:
326 RuntimeError for invalid dump versions.
327 """
328 sys.stderr.write(' heap profile dump version: %s\n' % self.log_version)
329
330 if self.log_version in (DUMP_DEEP_3, DUMP_DEEP_4):
331 (self.mmap_stacktrace_lines, line_number) = (
332 self.parse_stacktraces_while_valid(
333 buckets, self.log_lines, line_number))
334 line_number = self.skip_lines_while(
335 line_number, len(self.log_lines),
336 lambda n: self.log_lines[n] != 'MALLOC_STACKTRACES:\n')
337 (self.malloc_stacktrace_lines, line_number) = (
338 self.parse_stacktraces_while_valid(
339 buckets, self.log_lines, line_number))
340
341 elif self.log_version == DUMP_DEEP_2:
342 (self.mmap_stacktrace_lines, line_number) = (
343 self.parse_stacktraces_while_valid(
344 buckets, self.log_lines, line_number))
345 line_number = self.skip_lines_while(
346 line_number, len(self.log_lines),
347 lambda n: self.log_lines[n] != 'MALLOC_STACKTRACES:\n')
348 (self.malloc_stacktrace_lines, line_number) = (
349 self.parse_stacktraces_while_valid(
350 buckets, self.log_lines, line_number))
351 self.malloc_stacktrace_lines.extend(self.mmap_stacktrace_lines)
352 self.mmap_stacktrace_lines = []
353
354 elif self.log_version == DUMP_DEEP_1:
355 (self.malloc_stacktrace_lines, line_number) = (
356 self.parse_stacktraces_while_valid(
357 buckets, self.log_lines, line_number))
358
359 else:
360 raise RuntimeError('invalid heap profile dump version: %s' % (
361 self.log_version))
362
363 def parse_global_stats(self):
364 """Parses lines in self.log_lines as global stats."""
365 ln = self.skip_lines_while(
366 0, len(self.log_lines),
367 lambda n: self.log_lines[n] != 'GLOBAL_STATS:\n')
368
369 if self.log_version == DUMP_DEEP_4:
370 global_stat_names = [
371 'total', 'file-exec', 'file-nonexec', 'anonymous', 'stack', 'other',
372 'nonprofiled-absent', 'nonprofiled-anonymous',
373 'nonprofiled-file-exec', 'nonprofiled-file-nonexec',
374 'nonprofiled-stack', 'nonprofiled-other',
375 'profiled-mmap', 'profiled-malloc']
376 else:
377 global_stat_names = [
378 'total', 'file', 'anonymous', 'other', 'mmap', 'tcmalloc']
379
380 for prefix in global_stat_names:
381 ln = self.skip_lines_while(
382 ln, len(self.log_lines),
383 lambda n: self.log_lines[n].split()[0] != prefix)
384 words = self.log_lines[ln].split()
385 self.counters[prefix + '_virtual'] = int(words[-2])
386 self.counters[prefix + '_committed'] = int(words[-1])
387
388 def parse_version(self):
389 """Parses a version string in self.log_lines.
315 390
316 Returns: 391 Returns:
317 A string representing a version of the stacktrace dump. '' for invalid 392 A pair of (a string representing a version of the stacktrace dump,
318 dump. 393 and an integer indicating a line number next to the version string).
394
395 Raises:
396 RuntimeError for invalid dump versions.
319 """ 397 """
320 version = '' 398 version = ''
321 399
322 # Skip until an identifiable line. 400 # Skip until an identifiable line.
323 headers = ('STACKTRACES:\n', 'MMAP_STACKTRACES:\n', 'heap profile: ') 401 headers = ('STACKTRACES:\n', 'MMAP_STACKTRACES:\n', 'heap profile: ')
324 ln = self.skip_lines_while( 402 ln = self.skip_lines_while(
325 0, len(self.log_lines), 403 0, len(self.log_lines),
326 lambda n: not self.log_lines[n].startswith(headers)) 404 lambda n: not self.log_lines[n].startswith(headers))
327 405
328 # Identify a version. 406 # Identify a version.
329 if self.log_lines[ln].startswith('heap profile: '): 407 if self.log_lines[ln].startswith('heap profile: '):
330 version = self.log_lines[ln][13:].strip() 408 version = self.log_lines[ln][13:].strip()
331 if version == DUMP_DEEP_2 or version == DUMP_DEEP_3: 409 if (version == DUMP_DEEP_2 or version == DUMP_DEEP_3 or
410 version == DUMP_DEEP_4):
332 ln = self.skip_lines_while( 411 ln = self.skip_lines_while(
333 ln, len(self.log_lines), 412 ln, len(self.log_lines),
334 lambda n: self.log_lines[n] != 'MMAP_STACKTRACES:\n') 413 lambda n: self.log_lines[n] != 'MMAP_STACKTRACES:\n')
335 else: 414 else:
336 sys.stderr.write(' invalid heap profile dump version:%s\n' % version) 415 raise RuntimeError('invalid heap profile dump version: %s' % version)
337 return ''
338 elif self.log_lines[ln] == 'STACKTRACES:\n': 416 elif self.log_lines[ln] == 'STACKTRACES:\n':
339 version = DUMP_DEEP_1 417 version = DUMP_DEEP_1
340 elif self.log_lines[ln] == 'MMAP_STACKTRACES:\n': 418 elif self.log_lines[ln] == 'MMAP_STACKTRACES:\n':
341 version = DUMP_DEEP_2 419 version = DUMP_DEEP_2
342 420
343 if version == DUMP_DEEP_3: 421 return (version, ln)
344 sys.stderr.write(' heap profile dump version: %s\n' % version)
345 (self.mmap_stacktrace_lines, ln) = self.parse_stacktraces_while_valid(
346 buckets, self.log_lines, ln)
347 ln = self.skip_lines_while(
348 ln, len(self.log_lines),
349 lambda n: self.log_lines[n] != 'MALLOC_STACKTRACES:\n')
350 (self.malloc_stacktrace_lines, ln) = self.parse_stacktraces_while_valid(
351 buckets, self.log_lines, ln)
352 return version
353
354 elif version == DUMP_DEEP_2:
355 sys.stderr.write(' heap profile dump version: %s\n' % version)
356 (self.mmap_stacktrace_lines, ln) = self.parse_stacktraces_while_valid(
357 buckets, self.log_lines, ln)
358 ln = self.skip_lines_while(
359 ln, len(self.log_lines),
360 lambda n: self.log_lines[n] != 'MALLOC_STACKTRACES:\n')
361 (self.malloc_stacktrace_lines, ln) = self.parse_stacktraces_while_valid(
362 buckets, self.log_lines, ln)
363 self.malloc_stacktrace_lines.extend(self.mmap_stacktrace_lines)
364 self.mmap_stacktrace_lines = []
365 return version
366
367 elif version == DUMP_DEEP_1:
368 sys.stderr.write(' heap profile dump version: %s\n' % version)
369 (self.malloc_stacktrace_lines, ln) = self.parse_stacktraces_while_valid(
370 buckets, self.log_lines, ln)
371 return version
372
373 else:
374 sys.stderr.write(' invalid heap profile dump version:%s\n' % version)
375 return ''
376
377 def parse_global_stats(self):
378 """Parses lines in self.log_lines as global stats."""
379 ln = self.skip_lines_while(
380 0, len(self.log_lines),
381 lambda n: self.log_lines[n] != 'GLOBAL_STATS:\n')
382
383 for prefix in ['total', 'file', 'anonymous', 'other', 'mmap', 'tcmalloc']:
384 ln = self.skip_lines_while(
385 ln, len(self.log_lines),
386 lambda n: self.log_lines[n].split()[0] != prefix)
387 words = self.log_lines[ln].split()
388 self.counters[prefix + '_virtual'] = int(words[-2])
389 self.counters[prefix + '_committed'] = int(words[-1])
390 422
391 def parse_log(self, buckets): 423 def parse_log(self, buckets):
424 self.log_version, ln = self.parse_version()
392 self.parse_global_stats() 425 self.parse_global_stats()
393 self.log_version = self.parse_stacktraces(buckets) 426 self.parse_stacktraces(buckets, ln)
394 427
395 @staticmethod 428 @staticmethod
396 def accumulate_size_for_policy(stacktrace_lines, 429 def accumulate_size_for_policy(stacktrace_lines,
397 policy_list, buckets, sizes, mmap): 430 policy_list, buckets, sizes, mmap):
398 for l in stacktrace_lines: 431 for l in stacktrace_lines:
399 words = l.split() 432 words = l.split()
400 bucket = buckets.get(int(words[BUCKET_ID])) 433 bucket = buckets.get(int(words[BUCKET_ID]))
401 component_match = get_component(policy_list, bucket, mmap) 434 component_match = get_component(policy_list, bucket, mmap)
402 sizes[component_match] += int(words[COMMITTED]) 435 sizes[component_match] += int(words[COMMITTED])
403 436
(...skipping 23 matching lines...) Expand all
427 """ 460 """
428 461
429 sys.stderr.write('apply policy:%s\n' % (self.log_path)) 462 sys.stderr.write('apply policy:%s\n' % (self.log_path))
430 sizes = dict((c, 0) for c in components) 463 sizes = dict((c, 0) for c in components)
431 464
432 self.accumulate_size_for_policy(self.mmap_stacktrace_lines, 465 self.accumulate_size_for_policy(self.mmap_stacktrace_lines,
433 policy_list, buckets, sizes, True) 466 policy_list, buckets, sizes, True)
434 self.accumulate_size_for_policy(self.malloc_stacktrace_lines, 467 self.accumulate_size_for_policy(self.malloc_stacktrace_lines,
435 policy_list, buckets, sizes, False) 468 policy_list, buckets, sizes, False)
436 469
437 sizes['mmap-no-log'] = self.counters['mmap_committed'] - sizes[ 470 if self.log_version == DUMP_DEEP_4:
438 'mmap-total-log'] 471 mmap_prefix = 'profiled-mmap'
439 sizes['mmap-total-record'] = self.counters['mmap_committed'] 472 malloc_prefix = 'profiled-malloc'
440 sizes['mmap-total-record-vm'] = self.counters['mmap_virtual'] 473 else:
474 mmap_prefix = 'mmap'
475 malloc_prefix = 'tcmalloc'
441 476
442 sizes['tc-no-log'] = self.counters['tcmalloc_committed'] - sizes[ 477 sizes['mmap-no-log'] = (
443 'tc-total-log'] 478 self.counters['%s_committed' % mmap_prefix] - sizes['mmap-total-log'])
444 sizes['tc-total-record'] = self.counters['tcmalloc_committed'] 479 sizes['mmap-total-record'] = self.counters['%s_committed' % mmap_prefix]
445 sizes['tc-unused'] = sizes['mmap-tcmalloc'] - self.counters[ 480 sizes['mmap-total-record-vm'] = self.counters['%s_virtual' % mmap_prefix]
446 'tcmalloc_committed'] 481
482 sizes['tc-no-log'] = (
483 self.counters['%s_committed' % malloc_prefix] - sizes['tc-total-log'])
484 sizes['tc-total-record'] = self.counters['%s_committed' % malloc_prefix]
485 sizes['tc-unused'] = (
486 sizes['mmap-tcmalloc'] - self.counters['%s_committed' % malloc_prefix])
447 sizes['tc-total'] = sizes['mmap-tcmalloc'] 487 sizes['tc-total'] = sizes['mmap-tcmalloc']
448 488
449 for key, value in { 'total': 'total_committed', 489 for key, value in {
450 'filemapped': 'file_committed', 490 'total': 'total_committed',
451 'anonymous': 'anonymous_committed', 491 'filemapped': 'file_committed',
452 'other': 'other_committed', 492 'file-exec': 'file-exec_committed',
453 'total-vm': 'total_virtual', 493 'file-nonexec': 'file-nonexec_committed',
454 'filemapped-vm': 'file_virtual', 494 'anonymous': 'anonymous_committed',
455 'anonymous-vm': 'anonymous_virtual', 495 'stack': 'stack_committed',
456 'other-vm': 'other_virtual' }.items(): 496 'other': 'other_committed',
497 'nonprofiled-absent': 'nonprofiled-absent_committed',
498 'nonprofiled-anonymous': 'nonprofiled-anonymous_committed',
499 'nonprofiled-file-exec': 'nonprofiled-file-exec_committed',
500 'nonprofiled-file-nonexec': 'nonprofiled-file-nonexec_committed',
501 'nonprofiled-stack': 'nonprofiled-stack_committed',
502 'nonprofiled-other': 'nonprofiled-other_committed',
503 'total-vm': 'total_virtual',
504 'filemapped-vm': 'file_virtual',
505 'anonymous-vm': 'anonymous_virtual',
506 'other-vm': 'other_virtual' }.iteritems():
457 if key in sizes: 507 if key in sizes:
458 sizes[key] = self.counters[value] 508 sizes[key] = self.counters[value]
459 509
460 if 'unknown' in sizes: 510 if 'mustbezero' in sizes:
461 sizes['unknown'] = self.counters['total_committed'] - self.counters[ 511 removed = (
462 'mmap_committed'] 512 '%s_committed' % mmap_prefix,
513 'nonprofiled-absent_committed',
514 'nonprofiled-anonymous_committed',
515 'nonprofiled-file-exec_committed',
516 'nonprofiled-file-nonexec_committed',
517 'nonprofiled-stack_committed',
518 'nonprofiled-other_committed')
519 sizes['mustbezero'] = (
520 self.counters['total_committed'] -
521 sum(self.counters[i] for i in removed))
463 if 'total-exclude-profiler' in sizes: 522 if 'total-exclude-profiler' in sizes:
464 sizes['total-exclude-profiler'] = self.counters[ 523 sizes['total-exclude-profiler'] = (
465 'total_committed'] - sizes['mmap-profiler'] 524 self.counters['total_committed'] - sizes['mmap-profiler'])
466 if 'hour' in sizes: 525 if 'hour' in sizes:
467 sizes['hour'] = (self.log_time - first_log_time) / 60.0 / 60.0 526 sizes['hour'] = (self.log_time - first_log_time) / 60.0 / 60.0
468 if 'minute' in sizes: 527 if 'minute' in sizes:
469 sizes['minute'] = (self.log_time - first_log_time) / 60.0 528 sizes['minute'] = (self.log_time - first_log_time) / 60.0
470 if 'second' in sizes: 529 if 'second' in sizes:
471 sizes['second'] = self.log_time - first_log_time 530 sizes['second'] = self.log_time - first_log_time
472 531
473 return sizes 532 return sizes
474 533
475 @staticmethod 534 @staticmethod
476 def accumulate_size_for_expand(stacktrace_lines, policy_list, buckets, 535 def accumulate_size_for_expand(stacktrace_lines, policy_list, buckets,
477 component_name, depth, sizes, mmap): 536 component_name, depth, sizes, mmap):
478 for line in stacktrace_lines: 537 for line in stacktrace_lines:
479 words = line.split() 538 words = line.split()
480 bucket = buckets.get(int(words[BUCKET_ID])) 539 bucket = buckets.get(int(words[BUCKET_ID]))
481 component_match = get_component(policy_list, bucket, mmap) 540 component_match = get_component(policy_list, bucket, mmap)
482 if component_match == component_name: 541 if component_match == component_name:
483 stacktrace_sequence = '' 542 stacktrace_sequence = ''
484 for address in bucket.stacktrace[1 : min(len(bucket.stacktrace), 543 for address in bucket.stacktrace[0 : min(len(bucket.stacktrace),
485 1 + depth)]: 544 1 + depth)]:
486 stacktrace_sequence += address_symbol_dict[address] + ' ' 545 stacktrace_sequence += address_symbol_dict[address] + ' '
487 if not stacktrace_sequence in sizes: 546 if not stacktrace_sequence in sizes:
488 sizes[stacktrace_sequence] = 0 547 sizes[stacktrace_sequence] = 0
489 sizes[stacktrace_sequence] += int(words[COMMITTED]) 548 sizes[stacktrace_sequence] += int(words[COMMITTED])
490 549
491 def expand(self, policy_list, buckets, component_name, depth): 550 def expand(self, policy_list, buckets, component_name, depth):
492 """Prints all stacktraces in a given component of given depth. 551 """Prints all stacktraces in a given component of given depth.
493 552
494 Args: 553 Args:
(...skipping 263 matching lines...) Expand 10 before | Expand all | Expand 10 after
758 817
759 elif action == '--pprof': 818 elif action == '--pprof':
760 if len(sys.argv) > 5: 819 if len(sys.argv) > 5:
761 logs[0].dump_for_pprof(policy_list, buckets, maps_lines, sys.argv[5]) 820 logs[0].dump_for_pprof(policy_list, buckets, maps_lines, sys.argv[5])
762 else: 821 else:
763 logs[0].dump_for_pprof(policy_list, buckets, maps_lines, None) 822 logs[0].dump_for_pprof(policy_list, buckets, maps_lines, None)
764 823
765 824
766 if __name__ == '__main__': 825 if __name__ == '__main__':
767 sys.exit(main()) 826 sys.exit(main())
OLDNEW
« no previous file with comments | « tools/deep_memory_profiler/dmpolicy ('k') | tools/deep_memory_profiler/dmprof.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698