OLD | NEW |
---|---|
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """The deep heap profiler script for Chrome.""" | 6 """The deep heap profiler script for Chrome.""" |
7 | 7 |
8 from datetime import datetime | 8 from datetime import datetime |
9 import json | 9 import json |
10 import os | 10 import os |
(...skipping 10 matching lines...) Expand all Loading... | |
21 NULL_REGEX = re.compile('') | 21 NULL_REGEX = re.compile('') |
22 PPROF_PATH = os.path.join(os.path.dirname(__file__), | 22 PPROF_PATH = os.path.join(os.path.dirname(__file__), |
23 os.pardir, | 23 os.pardir, |
24 os.pardir, | 24 os.pardir, |
25 'third_party', | 25 'third_party', |
26 'tcmalloc', | 26 'tcmalloc', |
27 'chromium', | 27 'chromium', |
28 'src', | 28 'src', |
29 'pprof') | 29 'pprof') |
30 | 30 |
31 # Heap Profile Dump versions | 31 # Heap Profile Dump versions |
Alexander Potapenko
2012/04/12 12:51:33
Are you going to support all the versions? I guess
Dai Mikurube (NOT FULLTIME)
2012/04/13 05:41:10
I'm planning to support all versions for a while s
| |
32 | 32 |
33 # DUMP_DEEP_1 DOES NOT distinguish mmap regions and malloc chunks. | 33 # DUMP_DEEP_1 DOES NOT distinguish mmap regions and malloc chunks. |
34 # Their stacktraces DO contain mmap* or tc-* at their tops. | 34 # Their stacktraces DO contain mmap* or tc-* at their tops. |
35 # They should be processed by POLICY_DEEP_1. | 35 # They should be processed by POLICY_DEEP_1. |
36 DUMP_DEEP_1 = 'DUMP_DEEP_1' | 36 DUMP_DEEP_1 = 'DUMP_DEEP_1' |
37 | 37 |
38 # DUMP_DEEP_2 DOES distinguish mmap regions and malloc chunks. | 38 # DUMP_DEEP_2 DOES distinguish mmap regions and malloc chunks. |
39 # Their stacktraces still DO contain mmap* or tc-*. | 39 # Their stacktraces still DO contain mmap* or tc-*. |
40 # They should be processed by POLICY_DEEP_1. | 40 # They should be processed by POLICY_DEEP_1. |
41 DUMP_DEEP_2 = 'DUMP_DEEP_2' | 41 DUMP_DEEP_2 = 'DUMP_DEEP_2' |
42 | 42 |
43 # DUMP_DEEP_3 DOES distinguish mmap regions and malloc chunks. | 43 # DUMP_DEEP_3 DOES distinguish mmap regions and malloc chunks. |
44 # Their stacktraces DO NOT contain mmap* or tc-*. | 44 # Their stacktraces DO NOT contain mmap* or tc-*. |
45 # They should be processed by POLICY_DEEP_2. | 45 # They should be processed by POLICY_DEEP_2. |
46 DUMP_DEEP_3 = 'DUMP_DEEP_3' | 46 DUMP_DEEP_3 = 'DUMP_DEEP_3' |
47 | 47 |
48 # DUMP_DEEP_4 adds some features to DUMP_DEEP_3: | |
49 # 1. Support comments starting with '#' | |
50 # 2. Support additional global stats: e.g. nonprofiled-*. | |
51 DUMP_DEEP_4 = 'DUMP_DEEP_4' | |
Alexander Potapenko
2012/04/12 12:51:33
I think it's time to stop adding DUMP_DEEP_X const
Dai Mikurube (NOT FULLTIME)
2012/04/13 05:41:10
I was planning to add other versions than 'DUMP_DE
| |
52 | |
48 # Heap Profile Policy versions | 53 # Heap Profile Policy versions |
49 | 54 |
50 # POLICY_DEEP_1 DOES NOT include allocation_type columns. | 55 # POLICY_DEEP_1 DOES NOT include allocation_type columns. |
51 # mmap regions are distinguished w/ mmap frames in the pattern column. | 56 # mmap regions are distinguished w/ mmap frames in the pattern column. |
52 POLICY_DEEP_1 = 'POLICY_DEEP_1' | 57 POLICY_DEEP_1 = 'POLICY_DEEP_1' |
53 | 58 |
54 # POLICY_DEEP_2 DOES include allocation_type columns. | 59 # POLICY_DEEP_2 DOES include allocation_type columns. |
55 # mmap regions are distinguished w/ the allocation_type column. | 60 # mmap regions are distinguished w/ the allocation_type column. |
56 POLICY_DEEP_2 = 'POLICY_DEEP_2' | 61 POLICY_DEEP_2 = 'POLICY_DEEP_2' |
57 | 62 |
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
102 def __init__(self, stacktrace): | 107 def __init__(self, stacktrace): |
103 self.stacktrace = stacktrace | 108 self.stacktrace = stacktrace |
104 self.component = '' | 109 self.component = '' |
105 | 110 |
106 | 111 |
107 class Log(object): | 112 class Log(object): |
108 | 113 |
109 """A class representing one dumped log data.""" | 114 """A class representing one dumped log data.""" |
110 def __init__(self, log_path, buckets): | 115 def __init__(self, log_path, buckets): |
111 self.log_path = log_path | 116 self.log_path = log_path |
117 self.log_lines = [] | |
M-A Ruel
2012/04/12 12:40:36
Here's an example how to rewrite it in a functiona
Dai Mikurube (NOT FULLTIME)
2012/04/13 05:41:10
Finally, replaced it with:
self.log_lines = [l for
| |
112 with open(self.log_path, mode='r') as log_f: | 118 with open(self.log_path, mode='r') as log_f: |
113 self.log_lines = log_f.readlines() | 119 for log_line in log_f: |
120 if log_line[0] != '#': | |
Alexander Potapenko
2012/04/12 12:51:33
I think "not log_line.startswith('#')" is more rea
Dai Mikurube (NOT FULLTIME)
2012/04/13 05:41:10
Exactly. I used startswith as above.
| |
121 self.log_lines.append(log_line) | |
114 self.log_version = '' | 122 self.log_version = '' |
115 sys.stderr.write('parsing a log file:%s\n' % log_path) | 123 sys.stderr.write('parsing a log file:%s\n' % log_path) |
116 self.mmap_stacktrace_lines = [] | 124 self.mmap_stacktrace_lines = [] |
117 self.malloc_stacktrace_lines = [] | 125 self.malloc_stacktrace_lines = [] |
118 self.counters = {} | 126 self.counters = {} |
119 self.log_time = os.stat(self.log_path).st_mtime | 127 self.log_time = os.stat(self.log_path).st_mtime |
120 self.parse_log(buckets) | 128 self.parse_log(buckets) |
121 | 129 |
122 @staticmethod | 130 @staticmethod |
123 def dump_stacktrace_lines(stacktrace_lines, buckets): | 131 def dump_stacktrace_lines(stacktrace_lines, buckets): |
(...skipping 172 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
296 line number in log_lines. | 304 line number in log_lines. |
297 """ | 305 """ |
298 ln = self.skip_lines_while( | 306 ln = self.skip_lines_while( |
299 ln, len(log_lines), lambda n: not log_lines[n].split()[0].isdigit()) | 307 ln, len(log_lines), lambda n: not log_lines[n].split()[0].isdigit()) |
300 stacktrace_lines_start = ln | 308 stacktrace_lines_start = ln |
301 ln = self.skip_lines_while( | 309 ln = self.skip_lines_while( |
302 ln, len(log_lines), | 310 ln, len(log_lines), |
303 lambda n: self.check_stacktrace_line(log_lines[n], buckets)) | 311 lambda n: self.check_stacktrace_line(log_lines[n], buckets)) |
304 return (log_lines[stacktrace_lines_start:ln], ln) | 312 return (log_lines[stacktrace_lines_start:ln], ln) |
305 | 313 |
306 def parse_stacktraces(self, buckets): | 314 def parse_stacktraces(self, buckets, ln): |
307 """Parses lines in self.log_lines as stacktrace. | 315 """Parses lines in self.log_lines as stacktrace. |
308 | 316 |
309 Valid stacktrace lines are stored into self.mmap_stacktrace_lines and | 317 Valid stacktrace lines are stored into self.mmap_stacktrace_lines and |
310 self.malloc_stacktrace_lines. | 318 self.malloc_stacktrace_lines. |
311 | 319 |
312 Args: | 320 Args: |
313 buckets: A dict mapping bucket ids and their corresponding Bucket | 321 buckets: A dict mapping bucket ids and their corresponding Bucket |
314 objects. | 322 objects. |
315 | 323 ln: An integer representing the starting line number in log_lines. |
Alexander Potapenko
2012/04/12 12:51:33
Please don't select short cryptic names for the fu
Dai Mikurube (NOT FULLTIME)
2012/04/13 05:41:10
Done.
| |
316 Returns: | |
317 A string representing a version of the stacktrace dump. '' for invalid | |
318 dump. | |
319 """ | 324 """ |
320 version = '' | 325 if self.log_version == DUMP_DEEP_3 or self.log_version == DUMP_DEEP_4: |
Alexander Potapenko
2012/04/12 12:51:33
self.log_version in [DUMP_DEEP_3, DUMP_DEEP_4]
Dai Mikurube (NOT FULLTIME)
2012/04/13 05:41:10
Done.
| |
321 | 326 sys.stderr.write(' heap profile dump version: %s\n' % self.log_version) |
Alexander Potapenko
2012/04/12 12:51:33
Please move this line to the top of the method, as
Dai Mikurube (NOT FULLTIME)
2012/04/13 05:41:10
Done.
| |
322 # Skip until an identifiable line. | |
323 headers = ('STACKTRACES:\n', 'MMAP_STACKTRACES:\n', 'heap profile: ') | |
324 ln = self.skip_lines_while( | |
325 0, len(self.log_lines), | |
326 lambda n: not self.log_lines[n].startswith(headers)) | |
327 | |
328 # Identify a version. | |
329 if self.log_lines[ln].startswith('heap profile: '): | |
330 version = self.log_lines[ln][13:].strip() | |
331 if version == DUMP_DEEP_2 or version == DUMP_DEEP_3: | |
332 ln = self.skip_lines_while( | |
333 ln, len(self.log_lines), | |
334 lambda n: self.log_lines[n] != 'MMAP_STACKTRACES:\n') | |
335 else: | |
336 sys.stderr.write(' invalid heap profile dump version:%s\n' % version) | |
337 return '' | |
338 elif self.log_lines[ln] == 'STACKTRACES:\n': | |
339 version = DUMP_DEEP_1 | |
340 elif self.log_lines[ln] == 'MMAP_STACKTRACES:\n': | |
341 version = DUMP_DEEP_2 | |
342 | |
343 if version == DUMP_DEEP_3: | |
344 sys.stderr.write(' heap profile dump version: %s\n' % version) | |
345 (self.mmap_stacktrace_lines, ln) = self.parse_stacktraces_while_valid( | 327 (self.mmap_stacktrace_lines, ln) = self.parse_stacktraces_while_valid( |
346 buckets, self.log_lines, ln) | 328 buckets, self.log_lines, ln) |
347 ln = self.skip_lines_while( | 329 ln = self.skip_lines_while( |
348 ln, len(self.log_lines), | 330 ln, len(self.log_lines), |
349 lambda n: self.log_lines[n] != 'MALLOC_STACKTRACES:\n') | 331 lambda n: self.log_lines[n] != 'MALLOC_STACKTRACES:\n') |
350 (self.malloc_stacktrace_lines, ln) = self.parse_stacktraces_while_valid( | 332 (self.malloc_stacktrace_lines, ln) = self.parse_stacktraces_while_valid( |
351 buckets, self.log_lines, ln) | 333 buckets, self.log_lines, ln) |
352 return version | |
353 | 334 |
354 elif version == DUMP_DEEP_2: | 335 elif self.log_version == DUMP_DEEP_2: |
355 sys.stderr.write(' heap profile dump version: %s\n' % version) | 336 sys.stderr.write(' heap profile dump version: %s\n' % self.log_version) |
356 (self.mmap_stacktrace_lines, ln) = self.parse_stacktraces_while_valid( | 337 (self.mmap_stacktrace_lines, ln) = self.parse_stacktraces_while_valid( |
357 buckets, self.log_lines, ln) | 338 buckets, self.log_lines, ln) |
358 ln = self.skip_lines_while( | 339 ln = self.skip_lines_while( |
359 ln, len(self.log_lines), | 340 ln, len(self.log_lines), |
360 lambda n: self.log_lines[n] != 'MALLOC_STACKTRACES:\n') | 341 lambda n: self.log_lines[n] != 'MALLOC_STACKTRACES:\n') |
361 (self.malloc_stacktrace_lines, ln) = self.parse_stacktraces_while_valid( | 342 (self.malloc_stacktrace_lines, ln) = self.parse_stacktraces_while_valid( |
362 buckets, self.log_lines, ln) | 343 buckets, self.log_lines, ln) |
363 self.malloc_stacktrace_lines.extend(self.mmap_stacktrace_lines) | 344 self.malloc_stacktrace_lines.extend(self.mmap_stacktrace_lines) |
364 self.mmap_stacktrace_lines = [] | 345 self.mmap_stacktrace_lines = [] |
365 return version | |
366 | 346 |
367 elif version == DUMP_DEEP_1: | 347 elif self.log_version == DUMP_DEEP_1: |
368 sys.stderr.write(' heap profile dump version: %s\n' % version) | 348 sys.stderr.write(' heap profile dump version: %s\n' % self.log_version) |
369 (self.malloc_stacktrace_lines, ln) = self.parse_stacktraces_while_valid( | 349 (self.malloc_stacktrace_lines, ln) = self.parse_stacktraces_while_valid( |
370 buckets, self.log_lines, ln) | 350 buckets, self.log_lines, ln) |
371 return version | |
372 | 351 |
373 else: | 352 else: |
374 sys.stderr.write(' invalid heap profile dump version:%s\n' % version) | 353 sys.stderr.write(' invalid heap profile dump version:%s\n' % ( |
375 return '' | 354 self.log_version)) |
376 | 355 |
377 def parse_global_stats(self): | 356 def parse_global_stats(self): |
378 """Parses lines in self.log_lines as global stats.""" | 357 """Parses lines in self.log_lines as global stats.""" |
379 ln = self.skip_lines_while( | 358 ln = self.skip_lines_while( |
380 0, len(self.log_lines), | 359 0, len(self.log_lines), |
381 lambda n: self.log_lines[n] != 'GLOBAL_STATS:\n') | 360 lambda n: self.log_lines[n] != 'GLOBAL_STATS:\n') |
382 | 361 |
383 for prefix in ['total', 'file', 'anonymous', 'other', 'mmap', 'tcmalloc']: | 362 if self.log_version == DUMP_DEEP_4: |
363 global_stat_names = [ | |
364 'total', 'file-exec', 'file-nonexec', 'anonymous', 'stack', 'other', | |
365 'nonprofiled-absent', 'nonprofiled-anonymous', | |
366 'nonprofiled-file-exec', 'nonprofiled-file-nonexec', | |
367 'nonprofiled-stack', 'nonprofiled-other', | |
368 'profiled-mmap', 'profiled-malloc'] | |
369 else: | |
370 global_stat_names = [ | |
371 'total', 'file', 'anonymous', 'other', 'mmap', 'tcmalloc'] | |
372 | |
373 for prefix in global_stat_names: | |
384 ln = self.skip_lines_while( | 374 ln = self.skip_lines_while( |
385 ln, len(self.log_lines), | 375 ln, len(self.log_lines), |
386 lambda n: self.log_lines[n].split()[0] != prefix) | 376 lambda n: self.log_lines[n].split()[0] != prefix) |
387 words = self.log_lines[ln].split() | 377 words = self.log_lines[ln].split() |
388 self.counters[prefix + '_virtual'] = int(words[-2]) | 378 self.counters[prefix + '_virtual'] = int(words[-2]) |
389 self.counters[prefix + '_committed'] = int(words[-1]) | 379 self.counters[prefix + '_committed'] = int(words[-1]) |
390 | 380 |
381 | |
382 def parse_version(self): | |
383 """Parses a version string in self.log_lines. | |
384 | |
385 Returns: | |
386 A string representing a version of the stacktrace dump. '' for invalid | |
Alexander Potapenko
2012/04/12 12:51:33
Looks like you're returning two values here.
Dai Mikurube (NOT FULLTIME)
2012/04/13 05:41:10
Modified as follows :
Returns:
A pair of (a strin
| |
387 dump. | |
388 """ | |
389 version = '' | |
390 | |
391 # Skip until an identifiable line. | |
392 headers = ('STACKTRACES:\n', 'MMAP_STACKTRACES:\n', 'heap profile: ') | |
393 ln = self.skip_lines_while( | |
394 0, len(self.log_lines), | |
395 lambda n: not self.log_lines[n].startswith(headers)) | |
396 | |
397 # Identify a version. | |
398 if self.log_lines[ln].startswith('heap profile: '): | |
Alexander Potapenko
2012/04/12 12:51:33
I guess too much machinery is involved in getting
Dai Mikurube (NOT FULLTIME)
2012/04/13 05:41:10
The first versions didn't include version strings.
| |
399 version = self.log_lines[ln][13:].strip() | |
400 if (version == DUMP_DEEP_2 or version == DUMP_DEEP_3 or | |
401 version == DUMP_DEEP_4): | |
402 ln = self.skip_lines_while( | |
403 ln, len(self.log_lines), | |
404 lambda n: self.log_lines[n] != 'MMAP_STACKTRACES:\n') | |
405 else: | |
406 sys.stderr.write(' invalid heap profile dump version:%s\n' % version) | |
Alexander Potapenko
2012/04/12 12:51:33
Maybe just raise an exception here? You won't need
Dai Mikurube (NOT FULLTIME)
2012/04/13 05:41:10
Sounds reasonable. Just raised an exception.
| |
407 return '' | |
408 elif self.log_lines[ln] == 'STACKTRACES:\n': | |
409 version = DUMP_DEEP_1 | |
410 elif self.log_lines[ln] == 'MMAP_STACKTRACES:\n': | |
411 version = DUMP_DEEP_2 | |
412 | |
Alexander Potapenko
2012/04/12 12:51:33
spare newline
Dai Mikurube (NOT FULLTIME)
2012/04/13 05:41:10
Hmm, this newline is helpful for readability for m
| |
413 return (version, ln) | |
414 | |
391 def parse_log(self, buckets): | 415 def parse_log(self, buckets): |
416 self.log_version, ln = self.parse_version() | |
392 self.parse_global_stats() | 417 self.parse_global_stats() |
393 self.log_version = self.parse_stacktraces(buckets) | 418 self.parse_stacktraces(buckets, ln) |
394 | 419 |
395 @staticmethod | 420 @staticmethod |
396 def accumulate_size_for_policy(stacktrace_lines, | 421 def accumulate_size_for_policy(stacktrace_lines, |
397 policy_list, buckets, sizes, mmap): | 422 policy_list, buckets, sizes, mmap): |
398 for l in stacktrace_lines: | 423 for l in stacktrace_lines: |
399 words = l.split() | 424 words = l.split() |
400 bucket = buckets.get(int(words[BUCKET_ID])) | 425 bucket = buckets.get(int(words[BUCKET_ID])) |
401 component_match = get_component(policy_list, bucket, mmap) | 426 component_match = get_component(policy_list, bucket, mmap) |
402 sizes[component_match] += int(words[COMMITTED]) | 427 sizes[component_match] += int(words[COMMITTED]) |
403 | 428 |
(...skipping 23 matching lines...) Expand all Loading... | |
427 """ | 452 """ |
428 | 453 |
429 sys.stderr.write('apply policy:%s\n' % (self.log_path)) | 454 sys.stderr.write('apply policy:%s\n' % (self.log_path)) |
430 sizes = dict((c, 0) for c in components) | 455 sizes = dict((c, 0) for c in components) |
431 | 456 |
432 self.accumulate_size_for_policy(self.mmap_stacktrace_lines, | 457 self.accumulate_size_for_policy(self.mmap_stacktrace_lines, |
433 policy_list, buckets, sizes, True) | 458 policy_list, buckets, sizes, True) |
434 self.accumulate_size_for_policy(self.malloc_stacktrace_lines, | 459 self.accumulate_size_for_policy(self.malloc_stacktrace_lines, |
435 policy_list, buckets, sizes, False) | 460 policy_list, buckets, sizes, False) |
436 | 461 |
437 sizes['mmap-no-log'] = self.counters['mmap_committed'] - sizes[ | 462 if self.log_version == DUMP_DEEP_4: |
463 mmap_prefix = 'profiled-mmap' | |
464 malloc_prefix = 'profiled-malloc' | |
465 else: | |
466 mmap_prefix = 'mmap' | |
467 malloc_prefix = 'tcmalloc' | |
468 | |
469 sizes['mmap-no-log'] = self.counters[ | |
M-A Ruel
2012/04/12 12:40:36
I think this would be much more readable:
sizes['m
Dai Mikurube (NOT FULLTIME)
2012/04/13 05:41:10
Agreed. I think it's more readable. Actually, I
| |
470 '%s_committed' % mmap_prefix] - sizes[ | |
438 'mmap-total-log'] | 471 'mmap-total-log'] |
439 sizes['mmap-total-record'] = self.counters['mmap_committed'] | 472 sizes['mmap-total-record'] = self.counters['%s_committed' % mmap_prefix] |
440 sizes['mmap-total-record-vm'] = self.counters['mmap_virtual'] | 473 sizes['mmap-total-record-vm'] = self.counters['%s_virtual' % mmap_prefix] |
441 | 474 |
442 sizes['tc-no-log'] = self.counters['tcmalloc_committed'] - sizes[ | 475 sizes['tc-no-log'] = self.counters[ |
476 '%s_committed' % malloc_prefix] - sizes[ | |
443 'tc-total-log'] | 477 'tc-total-log'] |
444 sizes['tc-total-record'] = self.counters['tcmalloc_committed'] | 478 sizes['tc-total-record'] = self.counters['%s_committed' % malloc_prefix] |
445 sizes['tc-unused'] = sizes['mmap-tcmalloc'] - self.counters[ | 479 sizes['tc-unused'] = sizes[ |
446 'tcmalloc_committed'] | 480 'mmap-tcmalloc'] - self.counters[ |
481 '%s_committed' % malloc_prefix] | |
447 sizes['tc-total'] = sizes['mmap-tcmalloc'] | 482 sizes['tc-total'] = sizes['mmap-tcmalloc'] |
448 | 483 |
449 for key, value in { 'total': 'total_committed', | 484 for key, value in { |
Alexander Potapenko
2012/04/12 12:51:33
This can be generated automatically:
map(lambda
Dai Mikurube (NOT FULLTIME)
2012/04/13 05:41:10
Just some of them don't have corresponding names
| |
450 'filemapped': 'file_committed', | 485 'total': 'total_committed', |
451 'anonymous': 'anonymous_committed', | 486 'filemapped': 'file_committed', |
452 'other': 'other_committed', | 487 'file-exec': 'file-exec_committed', |
453 'total-vm': 'total_virtual', | 488 'file-nonexec': 'file-nonexec_committed', |
454 'filemapped-vm': 'file_virtual', | 489 'anonymous': 'anonymous_committed', |
455 'anonymous-vm': 'anonymous_virtual', | 490 'stack': 'stack_committed', |
456 'other-vm': 'other_virtual' }.items(): | 491 'other': 'other_committed', |
492 'nonprofiled-absent': 'nonprofiled-absent_committed', | |
493 'nonprofiled-anonymous': 'nonprofiled-anonymous_committed', | |
494 'nonprofiled-file-exec': 'nonprofiled-file-exec_committed', | |
495 'nonprofiled-file-nonexec': 'nonprofiled-file-nonexec_committed', | |
496 'nonprofiled-stack': 'nonprofiled-stack_committed', | |
497 'nonprofiled-other': 'nonprofiled-other_committed', | |
498 'total-vm': 'total_virtual', | |
499 'filemapped-vm': 'file_virtual', | |
500 'anonymous-vm': 'anonymous_virtual', | |
501 'other-vm': 'other_virtual' }.items(): | |
M-A Ruel
2012/04/12 12:40:36
Use iteritems() instead of items()
Otherwise, you
Dai Mikurube (NOT FULLTIME)
2012/04/13 05:41:10
Done.
| |
457 if key in sizes: | 502 if key in sizes: |
458 sizes[key] = self.counters[value] | 503 sizes[key] = self.counters[value] |
459 | 504 |
460 if 'unknown' in sizes: | 505 if 'mustbezero' in sizes: |
461 sizes['unknown'] = self.counters['total_committed'] - self.counters[ | 506 sizes['mustbezero'] = self.counters[ |
M-A Ruel
2012/04/12 12:40:36
Use this format instead:
sizes['mustbezero'] = (
Dai Mikurube (NOT FULLTIME)
2012/04/13 05:41:10
Chose a loop. Thanks!
| |
462 'mmap_committed'] | 507 'total_committed'] - self.counters[ |
508 '%s_committed' % mmap_prefix] - self.counters[ | |
509 'nonprofiled-absent_committed'] - self.counters[ | |
510 'nonprofiled-anonymous_committed'] - self.counters[ | |
511 'nonprofiled-file-exec_committed'] - self.counters[ | |
512 'nonprofiled-file-nonexec_committed'] - self.counters[ | |
513 'nonprofiled-stack_committed'] - self.counters[ | |
514 'nonprofiled-other_committed'] | |
463 if 'total-exclude-profiler' in sizes: | 515 if 'total-exclude-profiler' in sizes: |
464 sizes['total-exclude-profiler'] = self.counters[ | 516 sizes['total-exclude-profiler'] = self.counters[ |
465 'total_committed'] - sizes['mmap-profiler'] | 517 'total_committed'] - sizes['mmap-profiler'] |
466 if 'hour' in sizes: | 518 if 'hour' in sizes: |
467 sizes['hour'] = (self.log_time - first_log_time) / 60.0 / 60.0 | 519 sizes['hour'] = (self.log_time - first_log_time) / 60.0 / 60.0 |
468 if 'minute' in sizes: | 520 if 'minute' in sizes: |
469 sizes['minute'] = (self.log_time - first_log_time) / 60.0 | 521 sizes['minute'] = (self.log_time - first_log_time) / 60.0 |
470 if 'second' in sizes: | 522 if 'second' in sizes: |
471 sizes['second'] = self.log_time - first_log_time | 523 sizes['second'] = self.log_time - first_log_time |
472 | 524 |
473 return sizes | 525 return sizes |
474 | 526 |
475 @staticmethod | 527 @staticmethod |
476 def accumulate_size_for_expand(stacktrace_lines, policy_list, buckets, | 528 def accumulate_size_for_expand(stacktrace_lines, policy_list, buckets, |
477 component_name, depth, sizes, mmap): | 529 component_name, depth, sizes, mmap): |
478 for line in stacktrace_lines: | 530 for line in stacktrace_lines: |
479 words = line.split() | 531 words = line.split() |
480 bucket = buckets.get(int(words[BUCKET_ID])) | 532 bucket = buckets.get(int(words[BUCKET_ID])) |
481 component_match = get_component(policy_list, bucket, mmap) | 533 component_match = get_component(policy_list, bucket, mmap) |
482 if component_match == component_name: | 534 if component_match == component_name: |
483 stacktrace_sequence = '' | 535 stacktrace_sequence = '' |
484 for address in bucket.stacktrace[1 : min(len(bucket.stacktrace), | 536 for address in bucket.stacktrace[0 : min(len(bucket.stacktrace), |
485 1 + depth)]: | 537 1 + depth)]: |
486 stacktrace_sequence += address_symbol_dict[address] + ' ' | 538 stacktrace_sequence += address_symbol_dict[address] + ' ' |
487 if not stacktrace_sequence in sizes: | 539 if not stacktrace_sequence in sizes: |
488 sizes[stacktrace_sequence] = 0 | 540 sizes[stacktrace_sequence] = 0 |
489 sizes[stacktrace_sequence] += int(words[COMMITTED]) | 541 sizes[stacktrace_sequence] += int(words[COMMITTED]) |
490 | 542 |
491 def expand(self, policy_list, buckets, component_name, depth): | 543 def expand(self, policy_list, buckets, component_name, depth): |
492 """Prints all stacktraces in a given component of given depth. | 544 """Prints all stacktraces in a given component of given depth. |
493 | 545 |
494 Args: | 546 Args: |
(...skipping 263 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
758 | 810 |
759 elif action == '--pprof': | 811 elif action == '--pprof': |
760 if len(sys.argv) > 5: | 812 if len(sys.argv) > 5: |
761 logs[0].dump_for_pprof(policy_list, buckets, maps_lines, sys.argv[5]) | 813 logs[0].dump_for_pprof(policy_list, buckets, maps_lines, sys.argv[5]) |
762 else: | 814 else: |
763 logs[0].dump_for_pprof(policy_list, buckets, maps_lines, None) | 815 logs[0].dump_for_pprof(policy_list, buckets, maps_lines, None) |
764 | 816 |
765 | 817 |
766 if __name__ == '__main__': | 818 if __name__ == '__main__': |
767 sys.exit(main()) | 819 sys.exit(main()) |
OLD | NEW |