OLD | NEW |
1 # Copyright 2017 The Chromium Authors. All rights reserved. | 1 # Copyright 2017 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """Main Python API for analyzing binary size.""" | 5 """Main Python API for analyzing binary size.""" |
6 | 6 |
7 import argparse | 7 import argparse |
8 import calendar | 8 import calendar |
9 import collections | 9 import collections |
10 import datetime | 10 import datetime |
(...skipping 419 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
430 # (e.g. inline functions), and to update the object_path / source_path | 430 # (e.g. inline functions), and to update the object_path / source_path |
431 # fields accordingly. | 431 # fields accordingly. |
432 # Looking in object files is required because the .map file chooses a | 432 # Looking in object files is required because the .map file chooses a |
433 # single path for these symbols. | 433 # single path for these symbols. |
434 # Rather than record all paths for each symbol, set the paths to be the | 434 # Rather than record all paths for each symbol, set the paths to be the |
435 # common ancestor of all paths. | 435 # common ancestor of all paths. |
436 if output_directory: | 436 if output_directory: |
437 bulk_analyzer = nm.BulkObjectFileAnalyzer(tool_prefix, output_directory) | 437 bulk_analyzer = nm.BulkObjectFileAnalyzer(tool_prefix, output_directory) |
438 bulk_analyzer.AnalyzePaths(elf_object_paths) | 438 bulk_analyzer.AnalyzePaths(elf_object_paths) |
439 | 439 |
| 440 logging.info('Parsing Linker Map') |
440 with _OpenMaybeGz(map_path) as map_file: | 441 with _OpenMaybeGz(map_path) as map_file: |
441 section_sizes, raw_symbols = ( | 442 section_sizes, raw_symbols = ( |
442 linker_map_parser.MapFileParser().Parse(map_file)) | 443 linker_map_parser.MapFileParser().Parse(map_file)) |
443 | 444 |
444 if elf_path: | 445 if elf_path: |
445 logging.debug('Validating section sizes') | 446 logging.debug('Validating section sizes') |
446 elf_section_sizes = _SectionSizesFromElf(elf_path, tool_prefix) | 447 elf_section_sizes = _SectionSizesFromElf(elf_path, tool_prefix) |
447 for k, v in elf_section_sizes.iteritems(): | 448 for k, v in elf_section_sizes.iteritems(): |
448 if v != section_sizes.get(k): | 449 if v != section_sizes.get(k): |
449 logging.error('ELF file and .map file do not agree on section sizes.') | 450 logging.error('ELF file and .map file do not agree on section sizes.') |
(...skipping 220 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
670 logging.warning('Packed section not present: %s', packed_section_name) | 671 logging.warning('Packed section not present: %s', packed_section_name) |
671 else: | 672 else: |
672 size_info.section_sizes['%s (unpacked)' % packed_section_name] = ( | 673 size_info.section_sizes['%s (unpacked)' % packed_section_name] = ( |
673 unstripped_section_sizes.get(packed_section_name)) | 674 unstripped_section_sizes.get(packed_section_name)) |
674 | 675 |
675 logging.info('Recording metadata: \n %s', | 676 logging.info('Recording metadata: \n %s', |
676 '\n '.join(describe.DescribeMetadata(size_info.metadata))) | 677 '\n '.join(describe.DescribeMetadata(size_info.metadata))) |
677 logging.info('Saving result to %s', args.size_file) | 678 logging.info('Saving result to %s', args.size_file) |
678 file_format.SaveSizeInfo(size_info, args.size_file) | 679 file_format.SaveSizeInfo(size_info, args.size_file) |
679 logging.info('Done') | 680 logging.info('Done') |
OLD | NEW |