OLD | NEW |
1 # Copyright 2017 The Chromium Authors. All rights reserved. | 1 # Copyright 2017 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """Deals with loading & saving .size files.""" | 5 """Deals with loading & saving .size files.""" |
6 | 6 |
7 import cStringIO | 7 import cStringIO |
8 import calendar | 8 import calendar |
9 import collections | 9 import collections |
10 import datetime | 10 import datetime |
(...skipping 27 matching lines...) Expand all Loading... |
38 } | 38 } |
39 metadata_str = json.dumps(headers, file_obj, indent=2, sort_keys=True) | 39 metadata_str = json.dumps(headers, file_obj, indent=2, sort_keys=True) |
40 file_obj.write('%d\n' % len(metadata_str)) | 40 file_obj.write('%d\n' % len(metadata_str)) |
41 file_obj.write(metadata_str) | 41 file_obj.write(metadata_str) |
42 file_obj.write('\n') | 42 file_obj.write('\n') |
43 _LogSize(file_obj, 'header') # For libchrome: 570 bytes. | 43 _LogSize(file_obj, 'header') # For libchrome: 570 bytes. |
44 | 44 |
45 # Store a single copy of all paths and have them referenced by index. | 45 # Store a single copy of all paths and have them referenced by index. |
46 # Using an OrderedDict makes the indices more repetitive (better compression). | 46 # Using an OrderedDict makes the indices more repetitive (better compression). |
47 path_tuples = collections.OrderedDict.fromkeys( | 47 path_tuples = collections.OrderedDict.fromkeys( |
48 (s.object_path, s.source_path) for s in size_info.symbols) | 48 (s.object_path, s.source_path) for s in size_info.raw_symbols) |
49 for i, key in enumerate(path_tuples): | 49 for i, key in enumerate(path_tuples): |
50 path_tuples[key] = i | 50 path_tuples[key] = i |
51 file_obj.write('%d\n' % len(path_tuples)) | 51 file_obj.write('%d\n' % len(path_tuples)) |
52 file_obj.writelines('%s\t%s\n' % pair for pair in path_tuples) | 52 file_obj.writelines('%s\t%s\n' % pair for pair in path_tuples) |
53 _LogSize(file_obj, 'paths') # For libchrome, adds 200kb. | 53 _LogSize(file_obj, 'paths') # For libchrome, adds 200kb. |
54 | 54 |
55 # Symbol counts by section. | 55 # Symbol counts by section. |
56 by_section = size_info.symbols.GroupedBySectionName().Sorted( | 56 by_section = size_info.raw_symbols.GroupedBySectionName().Sorted( |
57 key=lambda s:(s[0].IsBss(), s[0].address, s.full_name)) | 57 key=lambda s:(s[0].IsBss(), s[0].address, s.full_name)) |
58 file_obj.write('%s\n' % '\t'.join(g.name for g in by_section)) | 58 file_obj.write('%s\n' % '\t'.join(g.name for g in by_section)) |
59 file_obj.write('%s\n' % '\t'.join(str(len(g)) for g in by_section)) | 59 file_obj.write('%s\n' % '\t'.join(str(len(g)) for g in by_section)) |
60 | 60 |
61 def write_numeric(func, delta=False): | 61 def write_numeric(func, delta=False): |
62 for group in by_section: | 62 for group in by_section: |
63 prev_value = 0 | 63 prev_value = 0 |
64 last_sym = group[-1] | 64 last_sym = group[-1] |
65 for symbol in group: | 65 for symbol in group: |
66 value = func(symbol) | 66 value = func(symbol) |
(...skipping 130 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
197 logging.debug('Serialization complete. Gzipping...') | 197 logging.debug('Serialization complete. Gzipping...') |
198 stringio.seek(0) | 198 stringio.seek(0) |
199 with gzip.open(path, 'wb') as f: | 199 with gzip.open(path, 'wb') as f: |
200 shutil.copyfileobj(stringio, f) | 200 shutil.copyfileobj(stringio, f) |
201 | 201 |
202 | 202 |
def LoadSizeInfo(path):
  """Returns a SizeInfo loaded from |path|.

  |path| names a gzip-compressed .size file previously written by
  SaveSizeInfo(); the decompressed stream is handed to the module-level
  deserializer.
  """
  size_file = gzip.open(path)
  try:
    return _LoadSizeInfoFromFile(size_file)
  finally:
    size_file.close()
OLD | NEW |