| OLD | NEW |
| (Empty) |
| 1 # Copyright 2017 The Chromium Authors. All rights reserved. | |
| 2 # Use of this source code is governed by a BSD-style license that can be | |
| 3 # found in the LICENSE file. | |
| 4 | |
| 5 """Deals with loading & saving .size files.""" | |
| 6 | |
| 7 import cStringIO | |
| 8 import calendar | |
| 9 import collections | |
| 10 import datetime | |
| 11 import gzip | |
| 12 import json | |
| 13 import models | |
| 14 import logging | |
| 15 import os | |
| 16 import shutil | |
| 17 | |
| 18 | |
# File format version for .size files. The loader hard-asserts on an exact
# match ('Need to write some upgrade code.'), so bump this — and add upgrade
# logic in _LoadSizeInfoFromFile — whenever the serialized layout changes.
_SERIALIZATION_VERSION = 'Size File Format v1'
| 21 | |
| 22 | |
| 23 def _LogSize(file_obj, desc): | |
| 24 if not hasattr(file_obj, 'fileno'): | |
| 25 return | |
| 26 file_obj.flush() | |
| 27 size = os.fstat(file_obj.fileno()).st_size | |
| 28 logging.debug('File size with %s: %d' % (desc, size)) | |
| 29 | |
| 30 | |
def _SaveSizeInfoToFile(size_info, file_obj):
  """Serializes |size_info| into |file_obj| using the .size text format.

  Layout (all plain text):
    1. A '#' comment line, then the _SERIALIZATION_VERSION line.
    2. A byte-length-prefixed JSON blob holding metadata & section_sizes.
    3. A de-duplicated table of (object_path, source_path) tuples.
    4. Per-section names & symbol counts, then per-symbol columns:
       addresses (delta-encoded), sizes, path indices (delta-encoded), names.
  """
  file_obj.write('# Created by //tools/binary_size\n')
  file_obj.write('%s\n' % _SERIALIZATION_VERSION)
  headers = {
      'metadata': size_info.metadata,
      'section_sizes': size_info.section_sizes,
  }
  # Fixed: json.dumps() takes no file object. The old call passed |file_obj|
  # as the second positional argument, which is |skipkeys|, silently turning
  # skipkeys on. Dump to a string; it is written out below.
  metadata_str = json.dumps(headers, indent=2, sort_keys=True)
  # Length prefix lets the loader slurp the multi-line JSON in one read().
  file_obj.write('%d\n' % len(metadata_str))
  file_obj.write(metadata_str)
  file_obj.write('\n')
  _LogSize(file_obj, 'header')  # For libchrome: 570 bytes.

  # Store a single copy of all paths and have them referenced by index.
  # Using an OrderedDict makes the indices more repetitive (better compression).
  path_tuples = collections.OrderedDict.fromkeys(
      (s.object_path, s.source_path) for s in size_info.raw_symbols)
  for i, key in enumerate(path_tuples):
    path_tuples[key] = i
  file_obj.write('%d\n' % len(path_tuples))
  file_obj.writelines('%s\t%s\n' % pair for pair in path_tuples)
  _LogSize(file_obj, 'paths')  # For libchrome, adds 200kb.

  # Symbol counts by section.
  by_section = models.SymbolGroup(size_info.raw_symbols)
  by_section = by_section.GroupBySectionName().SortedByName()
  file_obj.write('%s\n' % '\t'.join(g.name for g in by_section))
  file_obj.write('%s\n' % '\t'.join(str(len(g)) for g in by_section))

  def write_numeric(func, delta=False):
    # Emits one space-separated line of ints per section. With |delta|,
    # each value is stored as the difference from the previous one —
    # smaller numbers compress better for mostly-increasing sequences.
    for group in by_section:
      prev_value = 0
      last_sym = group[-1]
      for symbol in group:
        value = func(symbol)
        if delta:
          value, prev_value = value - prev_value, value
        file_obj.write(str(value))
        if symbol is not last_sym:
          file_obj.write(' ')
      file_obj.write('\n')

  write_numeric(lambda s: s.address, delta=True)
  _LogSize(file_obj, 'addresses')  # For libchrome, adds 300kb.
  # Do not write padding, it will be recalculated from addresses on load.
  write_numeric(lambda s: s.size_without_padding)
  _LogSize(file_obj, 'sizes')  # For libchrome, adds 300kb
  write_numeric(lambda s: path_tuples[(s.object_path, s.source_path)],
                delta=True)
  _LogSize(file_obj, 'path indices')  # For libchrome: adds 125kb.

  for group in by_section:
    for symbol in group:
      # Do not write name when full_name exists. It will be derived on load.
      file_obj.write(symbol.full_name or symbol.name)
      if symbol.is_anonymous:
        # A trailing '\t1' flags anonymous symbols for the loader.
        file_obj.write('\t1')
      file_obj.write('\n')
  _LogSize(file_obj, 'names (final)')  # For libchrome: adds 3.5mb.
| 90 | |
| 91 | |
def _LoadSizeInfoFromFile(file_obj):
  """Loads a size_info from the given file.

  Parses the format written by _SaveSizeInfoToFile(): a two-line header,
  a byte-length-prefixed JSON blob, a path table, then per-section symbol
  columns (addresses, sizes, path indices) followed by one name per line.
  """
  lines = iter(file_obj)
  next(lines)  # Comment line.
  actual_version = next(lines)[:-1]  # [:-1] strips the trailing '\n'.
  assert actual_version == _SERIALIZATION_VERSION, (
      'Version mismatch. Need to write some upgrade code.')
  # The JSON blob is prefixed with its byte length so it can be consumed
  # with a single read() regardless of how many lines it spans.
  json_len = int(next(lines))
  json_str = file_obj.read(json_len)
  headers = json.loads(json_str)
  section_sizes = headers['section_sizes']
  metadata = headers.get('metadata')  # Optional; may be absent -> None.
  # Re-create the line iterator after the raw read() so iteration resumes
  # from the stream position just past the JSON blob.
  lines = iter(file_obj)
  next(lines)  # newline after closing } of json.

  # Path table: one "object_path\tsource_path" line per unique pair,
  # referenced below by index.
  num_path_tuples = int(next(lines))
  path_tuples = [None] * num_path_tuples
  for i in xrange(num_path_tuples):
    path_tuples[i] = next(lines)[:-1].split('\t')

  section_names = next(lines)[:-1].split('\t')
  section_counts = [int(c) for c in next(lines)[:-1].split('\t')]

  def read_numeric(delta=False):
    # Reads one space-separated line of ints per section. When |delta| is
    # True, delta_multiplier is 1 and the running sum reconstructs absolute
    # values from the stored differences; when False, it is 0 and each
    # field stands alone. int() tolerates the trailing '\n' on the last
    # field. The |fields| list is reused in-place to avoid a second list.
    ret = []
    delta_multiplier = int(delta)
    for _ in section_counts:
      value = 0
      fields = next(lines).split(' ')
      for i, f in enumerate(fields):
        value = value * delta_multiplier + int(f)
        fields[i] = value
      ret.append(fields)
    return ret

  addresses = read_numeric(delta=True)
  sizes = read_numeric(delta=False)
  path_indices = read_numeric(delta=True)

  # One name line per symbol; a trailing '\t1' marks anonymous symbols.
  raw_symbols = [None] * sum(section_counts)
  symbol_idx = 0
  for section_index, cur_section_name in enumerate(section_names):
    for i in xrange(section_counts[section_index]):
      line = next(lines)[:-1]
      is_anonymous = line.endswith('\t1')
      name = line[:-2] if is_anonymous else line

      # __new__ bypasses Symbol.__init__; every field is assigned below.
      new_sym = models.Symbol.__new__(models.Symbol)
      new_sym.section_name = cur_section_name
      new_sym.address = addresses[section_index][i]
      new_sym.size = sizes[section_index][i]
      new_sym.name = name
      paths = path_tuples[path_indices[section_index][i]]
      new_sym.object_path = paths[0]
      new_sym.source_path = paths[1]
      new_sym.is_anonymous = is_anonymous
      new_sym.padding = 0  # Derived
      new_sym.full_name = None  # Derived
      raw_symbols[symbol_idx] = new_sym
      symbol_idx += 1

  return models.SizeInfo(section_sizes, raw_symbols, metadata=metadata)
| 154 | |
| 155 | |
def SaveSizeInfo(size_info, path):
  """Saves |size_info| to |path| as a gzipped .size file.

  Fixed docstring typo (was "|path}"). When the MEASURE_GZIP environment
  variable is '1', serialization writes straight through gzip so the
  _LogSize() calls report compressed sizes; otherwise serialization goes
  to an in-memory buffer first and is gzipped in one pass afterwards.
  """
  if os.environ.get('MEASURE_GZIP') == '1':
    with gzip.open(path, 'wb') as f:
      _SaveSizeInfoToFile(size_info, f)
  else:
    # It is seconds faster to do gzip in a separate step. 6s -> 3.5s.
    stringio = cStringIO.StringIO()
    _SaveSizeInfoToFile(size_info, stringio)

    logging.debug('Serialization complete. Gzipping...')
    stringio.seek(0)
    with gzip.open(path, 'wb') as f:
      shutil.copyfileobj(stringio, f)
| 170 | |
| 171 | |
def LoadSizeInfo(path):
  """Returns a SizeInfo loaded from |path|."""
  size_file = gzip.open(path)
  try:
    return _LoadSizeInfoFromFile(size_file)
  finally:
    size_file.close()
| OLD | NEW |