Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(533)

Side by Side Diff: tools/binary_size/file_format.py

Issue 2801663003: //tools/binary_size: Add Disassemble() to console.py. Tweak metadata. (Closed)
Patch Set: Review comments Created 3 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « tools/binary_size/describe.py ('k') | tools/binary_size/integration_test.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # Copyright 2017 The Chromium Authors. All rights reserved. 1 # Copyright 2017 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 """Deals with loading & saving .size files.""" 5 """Deals with loading & saving .size files."""
6 6
7 import cStringIO 7 import cStringIO
8 import calendar 8 import calendar
9 import collections 9 import collections
10 import datetime 10 import datetime
(...skipping 14 matching lines...) Expand all
25 return 25 return
26 file_obj.flush() 26 file_obj.flush()
27 size = os.fstat(file_obj.fileno()).st_size 27 size = os.fstat(file_obj.fileno()).st_size
28 logging.debug('File size with %s: %d' % (desc, size)) 28 logging.debug('File size with %s: %d' % (desc, size))
29 29
30 30
31 def _SaveSizeInfoToFile(size_info, file_obj): 31 def _SaveSizeInfoToFile(size_info, file_obj):
32 file_obj.write('# Created by //tools/binary_size\n') 32 file_obj.write('# Created by //tools/binary_size\n')
33 file_obj.write('%s\n' % _SERIALIZATION_VERSION) 33 file_obj.write('%s\n' % _SERIALIZATION_VERSION)
34 headers = { 34 headers = {
35 'tag': size_info.tag, 35 'metadata': size_info.metadata,
36 'section_sizes': size_info.section_sizes, 36 'section_sizes': size_info.section_sizes,
37 } 37 }
38 if size_info.timestamp:
39 headers['timestamp'] = calendar.timegm(size_info.timestamp.timetuple())
40 metadata_str = json.dumps(headers, file_obj, indent=2, sort_keys=True) 38 metadata_str = json.dumps(headers, file_obj, indent=2, sort_keys=True)
41 file_obj.write('%d\n' % len(metadata_str)) 39 file_obj.write('%d\n' % len(metadata_str))
42 file_obj.write(metadata_str) 40 file_obj.write(metadata_str)
43 file_obj.write('\n') 41 file_obj.write('\n')
44 _LogSize(file_obj, 'header') # For libchrome: 570 bytes. 42 _LogSize(file_obj, 'header') # For libchrome: 570 bytes.
45 43
46 # Store a single copy of all paths and have them referenced by index. 44 # Store a single copy of all paths and have them referenced by index.
47 # Using an OrderedDict makes the indices more repetitive (better compression). 45 # Using an OrderedDict makes the indices more repetitive (better compression).
48 path_tuples = collections.OrderedDict.fromkeys( 46 path_tuples = collections.OrderedDict.fromkeys(
49 (s.object_path, s.source_path) for s in size_info.symbols) 47 (s.object_path, s.source_path) for s in size_info.symbols)
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after
92 90
93 def _LoadSizeInfoFromFile(file_obj): 91 def _LoadSizeInfoFromFile(file_obj):
94 """Loads a size_info from the given file.""" 92 """Loads a size_info from the given file."""
95 lines = iter(file_obj) 93 lines = iter(file_obj)
96 next(lines) # Comment line. 94 next(lines) # Comment line.
97 actual_version = next(lines)[:-1] 95 actual_version = next(lines)[:-1]
98 assert actual_version == _SERIALIZATION_VERSION, ( 96 assert actual_version == _SERIALIZATION_VERSION, (
99 'Version mismatch. Need to write some upgrade code.') 97 'Version mismatch. Need to write some upgrade code.')
100 json_len = int(next(lines)) 98 json_len = int(next(lines))
101 json_str = file_obj.read(json_len) 99 json_str = file_obj.read(json_len)
102 metadata = json.loads(json_str) 100 headers = json.loads(json_str)
103 timestamp = metadata.get('timestamp') 101 section_sizes = headers['section_sizes']
104 if timestamp is not None: 102 metadata = headers['metadata']
105 timestamp = datetime.datetime.utcfromtimestamp(timestamp)
106 tag = metadata['tag']
107 section_sizes = metadata['section_sizes']
108
109 lines = iter(file_obj) 103 lines = iter(file_obj)
110 next(lines) # newline after closing } of json. 104 next(lines) # newline after closing } of json.
111 105
112 num_path_tuples = int(next(lines)) 106 num_path_tuples = int(next(lines))
113 path_tuples = [None] * num_path_tuples 107 path_tuples = [None] * num_path_tuples
114 for i in xrange(num_path_tuples): 108 for i in xrange(num_path_tuples):
115 path_tuples[i] = next(lines)[:-1].split('\t') 109 path_tuples[i] = next(lines)[:-1].split('\t')
116 110
117 section_names = next(lines)[:-1].split('\t') 111 section_names = next(lines)[:-1].split('\t')
118 section_counts = [int(c) for c in next(lines)[:-1].split('\t')] 112 section_counts = [int(c) for c in next(lines)[:-1].split('\t')]
(...skipping 30 matching lines...) Expand all
149 paths = path_tuples[path_indices[section_index][i]] 143 paths = path_tuples[path_indices[section_index][i]]
150 new_sym.object_path = paths[0] 144 new_sym.object_path = paths[0]
151 new_sym.source_path = paths[1] 145 new_sym.source_path = paths[1]
152 new_sym.is_anonymous = is_anonymous 146 new_sym.is_anonymous = is_anonymous
153 new_sym.padding = 0 # Derived 147 new_sym.padding = 0 # Derived
154 new_sym.full_name = None # Derived 148 new_sym.full_name = None # Derived
155 symbol_list[symbol_idx] = new_sym 149 symbol_list[symbol_idx] = new_sym
156 symbol_idx += 1 150 symbol_idx += 1
157 151
158 symbols = models.SymbolGroup(symbol_list) 152 symbols = models.SymbolGroup(symbol_list)
159 return models.SizeInfo(section_sizes, symbols, timestamp=timestamp, tag=tag) 153 return models.SizeInfo(section_sizes, symbols, metadata=metadata)
160 154
161 155
def SaveSizeInfo(size_info, path):
  """Saves |size_info| as a gzipped .size file at |path|.

  Args:
    size_info: The models.SizeInfo instance to serialize.
    path: Destination file path for the gzipped output.

  Serializes in-memory first and gzips as a separate step, unless the
  MEASURE_GZIP=1 environment variable is set, in which case it streams
  directly through a gzip file object (slower, but useful for measuring
  intermediate sizes via _LogSize).
  """
  # Fixed docstring typo: was "|path}".
  if os.environ.get('MEASURE_GZIP') == '1':
    with gzip.open(path, 'wb') as f:
      _SaveSizeInfoToFile(size_info, f)
  else:
    # It is seconds faster to do gzip in a separate step. 6s -> 3.5s.
    stringio = cStringIO.StringIO()
    _SaveSizeInfoToFile(size_info, stringio)

    logging.debug('Serialization complete. Gzipping...')
    stringio.seek(0)
    with gzip.open(path, 'wb') as f:
      shutil.copyfileobj(stringio, f)
176 170
177 171
def LoadSizeInfo(path):
  """Returns a SizeInfo loaded from |path|."""
  size_file = gzip.open(path)
  try:
    return _LoadSizeInfoFromFile(size_file)
  finally:
    size_file.close()
OLDNEW
« no previous file with comments | « tools/binary_size/describe.py ('k') | tools/binary_size/integration_test.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698