Chromium Code Reviews

Unified Diff: tools/binary_size/libsupersize/archive.py

Issue 2870743003: supersize: Add symbol.template_name, and strip <>s from symbol.name (Closed)
Patch Set: canned query (created 3 years, 7 months ago)
 # Copyright 2017 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 """Main Python API for analyzing binary size."""
 
 import argparse
 import calendar
 import collections
 import datetime
(...skipping 26 matching lines...)
     return gzip.open(path, mode)
   return open(path, mode or 'r')
 
 
 def _StripLinkerAddedSymbolPrefixes(raw_symbols):
   """Removes prefixes sometimes added to symbol names during link
 
   Removing prefixes make symbol names match up with those found in .o files.
   """
   for symbol in raw_symbols:
-    name = symbol.name
-    if name.startswith('startup.'):
+    full_name = symbol.full_name
+    if full_name.startswith('startup.'):
       symbol.flags |= models.FLAG_STARTUP
-      symbol.name = name[8:]
-    elif name.startswith('unlikely.'):
+      symbol.full_name = full_name[8:]
+    elif full_name.startswith('unlikely.'):
       symbol.flags |= models.FLAG_UNLIKELY
-      symbol.name = name[9:]
-    elif name.startswith('rel.local.'):
+      symbol.full_name = full_name[9:]
+    elif full_name.startswith('rel.local.'):
       symbol.flags |= models.FLAG_REL_LOCAL
-      symbol.name = name[10:]
-    elif name.startswith('rel.'):
+      symbol.full_name = full_name[10:]
+    elif full_name.startswith('rel.'):
       symbol.flags |= models.FLAG_REL
-      symbol.name = name[4:]
+      symbol.full_name = full_name[4:]
 
 
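Note on the hunk above: the prefixes are checked in order, with 'rel.local.' tested before the shorter 'rel.', and each match both sets a flag and strips the prefix, now from |full_name| instead of |name|. A minimal standalone sketch of the same idea (the flag names below are stand-ins for the real models.FLAG_* constants):

_LINKER_PREFIXES = [
    ('startup.', 'FLAG_STARTUP'),
    ('unlikely.', 'FLAG_UNLIKELY'),
    ('rel.local.', 'FLAG_REL_LOCAL'),  # Checked before the shorter 'rel.'.
    ('rel.', 'FLAG_REL'),
]

def strip_linker_prefix(full_name):
  # Returns (stripped_name, flag_name); flag_name is None when no prefix found.
  for prefix, flag_name in _LINKER_PREFIXES:
    if full_name.startswith(prefix):
      return full_name[len(prefix):], flag_name
  return full_name, None

# strip_linker_prefix('unlikely._ZN4base3FooEv')
#     -> ('_ZN4base3FooEv', 'FLAG_UNLIKELY')
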
 def _UnmangleRemainingSymbols(raw_symbols, tool_prefix):
   """Uses c++filt to unmangle any symbols that need it."""
-  to_process = [s for s in raw_symbols if s.name.startswith('_Z')]
+  to_process = [s for s in raw_symbols if s.full_name.startswith('_Z')]
   if not to_process:
     return
 
   logging.info('Unmangling %d names', len(to_process))
   proc = subprocess.Popen([tool_prefix + 'c++filt'], stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE)
-  stdout = proc.communicate('\n'.join(s.name for s in to_process))[0]
+  stdout = proc.communicate('\n'.join(s.full_name for s in to_process))[0]
   assert proc.returncode == 0
 
   for i, line in enumerate(stdout.splitlines()):
-    to_process[i].name = line
+    to_process[i].full_name = line
 
 
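The hunk above keeps the existing batching: all '_Z' names go through a single c++filt invocation rather than one process per symbol, only now reading and writing |full_name|. A rough standalone equivalent, in the same Python 2 style as this file (assumes a c++filt binary from the toolchain given by tool_prefix, or on PATH when tool_prefix is empty):

import subprocess

def demangle_names(mangled_names, tool_prefix=''):
  # c++filt demangles one name per input line, preserving order.
  proc = subprocess.Popen([tool_prefix + 'c++filt'], stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE)
  stdout = proc.communicate('\n'.join(mangled_names))[0]
  assert proc.returncode == 0
  return stdout.splitlines()

# demangle_names(['_Z3foov']) -> ['foo()']
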
-def _NormalizeNames(symbols):
+def _NormalizeNames(raw_symbols):
   """Ensures that all names are formatted in a useful way.
 
   This includes:
-  - Assigning of |full_name|.
-  - Stripping of return types in |full_name| and |name| (for functions).
-  - Stripping parameters from |name|.
+  - Deriving |name| and |template_name| from |full_name|.
+  - Stripping of return types (for functions).
   - Moving "vtable for" and the like to be suffixes rather than prefixes.
   """
   found_prefixes = set()
-  for symbol in symbols:
-    if symbol.name.startswith('*'):
+  for symbol in raw_symbols:
+    if symbol.full_name.startswith('*'):
       # See comment in _CalculatePadding() about when this
       # can happen.
+      symbol.template_name = symbol.full_name
+      symbol.name = symbol.full_name
       continue
 
     # E.g.: vtable for FOO
-    idx = symbol.name.find(' for ', 0, 30)
+    idx = symbol.full_name.find(' for ', 0, 30)
     if idx != -1:
-      found_prefixes.add(symbol.name[:idx + 4])
-      symbol.name = symbol.name[idx + 5:] + ' [' + symbol.name[:idx] + ']'
+      found_prefixes.add(symbol.full_name[:idx + 4])
+      symbol.full_name = (
+          symbol.full_name[idx + 5:] + ' [' + symbol.full_name[:idx] + ']')
 
     # E.g.: virtual thunk to FOO
-    idx = symbol.name.find(' to ', 0, 30)
+    idx = symbol.full_name.find(' to ', 0, 30)
     if idx != -1:
-      found_prefixes.add(symbol.name[:idx + 3])
-      symbol.name = symbol.name[idx + 4:] + ' [' + symbol.name[:idx] + ']'
+      found_prefixes.add(symbol.full_name[:idx + 3])
+      symbol.full_name = (
+          symbol.full_name[idx + 4:] + ' [' + symbol.full_name[:idx] + ']')
 
-    # Strip out return type, and identify where parameter list starts.
-    if symbol.section == 't':
-      symbol.full_name, symbol.name = function_signature.Parse(symbol.name)
+    # Strip out return type, and split out name, template_name.
+    # Function parsing also applies to non-text symbols. E.g. Function statics.
+    symbol.full_name, symbol.template_name, symbol.name = (
+        function_signature.Parse(symbol.full_name))
 
     # Remove anonymous namespaces (they just harm clustering).
-    non_anonymous = symbol.name.replace('(anonymous namespace)::', '')
-    if symbol.name != non_anonymous:
+    symbol.template_name = symbol.template_name.replace(
+        '(anonymous namespace)::', '')
+    symbol.full_name = symbol.full_name.replace(
+        '(anonymous namespace)::', '')
+    non_anonymous_name = symbol.name.replace('(anonymous namespace)::', '')
+    if symbol.name != non_anonymous_name:
       symbol.flags |= models.FLAG_ANONYMOUS
-      symbol.name = non_anonymous
-      symbol.full_name = symbol.full_name.replace(
-          '(anonymous namespace)::', '')
+      symbol.name = non_anonymous_name
 
-    if symbol.section != 't' and '(' in symbol.name:
-      # Pretty rare. Example:
-      # blink::CSSValueKeywordsHash::findValueImpl(char const*)::value_word_list
-      symbol.full_name = symbol.name
-      symbol.name = re.sub(r'\(.*\)', '', symbol.full_name)
-
-    # Don't bother storing both if they are the same.
-    if symbol.full_name == symbol.name:
-      symbol.full_name = ''
+    # Allow using "is" to compare names (and should help with RAM).
+    function_signature.InternSameNames(symbol)
 
   logging.debug('Found name prefixes of: %r', found_prefixes)
 
 
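This hunk is the heart of the CL: function_signature.Parse() now returns (full_name, template_name, name), and the parse runs for every symbol, not just .text ones. Per the CL description ("Add symbol.template_name, and strip <>s from symbol.name"), the intent is roughly that full_name keeps parameters and template arguments, template_name drops parameters but keeps template arguments, and name drops both. The sample values and the helper below are illustrative only; the real logic lives in function_signature.py:

# full_name:      'foo::Bar<std::vector<int> >::Baz(const char*)'
# template_name:  'foo::Bar<std::vector<int> >::Baz'
# name:           'foo::Bar::Baz'

import re

def strip_template_args(template_name):
  # Naive sketch: repeatedly drop innermost <...> groups so that nested
  # template arguments are removed as well.
  name = template_name
  while True:
    stripped = re.sub(r'<[^<>]*>', '', name)
    if stripped == name:
      return name
    name = stripped

# strip_template_args('foo::Bar<std::vector<int> >::Baz') -> 'foo::Bar::Baz'
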
 def _NormalizeObjectPath(path):
   if path.startswith('obj/'):
     # Convert obj/third_party/... -> third_party/...
     path = path[4:]
   elif path.startswith('../../'):
     # Convert ../../third_party/... -> third_party/...
(...skipping 55 matching lines...)
 
 # This must normalize object paths at the same time because normalization
 # needs to occur before finding common ancestor.
 def _ComputeAnscestorPathsAndNormalizeObjectPaths(
     raw_symbols, object_paths_by_name, source_mapper):
   num_found_paths = 0
   num_unknown_names = 0
   num_path_mismatches = 0
   num_unmatched_aliases = 0
   for symbol in raw_symbols:
-    name = symbol.name
+    full_name = symbol.full_name
     if (symbol.IsBss() or
-        not name or
-        name[0] in '*.' or  # e.g. ** merge symbols, .Lswitch.table
-        name == 'startup'):
+        not full_name or
+        full_name[0] in '*.' or  # e.g. ** merge symbols, .Lswitch.table
+        full_name == 'startup'):
       symbol.object_path = _NormalizeObjectPath(symbol.object_path)
       continue
 
-    object_paths = object_paths_by_name.get(name)
+    object_paths = object_paths_by_name.get(full_name)
     if object_paths:
       num_found_paths += 1
     else:
       if not symbol.object_path and symbol.aliases:
         # Happens when aliases are from object files where all symbols were
         # pruned or de-duped as aliases. Since we are only scanning .o files
         # referenced by included symbols, such files are missed.
         # TODO(agrieve): This could be fixed by retrieving linker inputs from
         #     build.ninja, or by looking for paths within the .map file's
         #     discarded sections.
(...skipping 35 matching lines...)
   for symbol in raw_symbols:
     path = symbol.object_path
     if path.endswith(')'):
       # Convert foo/bar.a(baz.o) -> foo/bar.a
       path = path[:path.index('(')]
     if path and path not in parsed_inputs:
       missed_inputs.add(path)
   return missed_inputs
 
 
-def _CalculatePadding(symbols):
+def _CalculatePadding(raw_symbols):
   """Populates the |padding| field based on symbol addresses.
 
   Symbols must already be sorted by |address|.
   """
   seen_sections = []
-  for i, symbol in enumerate(symbols[1:]):
-    prev_symbol = symbols[i]
+  for i, symbol in enumerate(raw_symbols[1:]):
+    prev_symbol = raw_symbols[i]
     if prev_symbol.section_name != symbol.section_name:
       assert symbol.section_name not in seen_sections, (
           'Input symbols must be sorted by section, then address.')
       seen_sections.append(symbol.section_name)
       continue
     if symbol.address <= 0 or prev_symbol.address <= 0:
       continue
 
     if symbol.address == prev_symbol.address:
       if symbol.aliases and symbol.aliases is prev_symbol.aliases:
         symbol.padding = prev_symbol.padding
         symbol.size = prev_symbol.size
         continue
       # Padding-only symbols happen for ** symbol gaps.
       assert prev_symbol.size_without_padding == 0, (
           'Found duplicate symbols:\n%r\n%r' % (prev_symbol, symbol))
 
     padding = symbol.address - prev_symbol.end_address
     # These thresholds were found by experimenting with arm32 Chrome.
     # E.g.: Set them to 0 and see what warnings get logged, then take max value.
     # TODO(agrieve): See if these thresholds make sense for architectures
     #     other than arm32.
-    if not symbol.name.startswith('*') and (
+    if not symbol.full_name.startswith('*') and (
         symbol.section in 'rd' and padding >= 256 or
         symbol.section in 't' and padding >= 64):
       # Should not happen.
       logging.warning('Large padding of %d between:\n A) %r\n B) %r' % (
           padding, prev_symbol, symbol))
     symbol.padding = padding
     symbol.size += padding
     assert symbol.size >= 0, (
         'Symbol has negative size (likely not sorted propertly): '
         '%r\nprev symbol: %r' % (symbol, prev_symbol))
 
 
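A small worked example of the padding attribution above (addresses made up): if the previous .text symbol ends at 0x1000 and the next one starts at 0x1008, the 8-byte gap is charged to the next symbol:

prev_end_address = 0x0ff8 + 8        # previous symbol: address + size
next_address, next_size = 0x1008, 16
padding = next_address - prev_end_address
assert padding == 8
# As in the code above: symbol.padding = 8 and symbol.size = 16 + 8 = 24,
# i.e. a symbol's reported size includes the alignment gap in front of it.
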
 def _AddSymbolAliases(raw_symbols, aliases_by_address):
   # Step 1: Create list of (index_of_symbol, name_list).
   logging.debug('Creating alias list')
   replacements = []
   num_new_symbols = 0
   for i, s in enumerate(raw_symbols):
     # Don't alias padding-only symbols (e.g. ** symbol gap)
     if s.size_without_padding == 0:
       continue
     name_list = aliases_by_address.get(s.address)
     if name_list:
-      if s.name not in name_list:
-        logging.warning('Name missing from aliases: %s %s', s.name, name_list)
+      if s.full_name not in name_list:
+        logging.warning('Name missing from aliases: %s %s', s.full_name,
+                        name_list)
         continue
       replacements.append((i, name_list))
       num_new_symbols += len(name_list) - 1
 
-  if float(num_new_symbols) / len(raw_symbols) < .1:
+  if float(num_new_symbols) / len(raw_symbols) < .05:
     logging.warning('Number of aliases is oddly low (%.0f%%). It should '
                     'usually be around 25%%. Ensure --tool-prefix is correct.',
                     float(num_new_symbols) / len(raw_symbols) * 100)
 
   # Step 2: Create new symbols as siblings to each existing one.
   logging.debug('Creating %d aliases', num_new_symbols)
   src_cursor_end = len(raw_symbols)
   raw_symbols += [None] * num_new_symbols
   dst_cursor_end = len(raw_symbols)
   for src_index, name_list in reversed(replacements):
     # Copy over symbols that come after the current one.
     chunk_size = src_cursor_end - src_index - 1
     dst_cursor_end -= chunk_size
     src_cursor_end -= chunk_size
     raw_symbols[dst_cursor_end:dst_cursor_end + chunk_size] = (
         raw_symbols[src_cursor_end:src_cursor_end + chunk_size])
     sym = raw_symbols[src_index]
     src_cursor_end -= 1
 
     # Create aliases (does not bother reusing the existing symbol).
     aliases = [None] * len(name_list)
-    for i, name in enumerate(name_list):
+    for i, full_name in enumerate(name_list):
       aliases[i] = models.Symbol(
-          sym.section_name, sym.size, address=sym.address, name=name,
+          sym.section_name, sym.size, address=sym.address, full_name=full_name,
           aliases=aliases)
 
     dst_cursor_end -= len(aliases)
     raw_symbols[dst_cursor_end:dst_cursor_end + len(aliases)] = aliases
 
   assert dst_cursor_end == src_cursor_end
 
 
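Context for the unchanged Step 2 logic above (only the name fields changed in this hunk): the list is grown once by num_new_symbols and then filled from the back, iterating replacements in reverse so entries that have not yet been moved are never overwritten. The same cursor technique on a plain list, as a rough sketch:

def expand_in_place(items, replacements):
  # |replacements| holds (index, new_items) pairs with increasing indexes;
  # each one replaces items[index] with the entries of new_items.
  num_new = sum(len(new) - 1 for _, new in replacements)
  src_end = len(items)
  items += [None] * num_new          # Grow once, in place.
  dst_end = len(items)
  for index, new_items in reversed(replacements):
    chunk = src_end - index - 1      # Tail that sits after items[index].
    dst_end -= chunk
    src_end -= chunk
    items[dst_end:dst_end + chunk] = items[src_end:src_end + chunk]
    src_end -= 1                     # Consume the entry being replaced.
    dst_end -= len(new_items)
    items[dst_end:dst_end + len(new_items)] = new_items
  assert dst_end == src_end

# lst = ['a', 'b', 'c']
# expand_in_place(lst, [(1, ['b1', 'b2'])])  ->  lst == ['a', 'b1', 'b2', 'c']
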
 def LoadAndPostProcessSizeInfo(path):
   """Returns a SizeInfo for the given |path|."""
   logging.debug('Loading results from: %s', path)
   size_info = file_format.LoadSizeInfo(path)
   _PostProcessSizeInfo(size_info)
   return size_info
 
 
 def _PostProcessSizeInfo(size_info):
   logging.info('Normalizing symbol names')
-  _NormalizeNames(size_info.symbols)
+  _NormalizeNames(size_info.raw_symbols)
   logging.info('Calculating padding')
-  _CalculatePadding(size_info.symbols)
-  logging.info('Processed %d symbols', len(size_info.symbols))
+  _CalculatePadding(size_info.raw_symbols)
+  logging.info('Processed %d symbols', len(size_info.raw_symbols))
 
 
 def CreateMetadata(map_path, elf_path, apk_path, tool_prefix, output_directory):
   metadata = None
   if elf_path:
     logging.debug('Constructing metadata')
     git_rev = _DetectGitRevision(os.path.dirname(elf_path))
     architecture = _ArchFromElf(elf_path, tool_prefix)
     build_id = BuildIdFromElf(elf_path, tool_prefix)
     timestamp_obj = datetime.datetime.utcfromtimestamp(os.path.getmtime(
(...skipping 113 matching lines...)
                len(object_paths_by_name),
                len(elf_object_paths) + len(missed_object_paths))
   _ComputeAnscestorPathsAndNormalizeObjectPaths(
       raw_symbols, object_paths_by_name, source_mapper)
 
   if not elf_path or not output_directory:
     logging.info('Normalizing object paths.')
     for symbol in raw_symbols:
       symbol.object_path = _NormalizeObjectPath(symbol.object_path)
 
-  size_info = models.SizeInfo(section_sizes, models.SymbolGroup(raw_symbols))
+  size_info = models.SizeInfo(section_sizes, raw_symbols)
 
   # Name normalization not strictly required, but makes for smaller files.
   if raw_only:
     logging.info('Normalizing symbol names')
-    _NormalizeNames(size_info.symbols)
+    _NormalizeNames(size_info.raw_symbols)
   else:
     _PostProcessSizeInfo(size_info)
 
   if logging.getLogger().isEnabledFor(logging.DEBUG):
     # Padding is reported in size coverage logs.
     if raw_only:
-      _CalculatePadding(size_info.symbols)
+      _CalculatePadding(size_info.raw_symbols)
     for line in describe.DescribeSizeInfoCoverage(size_info):
       logging.info(line)
-  logging.info('Recorded info for %d symbols', len(size_info.symbols))
+  logging.info('Recorded info for %d symbols', len(size_info.raw_symbols))
   return size_info
 
 
 def _DetectGitRevision(directory):
   try:
     git_rev = subprocess.check_output(
         ['git', '-C', directory, 'rev-parse', 'HEAD'])
     return git_rev.rstrip()
   except Exception:
     logging.warning('Failed to detect git revision for file metadata.')
(...skipping 158 matching lines...)
     logging.warning('Packed section not present: %s', packed_section_name)
   else:
     size_info.section_sizes['%s (unpacked)' % packed_section_name] = (
         unstripped_section_sizes.get(packed_section_name))
 
   logging.info('Recording metadata: \n %s',
                '\n '.join(describe.DescribeMetadata(size_info.metadata)))
   logging.info('Saving result to %s', args.size_file)
   file_format.SaveSizeInfo(size_info, args.size_file)
   logging.info('Done')