OLD | NEW |
1 # Copyright 2017 The Chromium Authors. All rights reserved. | 1 # Copyright 2017 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """Main Python API for analyzing binary size.""" | 5 """Main Python API for analyzing binary size.""" |
6 | 6 |
7 import argparse | 7 import argparse |
8 import calendar | 8 import calendar |
9 import collections | 9 import collections |
10 import datetime | 10 import datetime |
(...skipping 20 matching lines...) |
31 | 31 |
32 def _OpenMaybeGz(path, mode=None): | 32 def _OpenMaybeGz(path, mode=None): |
33 """Calls `gzip.open()` if |path| ends in ".gz", otherwise calls `open()`.""" | 33 """Calls `gzip.open()` if |path| ends in ".gz", otherwise calls `open()`.""" |
34 if path.endswith('.gz'): | 34 if path.endswith('.gz'): |
35 if mode and 'w' in mode: | 35 if mode and 'w' in mode: |
36 return gzip.GzipFile(path, mode, 1) | 36 return gzip.GzipFile(path, mode, 1) |
37 return gzip.open(path, mode) | 37 return gzip.open(path, mode) |
38 return open(path, mode or 'r') | 38 return open(path, mode or 'r') |
39 | 39 |
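A quick usage sketch of _OpenMaybeGz (hypothetical file names): reads are transparent either way, and writes to a ".gz" path go through gzip.GzipFile with compresslevel=1 (fast, lightly compressed).

    with _OpenMaybeGz('chrome.map.gz') as f:      # gzip.open() branch
      first_line = f.readline()
    with _OpenMaybeGz('chrome.map') as f:         # plain open() branch
      first_line = f.readline()
    with _OpenMaybeGz('out.size.gz', 'wb') as f:  # GzipFile(..., 1) branch
      f.write('data\n')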
40 | 40 |
41 def _StripLinkerAddedSymbolPrefixes(symbols): | 41 def _StripLinkerAddedSymbolPrefixes(raw_symbols): |
42 """Removes prefixes sometimes added to symbol names during link | 42 """Removes prefixes sometimes added to symbol names during link |
43 | 43 |
44 Removing prefixes makes symbol names match up with those found in .o files. | 44 Removing prefixes makes symbol names match up with those found in .o files. |
45 """ | 45 """ |
46 for symbol in symbols: | 46 for symbol in raw_symbols: |
47 name = symbol.name | 47 name = symbol.name |
48 if name.startswith('startup.'): | 48 if name.startswith('startup.'): |
49 symbol.flags |= models.FLAG_STARTUP | 49 symbol.flags |= models.FLAG_STARTUP |
50 symbol.name = name[8:] | 50 symbol.name = name[8:] |
51 elif name.startswith('unlikely.'): | 51 elif name.startswith('unlikely.'): |
52 symbol.flags |= models.FLAG_UNLIKELY | 52 symbol.flags |= models.FLAG_UNLIKELY |
53 symbol.name = name[9:] | 53 symbol.name = name[9:] |
54 elif name.startswith('rel.local.'): | 54 elif name.startswith('rel.local.'): |
55 symbol.flags |= models.FLAG_REL_LOCAL | 55 symbol.flags |= models.FLAG_REL_LOCAL |
56 symbol.name = name[10:] | 56 symbol.name = name[10:] |
57 elif name.startswith('rel.'): | 57 elif name.startswith('rel.'): |
58 symbol.flags |= models.FLAG_REL | 58 symbol.flags |= models.FLAG_REL |
59 symbol.name = name[4:] | 59 symbol.name = name[4:] |
60 | 60 |
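A minimal sketch of the stripping behavior, using a stand-in object in place of models.Symbol (only |name| and |flags| are touched):

    class _FakeSym(object):  # stand-in; just the attributes the helper uses
      def __init__(self, name):
        self.name = name
        self.flags = 0

    syms = [_FakeSym('startup._GLOBAL__sub_I_foo.cc'), _FakeSym('rel.local.bar')]
    _StripLinkerAddedSymbolPrefixes(syms)
    # syms[0].name == '_GLOBAL__sub_I_foo.cc', with models.FLAG_STARTUP set.
    # syms[1].name == 'bar', with models.FLAG_REL_LOCAL set.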
61 | 61 |
62 def _UnmangleRemainingSymbols(symbols, tool_prefix): | 62 def _UnmangleRemainingSymbols(raw_symbols, tool_prefix): |
63 """Uses c++filt to unmangle any symbols that need it.""" | 63 """Uses c++filt to unmangle any symbols that need it.""" |
64 to_process = [s for s in symbols if s.name.startswith('_Z')] | 64 to_process = [s for s in raw_symbols if s.name.startswith('_Z')] |
65 if not to_process: | 65 if not to_process: |
66 return | 66 return |
67 | 67 |
68 logging.info('Unmangling %d names', len(to_process)) | 68 logging.info('Unmangling %d names', len(to_process)) |
69 proc = subprocess.Popen([tool_prefix + 'c++filt'], stdin=subprocess.PIPE, | 69 proc = subprocess.Popen([tool_prefix + 'c++filt'], stdin=subprocess.PIPE, |
70 stdout=subprocess.PIPE) | 70 stdout=subprocess.PIPE) |
71 stdout = proc.communicate('\n'.join(s.name for s in to_process))[0] | 71 stdout = proc.communicate('\n'.join(s.name for s in to_process))[0] |
72 assert proc.returncode == 0 | 72 assert proc.returncode == 0 |
73 | 73 |
74 for i, line in enumerate(stdout.splitlines()): | 74 for i, line in enumerate(stdout.splitlines()): |
(...skipping 79 matching lines...) |
154 | 154 |
155 def _SourcePathForObjectPath(object_path, source_mapper): | 155 def _SourcePathForObjectPath(object_path, source_mapper): |
156 # We don't have source info for prebuilt .a files. | 156 # We don't have source info for prebuilt .a files. |
157 if not os.path.isabs(object_path) and not object_path.startswith('..'): | 157 if not os.path.isabs(object_path) and not object_path.startswith('..'): |
158 source_path = source_mapper.FindSourceForPath(object_path) | 158 source_path = source_mapper.FindSourceForPath(object_path) |
159 if source_path: | 159 if source_path: |
160 return _NormalizeSourcePath(source_path) | 160 return _NormalizeSourcePath(source_path) |
161 return '' | 161 return '' |
162 | 162 |
163 | 163 |
164 def _ExtractSourcePaths(symbols, source_mapper): | 164 def _ExtractSourcePaths(raw_symbols, source_mapper): |
165 """Fills in the |source_path| attribute.""" | 165 """Fills in the |source_path| attribute.""" |
166 logging.debug('Parsed %d .ninja files.', source_mapper.parsed_file_count) | 166 logging.debug('Parsed %d .ninja files.', source_mapper.parsed_file_count) |
167 for symbol in symbols: | 167 for symbol in raw_symbols: |
168 object_path = symbol.object_path | 168 object_path = symbol.object_path |
169 if object_path and not symbol.source_path: | 169 if object_path and not symbol.source_path: |
170 symbol.source_path = _SourcePathForObjectPath(object_path, source_mapper) | 170 symbol.source_path = _SourcePathForObjectPath(object_path, source_mapper) |
171 | 171 |
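A sketch of the lookup, with stand-ins for the mapper and symbols (the real mapper is built from parsed .ninja files; _NormalizeSourcePath is defined in the elided portion above):

    class _StubMapper(object):  # stand-in for the .ninja-derived source mapper
      parsed_file_count = 0
      def FindSourceForPath(self, path):
        return 'base/foo.cc' if path == 'obj/base/foo.o' else None

    class _Sym(object):  # stand-in with just the attributes used here
      def __init__(self, object_path):
        self.object_path = object_path
        self.source_path = ''

    syms = [_Sym('obj/base/foo.o'), _Sym('../prebuilt/libfoo.a')]
    _ExtractSourcePaths(syms, _StubMapper())
    # syms[0].source_path is the normalized 'base/foo.cc'; syms[1] stays ''
    # since paths outside the build (prebuilt .a files) have no source info.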
172 | 172 |
173 def _ComputeAnscestorPath(path_list): | 173 def _ComputeAnscestorPath(path_list): |
174 """Returns the common anscestor of the given paths.""" | 174 """Returns the common anscestor of the given paths.""" |
175 # Ignore missing paths. | 175 # Ignore missing paths. |
176 path_list = [p for p in path_list if p] | 176 path_list = [p for p in path_list if p] |
177 prefix = os.path.commonprefix(path_list) | 177 prefix = os.path.commonprefix(path_list) |
178 # Put the path count as a subdirectory to allow for better grouping when | 178 # Put the path count as a subdirectory to allow for better grouping when |
179 # doing path-based breakdowns. | 179 # doing path-based breakdowns. |
180 if not prefix: | 180 if not prefix: |
181 if len(path_list) < 2: | 181 if len(path_list) < 2: |
182 return '' | 182 return '' |
183 return os.path.join('{shared}', str(len(path_list))) | 183 return os.path.join('{shared}', str(len(path_list))) |
184 if prefix == path_list[0]: | 184 if prefix == path_list[0]: |
185 return prefix | 185 return prefix |
186 assert len(path_list) > 1, 'path_list: ' + repr(path_list) | 186 assert len(path_list) > 1, 'path_list: ' + repr(path_list) |
187 return os.path.join(os.path.dirname(prefix), '{shared}', str(len(path_list))) | 187 return os.path.join(os.path.dirname(prefix), '{shared}', str(len(path_list))) |
188 | 188 |
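Worked examples of the ancestor computation (POSIX separators assumed; note that os.path.commonprefix is character-based, not component-based):

    _ComputeAnscestorPath(['a/b/c.o', 'a/b/d.o'])  # -> 'a/b/{shared}/2'
    _ComputeAnscestorPath(['a/b.o'])               # -> 'a/b.o'
    _ComputeAnscestorPath(['x/a.o', 'y/b.o'])      # -> '{shared}/2'
    _ComputeAnscestorPath(['', 'x.o'])             # -> 'x.o' (empty path dropped)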
189 | 189 |
190 # This must normalize object paths at the same time because normalization | 190 # This must normalize object paths at the same time because normalization |
191 # needs to occur before finding common ancestor. | 191 # needs to occur before finding common ancestor. |
192 def _ComputeAnscestorPathsAndNormalizeObjectPaths( | 192 def _ComputeAnscestorPathsAndNormalizeObjectPaths( |
193 symbols, object_paths_by_name, source_mapper): | 193 raw_symbols, object_paths_by_name, source_mapper): |
194 num_found_paths = 0 | 194 num_found_paths = 0 |
195 num_unknown_names = 0 | 195 num_unknown_names = 0 |
196 num_path_mismatches = 0 | 196 num_path_mismatches = 0 |
197 num_unmatched_aliases = 0 | 197 num_unmatched_aliases = 0 |
198 for symbol in symbols: | 198 for symbol in raw_symbols: |
199 name = symbol.name | 199 name = symbol.name |
200 if (symbol.IsBss() or | 200 if (symbol.IsBss() or |
201 not name or | 201 not name or |
202 name[0] in '*.' or # e.g. ** merge symbols, .Lswitch.table | 202 name[0] in '*.' or # e.g. ** merge symbols, .Lswitch.table |
203 name == 'startup'): | 203 name == 'startup'): |
204 symbol.object_path = _NormalizeObjectPath(symbol.object_path) | 204 symbol.object_path = _NormalizeObjectPath(symbol.object_path) |
205 continue | 205 continue |
206 | 206 |
207 object_paths = object_paths_by_name.get(name) | 207 object_paths = object_paths_by_name.get(name) |
208 if object_paths: | 208 if object_paths: |
(...skipping 28 matching lines...) |
237 | 237 |
238 object_paths = [_NormalizeObjectPath(p) for p in object_paths] | 238 object_paths = [_NormalizeObjectPath(p) for p in object_paths] |
239 symbol.object_path = _ComputeAnscestorPath(object_paths) | 239 symbol.object_path = _ComputeAnscestorPath(object_paths) |
240 | 240 |
241 logging.debug('Cross-referenced %d symbols with nm output. ' | 241 logging.debug('Cross-referenced %d symbols with nm output. ' |
242 'num_unknown_names=%d num_path_mismatches=%d ' | 242 'num_unknown_names=%d num_path_mismatches=%d ' |
243 'num_unmatched_aliases=%d', num_found_paths, num_unknown_names, | 243 'num_unmatched_aliases=%d', num_found_paths, num_unknown_names, |
244 num_path_mismatches, num_unmatched_aliases) | 244 num_path_mismatches, num_unmatched_aliases) |
245 | 245 |
246 | 246 |
247 def _DiscoverMissedObjectPaths(symbols, elf_object_paths): | 247 def _DiscoverMissedObjectPaths(raw_symbols, elf_object_paths): |
248 # Missing object paths are caused by .a files added by -l flags, which are not | 248 # Missing object paths are caused by .a files added by -l flags, which are not |
249 # listed as explicit inputs within .ninja rules. | 249 # listed as explicit inputs within .ninja rules. |
250 parsed_inputs = set(elf_object_paths) | 250 parsed_inputs = set(elf_object_paths) |
251 missed_inputs = set() | 251 missed_inputs = set() |
252 for symbol in symbols: | 252 for symbol in raw_symbols: |
253 path = symbol.object_path | 253 path = symbol.object_path |
254 if path.endswith(')'): | 254 if path.endswith(')'): |
255 # Convert foo/bar.a(baz.o) -> foo/bar.a | 255 # Convert foo/bar.a(baz.o) -> foo/bar.a |
256 path = path[:path.index('(')] | 256 path = path[:path.index('(')] |
257 if path and path not in parsed_inputs: | 257 if path and path not in parsed_inputs: |
258 missed_inputs.add(path) | 258 missed_inputs.add(path) |
259 return missed_inputs | 259 return missed_inputs |
260 | 260 |
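In miniature (stand-in symbol type; only |object_path| is read):

    class _Sym(object):
      def __init__(self, object_path):
        self.object_path = object_path

    raw = [_Sym('obj/foo.o'), _Sym('lib/bar.a(baz.o)')]
    _DiscoverMissedObjectPaths(raw, ['obj/foo.o'])
    # -> set(['lib/bar.a']): the archive member collapses to its .a path,
    #    which was never listed as a .ninja input.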
261 | 261 |
262 def _CalculatePadding(symbols): | 262 def _CalculatePadding(symbols): |
(...skipping 32 matching lines...) |
295 # Should not happen. | 295 # Should not happen. |
296 logging.warning('Large padding of %d between:\n A) %r\n B) %r' % ( | 296 logging.warning('Large padding of %d between:\n A) %r\n B) %r' % ( |
297 padding, prev_symbol, symbol)) | 297 padding, prev_symbol, symbol)) |
298 symbol.padding = padding | 298 symbol.padding = padding |
299 symbol.size += padding | 299 symbol.size += padding |
300 assert symbol.size >= 0, ( | 300 assert symbol.size >= 0, ( |
301 'Symbol has negative size (likely not sorted properly): ' | 301 'Symbol has negative size (likely not sorted properly): ' |
302 '%r\nprev symbol: %r' % (symbol, prev_symbol)) | 302 '%r\nprev symbol: %r' % (symbol, prev_symbol)) |
303 | 303 |
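The visible tail of _CalculatePadding attributes each inter-symbol gap to the symbol that follows it. A plausible numeric reading of the elided arithmetic, assuming symbols are sorted by address within a section:

    prev_end = 0x100 + 0x20      # previous symbol: address 0x100, size 0x20
    padding = 0x128 - prev_end   # next symbol starts at 0x128 -> padding of 8
    # Those 8 bytes become symbol.padding and are folded into symbol.size, so
    # per-section symbol sizes sum to the section size with no hidden gaps.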
304 | 304 |
305 def _ClusterSymbols(symbols): | 305 def _AddSymbolAliases(raw_symbols, aliases_by_address): |
306 """Returns a new list of symbols with some symbols moved into groups. | |
307 | |
308 Groups include: | |
309 * Symbols that have [clone] in their name (created by compiler optimization). | |
310 * Star symbols (such as "** merge strings", and "** symbol gap") | |
311 | |
312 To view created groups: | |
313 Print(size_info.symbols.Filter(lambda s: s.IsGroup()), recursive=True) | |
314 """ | |
315 # http://unix.stackexchange.com/questions/223013/function-symbol-gets-part-suffix-after-compilation | |
316 # Example name suffixes: | |
317 # [clone .part.322] # GCC | |
318 # [clone .isra.322] # GCC | |
319 # [clone .constprop.1064] # GCC | |
320 # [clone .11064] # clang | |
321 | |
322 # Step 1: Create name map, find clones, collect star syms into replacements. | |
323 logging.debug('Creating name -> symbol map') | |
324 clone_indices = [] | |
325 indices_by_full_name = {} | |
326 # (name, full_name) -> [(index, sym),...] | |
327 replacements_by_name = collections.defaultdict(list) | |
328 for i, symbol in enumerate(symbols): | |
329 if symbol.name.startswith('**'): | |
330 # "symbol gap 3" -> "symbol gaps" | |
331 name = re.sub(r'\s+\d+$', 's', symbol.name) | |
332 replacements_by_name[(name, None)].append((i, symbol)) | |
333 elif symbol.full_name: | |
334 if symbol.full_name.endswith(']') and ' [clone ' in symbol.full_name: | |
335 clone_indices.append(i) | |
336 else: | |
337 indices_by_full_name[symbol.full_name] = i | |
338 | |
339 # Step 2: Collect same-named clone symbols. | |
340 logging.debug('Grouping all clones') | |
341 group_names_by_index = {} | |
342 for i in clone_indices: | |
343 symbol = symbols[i] | |
344 # Multiple attributes could exist, so search from left-to-right. | |
345 stripped_name = symbol.name[:symbol.name.index(' [clone ')] | |
346 stripped_full_name = symbol.full_name[:symbol.full_name.index(' [clone ')] | |
347 name_tup = (stripped_name, stripped_full_name) | |
348 replacement_list = replacements_by_name[name_tup] | |
349 | |
350 if not replacement_list: | |
351 # First occurrence, check for non-clone symbol. | |
352 non_clone_idx = indices_by_full_name.get(stripped_name) | |
353 if non_clone_idx is not None: | |
354 non_clone_symbol = symbols[non_clone_idx] | |
355 replacement_list.append((non_clone_idx, non_clone_symbol)) | |
356 group_names_by_index[non_clone_idx] = stripped_name | |
357 | |
358 replacement_list.append((i, symbol)) | |
359 group_names_by_index[i] = stripped_name | |
360 | |
361 # Step 3: Undo clustering when length=1. | |
362 # Removing these groups means Diff() logic must know about [clone] suffix. | |
363 to_clear = [] | |
364 for name_tup, replacement_list in replacements_by_name.iteritems(): | |
365 if len(replacement_list) == 1: | |
366 to_clear.append(name_tup) | |
367 for name_tup in to_clear: | |
368 del replacements_by_name[name_tup] | |
369 | |
370 # Step 4: Replace first symbol from each cluster with a SymbolGroup. | |
371 before_symbol_count = sum(len(x) for x in replacements_by_name.itervalues()) | |
372 logging.debug('Creating %d symbol groups from %d symbols. %d clones had only ' | |
373 'one symbol.', len(replacements_by_name), before_symbol_count, | |
374 len(to_clear)) | |
375 | |
376 len_delta = len(replacements_by_name) - before_symbol_count | |
377 grouped_symbols = [None] * (len(symbols) + len_delta) | |
378 dest_index = 0 | |
379 src_index = 0 | |
380 seen_names = set() | |
381 replacement_names_by_index = {} | |
382 for name_tup, replacement_list in replacements_by_name.iteritems(): | |
383 for tup in replacement_list: | |
384 replacement_names_by_index[tup[0]] = name_tup | |
385 | |
386 sorted_items = replacement_names_by_index.items() | |
387 sorted_items.sort(key=lambda tup: tup[0]) | |
388 for index, name_tup in sorted_items: | |
389 count = index - src_index | |
390 grouped_symbols[dest_index:dest_index + count] = ( | |
391 symbols[src_index:src_index + count]) | |
392 src_index = index + 1 | |
393 dest_index += count | |
394 if name_tup not in seen_names: | |
395 seen_names.add(name_tup) | |
396 group_symbols = [tup[1] for tup in replacements_by_name[name_tup]] | |
397 grouped_symbols[dest_index] = models.SymbolGroup( | |
398 group_symbols, name=name_tup[0], full_name=name_tup[1], | |
399 section_name=group_symbols[0].section_name) | |
400 dest_index += 1 | |
401 | |
402 assert len(grouped_symbols[dest_index:None]) == len(symbols[src_index:None]) | |
403 grouped_symbols[dest_index:None] = symbols[src_index:None] | |
404 logging.debug('Finished making groups.') | |
405 return grouped_symbols | |
406 | |
407 | |
408 def _AddSymbolAliases(symbols, aliases_by_address): | |
409 # Step 1: Create list of (index_of_symbol, name_list). | 306 # Step 1: Create list of (index_of_symbol, name_list). |
410 logging.debug('Creating alias list') | 307 logging.debug('Creating alias list') |
411 replacements = [] | 308 replacements = [] |
412 num_new_symbols = 0 | 309 num_new_symbols = 0 |
413 for i, s in enumerate(symbols): | 310 for i, s in enumerate(raw_symbols): |
414 # Don't alias padding-only symbols (e.g. ** symbol gap) | 311 # Don't alias padding-only symbols (e.g. ** symbol gap) |
415 if s.size_without_padding == 0: | 312 if s.size_without_padding == 0: |
416 continue | 313 continue |
417 name_list = aliases_by_address.get(s.address) | 314 name_list = aliases_by_address.get(s.address) |
418 if name_list: | 315 if name_list: |
419 if s.name not in name_list: | 316 if s.name not in name_list: |
420 logging.warning('Name missing from aliases: %s %s', s.name, name_list) | 317 logging.warning('Name missing from aliases: %s %s', s.name, name_list) |
421 continue | 318 continue |
422 replacements.append((i, name_list)) | 319 replacements.append((i, name_list)) |
423 num_new_symbols += len(name_list) - 1 | 320 num_new_symbols += len(name_list) - 1 |
424 | 321 |
425 # Step 2: Create new symbols as siblings to each existing one. | 322 # Step 2: Create new symbols as siblings to each existing one. |
426 logging.debug('Creating %d aliases', num_new_symbols) | 323 logging.debug('Creating %d aliases', num_new_symbols) |
427 src_cursor_end = len(symbols) | 324 src_cursor_end = len(raw_symbols) |
428 symbols += [None] * num_new_symbols | 325 raw_symbols += [None] * num_new_symbols |
429 dst_cursor_end = len(symbols) | 326 dst_cursor_end = len(raw_symbols) |
430 for src_index, name_list in reversed(replacements): | 327 for src_index, name_list in reversed(replacements): |
431 # Copy over symbols that come after the current one. | 328 # Copy over symbols that come after the current one. |
432 chunk_size = src_cursor_end - src_index - 1 | 329 chunk_size = src_cursor_end - src_index - 1 |
433 dst_cursor_end -= chunk_size | 330 dst_cursor_end -= chunk_size |
434 src_cursor_end -= chunk_size | 331 src_cursor_end -= chunk_size |
435 symbols[dst_cursor_end:dst_cursor_end + chunk_size] = ( | 332 raw_symbols[dst_cursor_end:dst_cursor_end + chunk_size] = ( |
436 symbols[src_cursor_end:src_cursor_end + chunk_size]) | 333 raw_symbols[src_cursor_end:src_cursor_end + chunk_size]) |
437 sym = symbols[src_index] | 334 sym = raw_symbols[src_index] |
438 src_cursor_end -= 1 | 335 src_cursor_end -= 1 |
439 | 336 |
440 # Create aliases (does not bother reusing the existing symbol). | 337 # Create aliases (does not bother reusing the existing symbol). |
441 aliases = [None] * len(name_list) | 338 aliases = [None] * len(name_list) |
442 for i, name in enumerate(name_list): | 339 for i, name in enumerate(name_list): |
443 aliases[i] = models.Symbol( | 340 aliases[i] = models.Symbol( |
444 sym.section_name, sym.size, address=sym.address, name=name, | 341 sym.section_name, sym.size, address=sym.address, name=name, |
445 aliases=aliases) | 342 aliases=aliases) |
446 | 343 |
447 dst_cursor_end -= len(aliases) | 344 dst_cursor_end -= len(aliases) |
448 symbols[dst_cursor_end:dst_cursor_end + len(aliases)] = aliases | 345 raw_symbols[dst_cursor_end:dst_cursor_end + len(aliases)] = aliases |
449 | 346 |
450 assert dst_cursor_end == src_cursor_end | 347 assert dst_cursor_end == src_cursor_end |
451 | 348 |
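A miniature run of the alias expansion (stand-in input symbol; the models.Symbol keyword arguments match the construction above, and the models module is assumed importable):

    class _Sym(object):  # stand-in carrying the fields the helper reads
      def __init__(self, name, address, size):
        self.name, self.address, self.size = name, address, size
        self.size_without_padding = size
        self.section_name = '.text'

    syms = [_Sym('Foo()', 0x1000, 24)]
    _AddSymbolAliases(syms, {0x1000: ['Foo()', 'Bar()']})
    # len(syms) == 2: 'Foo()' and 'Bar()' are now sibling models.Symbol
    # objects sharing a single |aliases| list, address, and size.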
452 | 349 |
453 def LoadAndPostProcessSizeInfo(path): | 350 def LoadAndPostProcessSizeInfo(path): |
454 """Returns a SizeInfo for the given |path|.""" | 351 """Returns a SizeInfo for the given |path|.""" |
455 logging.debug('Loading results from: %s', path) | 352 logging.debug('Loading results from: %s', path) |
456 size_info = file_format.LoadSizeInfo(path) | 353 size_info = file_format.LoadSizeInfo(path) |
457 _PostProcessSizeInfo(size_info) | 354 _PostProcessSizeInfo(size_info) |
458 return size_info | 355 return size_info |
459 | 356 |
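Typical usage (hypothetical path; .size files are produced by the main() flow at the bottom of this file). After loading, _PostProcessSizeInfo() has normalized names and computed padding, so |symbols| is ready to query:

    size_info = LoadAndPostProcessSizeInfo('chrome.size')
    print len(size_info.symbols)  # the post-processed SymbolGroup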
460 | 357 |
461 def _PostProcessSizeInfo(size_info): | 358 def _PostProcessSizeInfo(size_info): |
462 logging.info('Normalizing symbol names') | 359 logging.info('Normalizing symbol names') |
463 _NormalizeNames(size_info.raw_symbols) | 360 _NormalizeNames(size_info.symbols) |
464 logging.info('Calculating padding') | 361 logging.info('Calculating padding') |
465 _CalculatePadding(size_info.raw_symbols) | 362 _CalculatePadding(size_info.symbols) |
466 logging.info('Grouping decomposed functions') | 363 logging.info('Processed %d symbols', len(size_info.symbols)) |
467 size_info.symbols = models.SymbolGroup( | |
468 _ClusterSymbols(size_info.raw_symbols)) | |
469 logging.info('Processed %d symbols', len(size_info.raw_symbols)) | |
470 | 364 |
471 | 365 |
472 def CreateMetadata(map_path, elf_path, apk_path, tool_prefix, output_directory): | 366 def CreateMetadata(map_path, elf_path, apk_path, tool_prefix, output_directory): |
473 metadata = None | 367 metadata = None |
474 if elf_path: | 368 if elf_path: |
475 logging.debug('Constructing metadata') | 369 logging.debug('Constructing metadata') |
476 git_rev = _DetectGitRevision(os.path.dirname(elf_path)) | 370 git_rev = _DetectGitRevision(os.path.dirname(elf_path)) |
477 architecture = _ArchFromElf(elf_path, tool_prefix) | 371 architecture = _ArchFromElf(elf_path, tool_prefix) |
478 build_id = BuildIdFromElf(elf_path, tool_prefix) | 372 build_id = BuildIdFromElf(elf_path, tool_prefix) |
479 timestamp_obj = datetime.datetime.utcfromtimestamp(os.path.getmtime( | 373 timestamp_obj = datetime.datetime.utcfromtimestamp(os.path.getmtime( |
(...skipping 112 matching lines...) |
592 len(object_paths_by_name), | 486 len(object_paths_by_name), |
593 len(elf_object_paths) + len(missed_object_paths)) | 487 len(elf_object_paths) + len(missed_object_paths)) |
594 _ComputeAnscestorPathsAndNormalizeObjectPaths( | 488 _ComputeAnscestorPathsAndNormalizeObjectPaths( |
595 raw_symbols, object_paths_by_name, source_mapper) | 489 raw_symbols, object_paths_by_name, source_mapper) |
596 | 490 |
597 if not elf_path or not output_directory: | 491 if not elf_path or not output_directory: |
598 logging.info('Normalizing object paths.') | 492 logging.info('Normalizing object paths.') |
599 for symbol in raw_symbols: | 493 for symbol in raw_symbols: |
600 symbol.object_path = _NormalizeObjectPath(symbol.object_path) | 494 symbol.object_path = _NormalizeObjectPath(symbol.object_path) |
601 | 495 |
602 size_info = models.SizeInfo(section_sizes, raw_symbols) | 496 size_info = models.SizeInfo(section_sizes, models.SymbolGroup(raw_symbols)) |
603 | 497 |
604 # Name normalization is not strictly required, but makes for smaller files. | 498 # Name normalization is not strictly required, but makes for smaller files. |
605 if raw_only: | 499 if raw_only: |
606 logging.info('Normalizing symbol names') | 500 logging.info('Normalizing symbol names') |
607 _NormalizeNames(size_info.raw_symbols) | 501 _NormalizeNames(size_info.symbols) |
608 else: | 502 else: |
609 _PostProcessSizeInfo(size_info) | 503 _PostProcessSizeInfo(size_info) |
610 | 504 |
611 if logging.getLogger().isEnabledFor(logging.DEBUG): | 505 if logging.getLogger().isEnabledFor(logging.DEBUG): |
612 # Padding is reported in size coverage logs. | 506 # Padding is reported in size coverage logs. |
613 if raw_only: | 507 if raw_only: |
614 _CalculatePadding(size_info.raw_symbols) | 508 _CalculatePadding(size_info.symbols) |
615 for line in describe.DescribeSizeInfoCoverage(size_info): | 509 for line in describe.DescribeSizeInfoCoverage(size_info): |
616 logging.info(line) | 510 logging.info(line) |
617 logging.info('Recorded info for %d symbols', len(size_info.raw_symbols)) | 511 logging.info('Recorded info for %d symbols', len(size_info.symbols)) |
618 return size_info | 512 return size_info |
619 | 513 |
620 | 514 |
621 def _DetectGitRevision(directory): | 515 def _DetectGitRevision(directory): |
622 try: | 516 try: |
623 git_rev = subprocess.check_output( | 517 git_rev = subprocess.check_output( |
624 ['git', '-C', directory, 'rev-parse', 'HEAD']) | 518 ['git', '-C', directory, 'rev-parse', 'HEAD']) |
625 return git_rev.rstrip() | 519 return git_rev.rstrip() |
626 except Exception: | 520 except Exception: |
627 logging.warning('Failed to detect git revision for file metadata.') | 521 logging.warning('Failed to detect git revision for file metadata.') |
(...skipping 148 matching lines...) |
776 logging.warning('Packed section not present: %s', packed_section_name) | 670 logging.warning('Packed section not present: %s', packed_section_name) |
777 else: | 671 else: |
778 size_info.section_sizes['%s (unpacked)' % packed_section_name] = ( | 672 size_info.section_sizes['%s (unpacked)' % packed_section_name] = ( |
779 unstripped_section_sizes.get(packed_section_name)) | 673 unstripped_section_sizes.get(packed_section_name)) |
780 | 674 |
781 logging.info('Recording metadata: \n %s', | 675 logging.info('Recording metadata: \n %s', |
782 '\n '.join(describe.DescribeMetadata(size_info.metadata))) | 676 '\n '.join(describe.DescribeMetadata(size_info.metadata))) |
783 logging.info('Saving result to %s', args.size_file) | 677 logging.info('Saving result to %s', args.size_file) |
784 file_format.SaveSizeInfo(size_info, args.size_file) | 678 file_format.SaveSizeInfo(size_info, args.size_file) |
785 logging.info('Done') | 679 logging.info('Done') |