Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1027)

Side by Side Diff: components/tracing/common/process_metrics_memory_dump_provider.cc

Issue 2696923002: Update logic for emitting region information from dyld. (Closed)
Patch Set: Allow memory regions to be subsets of dyld regions. Created 3 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "components/tracing/common/process_metrics_memory_dump_provider.h" 5 #include "components/tracing/common/process_metrics_memory_dump_provider.h"
6 6
7 #include <fcntl.h> 7 #include <fcntl.h>
8 #include <stdint.h> 8 #include <stdint.h>
9 9
10 #include <map> 10 #include <map>
(...skipping 226 matching lines...) Expand 10 before | Expand all | Expand 10 after
237 237
238 namespace { 238 namespace {
239 239
240 using VMRegion = base::trace_event::ProcessMemoryMaps::VMRegion; 240 using VMRegion = base::trace_event::ProcessMemoryMaps::VMRegion;
241 241
242 bool IsAddressInSharedRegion(uint64_t address) { 242 bool IsAddressInSharedRegion(uint64_t address) {
243 return address >= SHARED_REGION_BASE_X86_64 && 243 return address >= SHARED_REGION_BASE_X86_64 &&
244 address < (SHARED_REGION_BASE_X86_64 + SHARED_REGION_SIZE_X86_64); 244 address < (SHARED_REGION_BASE_X86_64 + SHARED_REGION_SIZE_X86_64);
245 } 245 }
246 246
247 bool IsRegionContainedInRegion(const VMRegion& containee,
248 const VMRegion& container) {
249 uint64_t containee_end_address =
250 containee.start_address + containee.size_in_bytes;
251 uint64_t container_end_address =
252 container.start_address + container.size_in_bytes;
253 return containee.start_address >= container.start_address &&
254 containee_end_address <= container_end_address;
255 }
256
257 bool DoRegionsIntersect(const VMRegion& a, const VMRegion& b) {
258 uint64_t a_end_address = a.start_address + a.size_in_bytes;
259 uint64_t b_end_address = b.start_address + b.size_in_bytes;
 Primiano Tucci (use gerrit) 2017/02/15 09:54:31 You can halve the number of comparisons here. Ass… [comment truncated in page extraction]
erikchen 2017/02/15 17:26:20 Done.
260 if (a.start_address >= b.start_address && a.start_address < b_end_address)
261 return true;
262 if (b.start_address >= a.start_address && b.start_address < a_end_address)
263 return true;
264 return false;
265 }
266
247 // Creates VMRegions for all dyld images. Returns whether the operation 267 // Creates VMRegions for all dyld images. Returns whether the operation
248 // succeeded. 268 // succeeded.
249 bool GetDyldRegions(std::vector<VMRegion>* regions) { 269 bool GetDyldRegions(std::vector<VMRegion>* regions) {
250 task_dyld_info_data_t dyld_info; 270 task_dyld_info_data_t dyld_info;
251 mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT; 271 mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT;
252 kern_return_t kr = 272 kern_return_t kr =
253 task_info(mach_task_self(), TASK_DYLD_INFO, 273 task_info(mach_task_self(), TASK_DYLD_INFO,
254 reinterpret_cast<task_info_t>(&dyld_info), &count); 274 reinterpret_cast<task_info_t>(&dyld_info), &count);
255 if (kr != KERN_SUCCESS) 275 if (kr != KERN_SUCCESS)
256 return false; 276 return false;
257 277
258 const struct dyld_all_image_infos* all_image_infos = 278 const struct dyld_all_image_infos* all_image_infos =
259 reinterpret_cast<const struct dyld_all_image_infos*>( 279 reinterpret_cast<const struct dyld_all_image_infos*>(
260 dyld_info.all_image_info_addr); 280 dyld_info.all_image_info_addr);
261 281
282 bool emitted_linkedit_from_dyld_shared_cache = false;
262 for (size_t i = 0; i < all_image_infos->infoArrayCount; i++) { 283 for (size_t i = 0; i < all_image_infos->infoArrayCount; i++) {
263 const char* image_name = all_image_infos->infoArray[i].imageFilePath; 284 const char* image_name = all_image_infos->infoArray[i].imageFilePath;
264 285
265 // The public definition for dyld_all_image_infos/dyld_image_info is wrong 286 // The public definition for dyld_all_image_infos/dyld_image_info is wrong
266 // for 64-bit platforms. We explicitly cast to struct mach_header_64 even 287 // for 64-bit platforms. We explicitly cast to struct mach_header_64 even
267 // though the public definition claims that this is a struct mach_header. 288 // though the public definition claims that this is a struct mach_header.
268 const struct mach_header_64* const header = 289 const struct mach_header_64* const header =
269 reinterpret_cast<const struct mach_header_64* const>( 290 reinterpret_cast<const struct mach_header_64* const>(
270 all_image_infos->infoArray[i].imageLoadAddress); 291 all_image_infos->infoArray[i].imageLoadAddress);
271 292
272 uint64_t next_command = reinterpret_cast<uint64_t>(header + 1); 293 uint64_t next_command = reinterpret_cast<uint64_t>(header + 1);
273 uint64_t command_end = next_command + header->sizeofcmds; 294 uint64_t command_end = next_command + header->sizeofcmds;
274 for (unsigned int i = 0; i < header->ncmds; ++i) { 295 uint64_t slide = 0;
296 for (unsigned int j = 0; j < header->ncmds; ++j) {
275 // Ensure that next_command doesn't run past header->sizeofcmds. 297 // Ensure that next_command doesn't run past header->sizeofcmds.
276 if (next_command + sizeof(struct load_command) > command_end) 298 if (next_command + sizeof(struct load_command) > command_end)
277 return false; 299 return false;
278 const struct load_command* load_cmd = 300 const struct load_command* load_cmd =
279 reinterpret_cast<const struct load_command*>(next_command); 301 reinterpret_cast<const struct load_command*>(next_command);
280 next_command += load_cmd->cmdsize; 302 next_command += load_cmd->cmdsize;
281 303
282 if (load_cmd->cmd == LC_SEGMENT_64) { 304 if (load_cmd->cmd == LC_SEGMENT_64) {
283 if (load_cmd->cmdsize < sizeof(segment_command_64)) 305 if (load_cmd->cmdsize < sizeof(segment_command_64))
284 return false; 306 return false;
285 const segment_command_64* seg = 307 const segment_command_64* seg =
286 reinterpret_cast<const segment_command_64*>(load_cmd); 308 reinterpret_cast<const segment_command_64*>(load_cmd);
287 if (strcmp(seg->segname, SEG_PAGEZERO) == 0) 309 if (strcmp(seg->segname, SEG_PAGEZERO) == 0)
288 continue; 310 continue;
311 if (strcmp(seg->segname, SEG_TEXT) == 0) {
312 slide = reinterpret_cast<uint64_t>(header) - seg->vmaddr;
313 }
314
315 // Avoid emitting LINKEDIT regions in the dyld shared cache, since they
316 // all overlap.
317 if (IsAddressInSharedRegion(seg->vmaddr) &&
318 strcmp(seg->segname, SEG_LINKEDIT) == 0) {
319 if (emitted_linkedit_from_dyld_shared_cache) {
320 continue;
321 } else {
322 emitted_linkedit_from_dyld_shared_cache = true;
323 image_name = "dyld shared cache combined __LINKEDIT";
324 }
325 }
289 326
290 uint32_t protection_flags = 0; 327 uint32_t protection_flags = 0;
291 if (seg->initprot & VM_PROT_READ) 328 if (seg->initprot & VM_PROT_READ)
292 protection_flags |= VMRegion::kProtectionFlagsRead; 329 protection_flags |= VMRegion::kProtectionFlagsRead;
293 if (seg->initprot & VM_PROT_WRITE) 330 if (seg->initprot & VM_PROT_WRITE)
294 protection_flags |= VMRegion::kProtectionFlagsWrite; 331 protection_flags |= VMRegion::kProtectionFlagsWrite;
295 if (seg->initprot & VM_PROT_EXECUTE) 332 if (seg->initprot & VM_PROT_EXECUTE)
296 protection_flags |= VMRegion::kProtectionFlagsExec; 333 protection_flags |= VMRegion::kProtectionFlagsExec;
297 334
298 VMRegion region; 335 VMRegion region;
299 region.size_in_bytes = seg->vmsize; 336 region.size_in_bytes = seg->vmsize;
300 region.protection_flags = protection_flags; 337 region.protection_flags = protection_flags;
301 region.mapped_file = image_name; 338 region.mapped_file = image_name;
302 region.start_address = 339 region.start_address = slide + seg->vmaddr;
303 reinterpret_cast<uint64_t>(header) + seg->fileoff;
304 340
305 // We intentionally avoid setting any page information, which is not 341 // We intentionally avoid setting any page information, which is not
306 // available from dyld. The fields will be populated later. 342 // available from dyld. The fields will be populated later.
307 regions->push_back(region); 343 regions->push_back(region);
308 } 344 }
309 } 345 }
310 } 346 }
311 return true; 347 return true;
312 } 348 }
313 349
(...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after
391 427
392 base::CheckedNumeric<mach_vm_address_t> numeric(address); 428 base::CheckedNumeric<mach_vm_address_t> numeric(address);
393 numeric += size; 429 numeric += size;
394 if (!numeric.IsValid()) 430 if (!numeric.IsValid())
395 return false; 431 return false;
396 address = numeric.ValueOrDie(); 432 address = numeric.ValueOrDie();
397 } 433 }
398 return true; 434 return true;
399 } 435 }
400 436
401 void CopyRegionByteStats(VMRegion* dest, const VMRegion& source) { 437 void AddRegionByteStats(VMRegion* dest, const VMRegion& source) {
402 dest->byte_stats_private_dirty_resident = 438 dest->byte_stats_private_dirty_resident +=
403 source.byte_stats_private_dirty_resident; 439 source.byte_stats_private_dirty_resident;
404 dest->byte_stats_private_clean_resident = 440 dest->byte_stats_private_clean_resident +=
405 source.byte_stats_private_clean_resident; 441 source.byte_stats_private_clean_resident;
406 dest->byte_stats_shared_dirty_resident = 442 dest->byte_stats_shared_dirty_resident +=
407 source.byte_stats_shared_dirty_resident; 443 source.byte_stats_shared_dirty_resident;
408 dest->byte_stats_shared_clean_resident = 444 dest->byte_stats_shared_clean_resident +=
409 source.byte_stats_shared_clean_resident; 445 source.byte_stats_shared_clean_resident;
410 dest->byte_stats_swapped = source.byte_stats_swapped; 446 dest->byte_stats_swapped += source.byte_stats_swapped;
411 dest->byte_stats_proportional_resident = 447 dest->byte_stats_proportional_resident +=
412 source.byte_stats_proportional_resident; 448 source.byte_stats_proportional_resident;
413 } 449 }
414 450
415 } // namespace 451 } // namespace
416 452
417 bool ProcessMetricsMemoryDumpProvider::DumpProcessMemoryMaps( 453 bool ProcessMetricsMemoryDumpProvider::DumpProcessMemoryMaps(
418 const base::trace_event::MemoryDumpArgs& args, 454 const base::trace_event::MemoryDumpArgs& args,
419 base::trace_event::ProcessMemoryDump* pmd) { 455 base::trace_event::ProcessMemoryDump* pmd) {
420 using VMRegion = base::trace_event::ProcessMemoryMaps::VMRegion; 456 using VMRegion = base::trace_event::ProcessMemoryMaps::VMRegion;
421 457
422 std::vector<VMRegion> dyld_regions; 458 std::vector<VMRegion> dyld_regions;
423 if (!GetDyldRegions(&dyld_regions)) 459 if (!GetDyldRegions(&dyld_regions))
424 return false; 460 return false;
425 std::vector<VMRegion> all_regions; 461 std::vector<VMRegion> all_regions;
426 if (!GetAllRegions(&all_regions)) 462 if (!GetAllRegions(&all_regions))
427 return false; 463 return false;
428 464
429 // Cache information from dyld_regions in a data-structure more conducive to
430 // fast lookups.
431 std::unordered_map<uint64_t, VMRegion*> address_to_vm_region;
432 std::vector<uint64_t> addresses_in_shared_region;
433 for (VMRegion& region : dyld_regions) {
434 if (IsAddressInSharedRegion(region.start_address))
435 addresses_in_shared_region.push_back(region.start_address);
436 address_to_vm_region[region.start_address] = &region;
437 }
438
439 // Merge information from dyld regions and all regions. 465 // Merge information from dyld regions and all regions.
440 for (const VMRegion& region : all_regions) { 466 for (const VMRegion& region : all_regions) {
441 // Check to see if the region already has a VMRegion created from a dyld 467 bool skip = false;
442 // load command. If so, copy the byte stats and move on. 468 const bool in_shared_region = IsAddressInSharedRegion(region.start_address);
443 auto it = address_to_vm_region.find(region.start_address); 469 for (VMRegion& dyld_region : dyld_regions) {
444 if (it != address_to_vm_region.end() && 470 // If this region is fully contained in a dyld region, then add the bytes
445 it->second->size_in_bytes == region.size_in_bytes) { 471 // stats.
446 CopyRegionByteStats(it->second, region); 472 if (IsRegionContainedInRegion(region, dyld_region)) {
447 continue; 473 AddRegionByteStats(&dyld_region, region);
448 } 474 skip = true;
475 break;
476 }
449 477
450 // Check to see if the region is likely used for the dyld shared cache. 478 // Check to see if the region is likely used for the dyld shared cache.
451 if (IsAddressInSharedRegion(region.start_address)) { 479 if (in_shared_region) {
452 uint64_t end_address = region.start_address + region.size_in_bytes;
453 for (uint64_t address : addresses_in_shared_region) {
454 // This region is likely used for the dyld shared cache. Don't record 480 // This region is likely used for the dyld shared cache. Don't record
455 // any byte stats since: 481 // any byte stats since:
456 // 1. It's not possible to figure out which dyld regions the byte 482 // 1. It's not possible to figure out which dyld regions the byte
457 // stats correspond to. 483 // stats correspond to.
458 // 2. The region is likely shared by non-Chrome processes, so there's 484 // 2. The region is likely shared by non-Chrome processes, so there's
459 // no point in charging the pages towards Chrome. 485 // no point in charging the pages towards Chrome.
460 if (address >= region.start_address && address < end_address) { 486 if (DoRegionsIntersect(region, dyld_region)) {
461 continue; 487 skip = true;
488 break;
462 } 489 }
463 } 490 }
464 } 491 }
492 if (skip)
493 continue;
465 pmd->process_mmaps()->AddVMRegion(region); 494 pmd->process_mmaps()->AddVMRegion(region);
466 } 495 }
467 496
468 for (VMRegion& region : dyld_regions) { 497 for (VMRegion& region : dyld_regions) {
469 pmd->process_mmaps()->AddVMRegion(region); 498 pmd->process_mmaps()->AddVMRegion(region);
470 } 499 }
471 500
472 pmd->set_has_process_mmaps(); 501 pmd->set_has_process_mmaps();
473 return true; 502 return true;
474 } 503 }
(...skipping 147 matching lines...) Expand 10 before | Expand all | Expand 10 after
622 #endif 651 #endif
623 } 652 }
624 653
625 void ProcessMetricsMemoryDumpProvider::SuspendFastMemoryPolling() { 654 void ProcessMetricsMemoryDumpProvider::SuspendFastMemoryPolling() {
626 #if defined(OS_LINUX) || defined(OS_ANDROID) 655 #if defined(OS_LINUX) || defined(OS_ANDROID)
627 fast_polling_statm_fd_.reset(); 656 fast_polling_statm_fd_.reset();
628 #endif 657 #endif
629 } 658 }
630 659
631 } // namespace tracing 660 } // namespace tracing
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698