OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "components/tracing/common/process_metrics_memory_dump_provider.h" | 5 #include "components/tracing/common/process_metrics_memory_dump_provider.h" |
6 | 6 |
7 #include <fcntl.h> | 7 #include <fcntl.h> |
8 #include <stdint.h> | 8 #include <stdint.h> |
9 | 9 |
10 #include <map> | 10 #include <map> |
(...skipping 10 matching lines...) |
21 #include "base/trace_event/memory_dump_manager.h" | 21 #include "base/trace_event/memory_dump_manager.h" |
22 #include "base/trace_event/process_memory_dump.h" | 22 #include "base/trace_event/process_memory_dump.h" |
23 #include "base/trace_event/process_memory_maps.h" | 23 #include "base/trace_event/process_memory_maps.h" |
24 #include "base/trace_event/process_memory_totals.h" | 24 #include "base/trace_event/process_memory_totals.h" |
25 #include "build/build_config.h" | 25 #include "build/build_config.h" |
26 | 26 |
27 #if defined(OS_MACOSX) | 27 #if defined(OS_MACOSX) |
28 #include <libproc.h> | 28 #include <libproc.h> |
29 #include <mach/mach.h> | 29 #include <mach/mach.h> |
30 #include <mach/mach_vm.h> | 30 #include <mach/mach_vm.h> |
31 #include <mach/shared_region.h> | |
31 #include <sys/param.h> | 32 #include <sys/param.h> |
32 | 33 |
34 #include <mach-o/dyld_images.h> | |
35 #include <mach-o/loader.h> | |
36 #include <mach/mach.h> | |
37 | |
33 #include "base/numerics/safe_math.h" | 38 #include "base/numerics/safe_math.h" |
34 #endif // defined(OS_MACOSX) | 39 #endif // defined(OS_MACOSX) |
35 | 40 |
36 namespace tracing { | 41 namespace tracing { |
37 | 42 |
38 namespace { | 43 namespace { |
39 | 44 |
40 base::LazyInstance< | 45 base::LazyInstance< |
41 std::map<base::ProcessId, | 46 std::map<base::ProcessId, |
42 std::unique_ptr<ProcessMetricsMemoryDumpProvider>>>::Leaky | 47 std::unique_ptr<ProcessMetricsMemoryDumpProvider>>>::Leaky |
(...skipping 179 matching lines...) |
222 res = ReadLinuxProcSmapsFile(smaps_file.get(), pmd->process_mmaps()); | 227 res = ReadLinuxProcSmapsFile(smaps_file.get(), pmd->process_mmaps()); |
223 } | 228 } |
224 | 229 |
225 if (res) | 230 if (res) |
226 pmd->set_has_process_mmaps(); | 231 pmd->set_has_process_mmaps(); |
227 return res; | 232 return res; |
228 } | 233 } |
229 #endif // defined(OS_LINUX) || defined(OS_ANDROID) | 234 #endif // defined(OS_LINUX) || defined(OS_ANDROID) |
230 | 235 |
231 #if defined(OS_MACOSX) | 236 #if defined(OS_MACOSX) |
232 bool ProcessMetricsMemoryDumpProvider::DumpProcessMemoryMaps( | 237 |
233 const base::trace_event::MemoryDumpArgs& args, | 238 namespace { |
234 base::trace_event::ProcessMemoryDump* pmd) { | 239 |
240 using VMRegion = base::trace_event::ProcessMemoryMaps::VMRegion; | |
241 | |
242 bool IsAddressInSharedRegion(uint64_t address) { | |
243 return address >= SHARED_REGION_BASE_X86_64 && | |
244 address < (SHARED_REGION_BASE_X86_64 + SHARED_REGION_SIZE_X86_64); | |
245 } | |
246 | |
247 // Creates VMRegions for all dyld images. Returns whether the operation | |
248 // succeeded. | |
249 bool GetDyldRegions(std::vector<VMRegion>* regions) { | |
250 task_dyld_info_data_t dyld_info; | |
251 mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT; | |
252 kern_return_t kr = | |
253 task_info(mach_task_self(), TASK_DYLD_INFO, | |
254 reinterpret_cast<task_info_t>(&dyld_info), &count); | |
255 if (kr != KERN_SUCCESS) | |
256 return false; | |
257 | |
258 const struct dyld_all_image_infos* all_image_infos = | |
259 reinterpret_cast<const struct dyld_all_image_infos*>( | |
260 dyld_info.all_image_info_addr); | |
261 | |
262 for (size_t i = 0; i < all_image_infos->infoArrayCount; i++) { | |
263 const char* image_name = all_image_infos->infoArray[i].imageFilePath; | |
264 | |
265 // The public definition for dyld_all_image_infos/dyld_image_info is wrong | |
266 // for 64-bit platforms. We explicitly cast to struct mach_header_64 even | |
267 // though the public definition claims that this is a struct mach_header. | |
268 const struct mach_header_64* const header = | |
269 reinterpret_cast<const struct mach_header_64* const>( | |
270 all_image_infos->infoArray[i].imageLoadAddress); | |
271 | |
272 const struct load_command* load_cmd = | |
273 reinterpret_cast<const struct load_command*>(header + 1); | |
274 for (unsigned int i = 0; load_cmd && (i < header->ncmds); ++i) { | |
275 if (load_cmd->cmd == LC_SEGMENT_64) { | |
276 const segment_command_64* seg = | |
277 reinterpret_cast<const segment_command_64*>(load_cmd); | |
278 if (strcmp(seg->segname, SEG_PAGEZERO) == 0) | |
Mark Mentovai 2017/02/11 02:18:50
You should check that the size of this command is
erikchen 2017/02/11 02:39:46
Done.
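Mark's comment is truncated by the review UI, but the concern appears to be validating a command's claimed size before the cast below. A minimal sketch of such a check, with hypothetical names and not necessarily the code that landed:

  #include <mach-o/loader.h>

  // Reject a load command whose cmdsize is too small for the struct it
  // claims to be, before reinterpret_cast'ing it; a truncated or corrupt
  // command would otherwise lead to reads past the end of the command.
  bool SegmentFromLoadCommand(const load_command* load_cmd,
                              const segment_command_64** out_seg) {
    if (load_cmd->cmd != LC_SEGMENT_64)
      return false;
    if (load_cmd->cmdsize < sizeof(segment_command_64))
      return false;
    *out_seg = reinterpret_cast<const segment_command_64*>(load_cmd);
    return true;
  }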
279 continue; | |
280 | |
281 uint32_t protection_flags = 0; | |
282 if (seg->maxprot & VM_PROT_READ) | |
283 protection_flags |= VMRegion::kProtectionFlagsRead; | |
284 if (seg->maxprot & VM_PROT_WRITE) | |
285 protection_flags |= VMRegion::kProtectionFlagsWrite; | |
286 if (seg->maxprot & VM_PROT_EXECUTE) | |
287 protection_flags |= VMRegion::kProtectionFlagsExec; | |
288 | |
289 VMRegion region; | |
290 region.size_in_bytes = seg->vmsize; | |
291 region.protection_flags = protection_flags; | |
292 region.mapped_file = image_name; | |
293 region.start_address = | |
294 reinterpret_cast<uint64_t>(header) + seg->fileoff; | |
295 | |
296 // We intentionally avoid setting any page information, which is not | |
297 // available from dyld. The fields will be populated later. | |
298 regions->push_back(region); | |
299 } else if (load_cmd->cmd == LC_SEGMENT) { | |
Mark Mentovai 2017/02/11 02:18:50
Don't waste your time. You won't see one of these
erikchen 2017/02/11 02:39:46
Done.
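Mark's point appears to be that a 64-bit process only maps 64-bit images, so the LC_SEGMENT branch is dead code; dropping it also removes the duplicated maxprot translation above. A sketch of a helper the remaining LC_SEGMENT_64 branch could call (hypothetical name; VMRegion and its flag constants come from base/trace_event/process_memory_maps.h, already included by this file):

  #include <stdint.h>

  #include <mach/vm_prot.h>

  #include "base/trace_event/process_memory_maps.h"

  using VMRegion = base::trace_event::ProcessMemoryMaps::VMRegion;

  // Translates a segment's maximum protection bits into VMRegion flags.
  uint32_t ProtectionFlagsFromMaxProt(vm_prot_t maxprot) {
    uint32_t flags = 0;
    if (maxprot & VM_PROT_READ)
      flags |= VMRegion::kProtectionFlagsRead;
    if (maxprot & VM_PROT_WRITE)
      flags |= VMRegion::kProtectionFlagsWrite;
    if (maxprot & VM_PROT_EXECUTE)
      flags |= VMRegion::kProtectionFlagsExec;
    return flags;
  }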
300 const segment_command* seg = | |
301 reinterpret_cast<const segment_command*>(load_cmd); | |
302 if (strcmp(seg->segname, SEG_PAGEZERO) == 0) | |
303 continue; | |
304 | |
305 uint32_t protection_flags = 0; | |
306 if (seg->maxprot & VM_PROT_READ) | |
307 protection_flags |= VMRegion::kProtectionFlagsRead; | |
308 if (seg->maxprot & VM_PROT_WRITE) | |
309 protection_flags |= VMRegion::kProtectionFlagsWrite; | |
310 if (seg->maxprot & VM_PROT_EXECUTE) | |
311 protection_flags |= VMRegion::kProtectionFlagsExec; | |
312 | |
313 VMRegion region; | |
314 region.size_in_bytes = seg->vmsize; | |
315 region.protection_flags = protection_flags; | |
316 region.mapped_file = image_name; | |
317 region.start_address = | |
318 reinterpret_cast<uint64_t>(header) + seg->fileoff; | |
319 | |
320 // We intentionally avoid setting any page information, which is not | |
321 // available from dyld. The fields will be populated later. | |
322 regions->push_back(region); | |
323 } | |
324 load_cmd = reinterpret_cast<const struct load_command*>( | |
325 reinterpret_cast<const char*>(load_cmd) + load_cmd->cmdsize); | |
326 | |
327 // Ensure that load_cmd doesn't run past header->sizeofcmds. | |
328 if (reinterpret_cast<uint64_t>(header + 1) + header->sizeofcmds < | |
Mark Mentovai 2017/02/11 02:18:50
You should check up at the top of the loop that th
erikchen 2017/02/11 02:39:46
Done.
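This comment is also cut off, but together with the existing post-advance check it reads as a request to validate bounds at the top of the loop, before any field of *load_cmd is dereferenced. A hypothetical sketch of such a check:

  #include <stdint.h>

  #include <mach-o/loader.h>

  // True if |load_cmd| and its claimed cmdsize lie entirely within the
  // image's load-command area. The first comparison guarantees that cmd and
  // cmdsize themselves are readable before cmdsize is used.
  bool LoadCommandInBounds(const mach_header_64* header,
                           const load_command* load_cmd) {
    const uint64_t commands_begin = reinterpret_cast<uint64_t>(header + 1);
    const uint64_t commands_end = commands_begin + header->sizeofcmds;
    const uint64_t cursor = reinterpret_cast<uint64_t>(load_cmd);
    return cursor + sizeof(load_command) <= commands_end &&
           cursor + load_cmd->cmdsize <= commands_end;
  }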
329 reinterpret_cast<uint64_t>(load_cmd)) { | |
330 return false; | |
331 } | |
332 } | |
333 } | |
334 return true; | |
335 } | |
336 | |
337 void PopulateByteStats(VMRegion* region, const vm_region_submap_info_64& info) { | |
338 uint32_t share_mode = info.share_mode; | |
339 if (share_mode == SM_COW && info.ref_count == 1) | |
340 share_mode = SM_PRIVATE; | |
341 | |
342 uint64_t dirty_bytes = info.pages_dirtied * PAGE_SIZE; | |
343 uint64_t clean_bytes = | |
344 (info.pages_resident - info.pages_reusable - info.pages_dirtied) * | |
345 PAGE_SIZE; | |
346 switch (share_mode) { | |
347 case SM_LARGE_PAGE: | |
348 case SM_PRIVATE: | |
349 region->byte_stats_private_dirty_resident = dirty_bytes; | |
350 region->byte_stats_private_clean_resident = clean_bytes; | |
351 break; | |
352 case SM_COW: | |
353 region->byte_stats_private_dirty_resident = dirty_bytes; | |
354 region->byte_stats_shared_clean_resident = clean_bytes; | |
355 break; | |
356 case SM_SHARED: | |
357 case SM_PRIVATE_ALIASED: | |
358 case SM_TRUESHARED: | |
359 case SM_SHARED_ALIASED: | |
360 region->byte_stats_shared_dirty_resident = dirty_bytes; | |
361 region->byte_stats_shared_clean_resident = clean_bytes; | |
362 break; | |
363 case SM_EMPTY: | |
364 break; | |
365 default: | |
366 NOTREACHED(); | |
367 break; | |
368 } | |
369 } | |
370 | |
371 // Creates VMRegions from mach_vm_region_recurse. Returns whether the operation | |
372 // succeeded. | |
373 bool GetAllRegions(std::vector<VMRegion>* regions) { | |
235 const int pid = getpid(); | 374 const int pid = getpid(); |
236 task_t task = mach_task_self(); | 375 task_t task = mach_task_self(); |
237 using VMRegion = base::trace_event::ProcessMemoryMaps::VMRegion; | |
238 mach_vm_size_t size = 0; | 376 mach_vm_size_t size = 0; |
239 vm_region_submap_info_64 info; | 377 vm_region_submap_info_64 info; |
240 natural_t depth = 1; | 378 natural_t depth = 1; |
241 mach_msg_type_number_t count = sizeof(info); | 379 mach_msg_type_number_t count = sizeof(info); |
242 for (mach_vm_address_t address = MACH_VM_MIN_ADDRESS;; address += size) { | 380 for (mach_vm_address_t address = MACH_VM_MIN_ADDRESS;; address += size) { |
243 memset(&info, 0, sizeof(info)); | 381 memset(&info, 0, sizeof(info)); |
244 kern_return_t kr = mach_vm_region_recurse( | 382 kern_return_t kr = mach_vm_region_recurse( |
245 task, &address, &size, &depth, | 383 task, &address, &size, &depth, |
246 reinterpret_cast<vm_region_info_t>(&info), &count); | 384 reinterpret_cast<vm_region_info_t>(&info), &count); |
247 if (kr == KERN_INVALID_ADDRESS) // nothing else left | 385 if (kr == KERN_INVALID_ADDRESS) // nothing else left |
248 break; | 386 break; |
249 if (kr != KERN_SUCCESS) // something bad | 387 if (kr != KERN_SUCCESS) // something bad |
250 return false; | 388 return false; |
251 if (info.is_submap) { | 389 if (info.is_submap) { |
252 size = 0; | 390 size = 0; |
253 ++depth; | 391 ++depth; |
254 continue; | 392 continue; |
255 } | 393 } |
256 | 394 |
257 if (info.share_mode == SM_COW && info.ref_count == 1) | |
258 info.share_mode = SM_PRIVATE; | |
259 | |
260 VMRegion region; | 395 VMRegion region; |
261 uint64_t dirty_bytes = info.pages_dirtied * PAGE_SIZE; | 396 PopulateByteStats(&region, info); |
262 uint64_t clean_bytes = | |
263 (info.pages_resident - info.pages_reusable - info.pages_dirtied) * | |
264 PAGE_SIZE; | |
265 switch (info.share_mode) { | |
266 case SM_LARGE_PAGE: | |
267 case SM_PRIVATE: | |
268 region.byte_stats_private_dirty_resident = dirty_bytes; | |
269 region.byte_stats_private_clean_resident = clean_bytes; | |
270 break; | |
271 case SM_COW: | |
272 region.byte_stats_private_dirty_resident = dirty_bytes; | |
273 region.byte_stats_shared_clean_resident = clean_bytes; | |
274 break; | |
275 case SM_SHARED: | |
276 case SM_PRIVATE_ALIASED: | |
277 case SM_TRUESHARED: | |
278 case SM_SHARED_ALIASED: | |
279 region.byte_stats_shared_dirty_resident = dirty_bytes; | |
280 region.byte_stats_shared_clean_resident = clean_bytes; | |
281 break; | |
282 case SM_EMPTY: | |
283 break; | |
284 default: | |
285 NOTREACHED(); | |
286 break; | |
287 } | |
288 | 397 |
289 if (info.protection & VM_PROT_READ) | 398 if (info.protection & VM_PROT_READ) |
290 region.protection_flags |= VMRegion::kProtectionFlagsRead; | 399 region.protection_flags |= VMRegion::kProtectionFlagsRead; |
291 if (info.protection & VM_PROT_WRITE) | 400 if (info.protection & VM_PROT_WRITE) |
292 region.protection_flags |= VMRegion::kProtectionFlagsWrite; | 401 region.protection_flags |= VMRegion::kProtectionFlagsWrite; |
293 if (info.protection & VM_PROT_EXECUTE) | 402 if (info.protection & VM_PROT_EXECUTE) |
294 region.protection_flags |= VMRegion::kProtectionFlagsExec; | 403 region.protection_flags |= VMRegion::kProtectionFlagsExec; |
295 | 404 |
296 char buffer[MAXPATHLEN]; | 405 char buffer[MAXPATHLEN]; |
297 int length = proc_regionfilename(pid, address, buffer, MAXPATHLEN); | 406 int length = proc_regionfilename(pid, address, buffer, MAXPATHLEN); |
298 if (length != 0) | 407 if (length != 0) |
299 region.mapped_file.assign(buffer, length); | 408 region.mapped_file.assign(buffer, length); |
300 | 409 |
301 region.byte_stats_swapped = info.pages_swapped_out * PAGE_SIZE; | 410 region.byte_stats_swapped = info.pages_swapped_out * PAGE_SIZE; |
302 region.start_address = address; | 411 region.start_address = address; |
303 region.size_in_bytes = size; | 412 region.size_in_bytes = size; |
304 pmd->process_mmaps()->AddVMRegion(region); | 413 regions->push_back(region); |
305 | 414 |
306 base::CheckedNumeric<mach_vm_address_t> numeric(address); | 415 base::CheckedNumeric<mach_vm_address_t> numeric(address); |
307 numeric += size; | 416 numeric += size; |
308 if (!numeric.IsValid()) | 417 if (!numeric.IsValid()) |
309 break; | 418 return false; |
310 address = numeric.ValueOrDie(); | 419 address = numeric.ValueOrDie(); |
311 } | 420 } |
421 return true; | |
422 } | |
423 | |
424 void CopyRegionByteStats(VMRegion* dest, const VMRegion& source) { | |
425 dest->byte_stats_private_dirty_resident = | |
426 source.byte_stats_private_dirty_resident; | |
427 dest->byte_stats_private_clean_resident = | |
428 source.byte_stats_private_clean_resident; | |
429 dest->byte_stats_shared_dirty_resident = | |
430 source.byte_stats_shared_dirty_resident; | |
431 dest->byte_stats_shared_clean_resident = | |
432 source.byte_stats_shared_clean_resident; | |
433 dest->byte_stats_swapped = source.byte_stats_swapped; | |
434 dest->byte_stats_proportional_resident = | |
435 source.byte_stats_proportional_resident; | |
436 } | |
437 | |
438 } // namespace | |
439 | |
440 bool ProcessMetricsMemoryDumpProvider::DumpProcessMemoryMaps( | |
441 const base::trace_event::MemoryDumpArgs& args, | |
442 base::trace_event::ProcessMemoryDump* pmd) { | |
443 using VMRegion = base::trace_event::ProcessMemoryMaps::VMRegion; | |
444 | |
445 std::vector<VMRegion> dyld_regions; | |
446 if (!GetDyldRegions(&dyld_regions)) | |
447 return false; | |
448 std::vector<VMRegion> all_regions; | |
449 if (!GetAllRegions(&all_regions)) | |
450 return false; | |
451 | |
452 // Cache information from dyld_regions in a data-structure more conducive to | |
453 // fast lookups. | |
454 std::unordered_map<uint64_t, VMRegion*> address_to_vm_region; | |
455 std::vector<uint64_t> addresses_in_shared_region; | |
456 for (VMRegion& region : dyld_regions) { | |
457 if (IsAddressInSharedRegion(region.start_address)) | |
458 addresses_in_shared_region.push_back(region.start_address); | |
459 address_to_vm_region[region.start_address] = &region; |
460 } | |
461 | |
462 // Merge information from dyld regions and all regions. | |
463 for (const VMRegion& region : all_regions) { | |
464 // Check to see if the region already has a VMRegion created from a dyld | |
465 // load command. If so, copy the byte stats and move on. | |
466 auto it = address_to_vm_region.find(region.start_address); | |
467 if (it != address_to_vm_region.end() && | |
468 it->second->size_in_bytes == region.size_in_bytes) { | |
469 CopyRegionByteStats(it->second, region); | |
470 continue; | |
471 } | |
472 | |
473 // Skip regions in the dyld shared region that contain dyld images: byte |
474 // stats can't be attributed to individual dyld regions there, and the |
475 // region is likely shared by non-Chrome processes, so there's no point |
476 // in charging the pages towards Chrome. |
477 if (IsAddressInSharedRegion(region.start_address)) { |
478 uint64_t end_address = region.start_address + region.size_in_bytes; |
479 bool contains_dyld_image = false; |
480 for (uint64_t address : addresses_in_shared_region) { |
481 if (address >= region.start_address && address < end_address) { |
482 contains_dyld_image = true; |
483 break; |
484 } |
485 } |
486 if (contains_dyld_image) |
487 continue; |
488 } |
489 pmd->process_mmaps()->AddVMRegion(region); |
490 } |
491 |
492 for (VMRegion& region : dyld_regions) { |
493 pmd->process_mmaps()->AddVMRegion(region); |
494 } |
312 | 495 |
313 pmd->set_has_process_mmaps(); | 496 pmd->set_has_process_mmaps(); |
314 return true; | 497 return true; |
315 } | 498 } |
316 #endif // defined(OS_MACOSX) | 499 #endif // defined(OS_MACOSX) |
317 | 500 |
318 // static | 501 // static |
319 void ProcessMetricsMemoryDumpProvider::RegisterForProcess( | 502 void ProcessMetricsMemoryDumpProvider::RegisterForProcess( |
320 base::ProcessId process) { | 503 base::ProcessId process) { |
321 std::unique_ptr<ProcessMetricsMemoryDumpProvider> metrics_provider( | 504 std::unique_ptr<ProcessMetricsMemoryDumpProvider> metrics_provider( |
(...skipping 141 matching lines...) |
463 #endif | 645 #endif |
464 } | 646 } |
465 | 647 |
466 void ProcessMetricsMemoryDumpProvider::SuspendFastMemoryPolling() { | 648 void ProcessMetricsMemoryDumpProvider::SuspendFastMemoryPolling() { |
467 #if defined(OS_LINUX) || defined(OS_ANDROID) | 649 #if defined(OS_LINUX) || defined(OS_ANDROID) |
468 fast_polling_statm_fd_.reset(); | 650 fast_polling_statm_fd_.reset(); |
469 #endif | 651 #endif |
470 } | 652 } |
471 | 653 |
472 } // namespace tracing | 654 } // namespace tracing |