| Index: base/allocator/allocator_interception_mac.mm
|
| diff --git a/base/process/memory_mac.mm b/base/allocator/allocator_interception_mac.mm
|
| similarity index 77%
|
| copy from base/process/memory_mac.mm
|
| copy to base/allocator/allocator_interception_mac.mm
|
| index 4c1b12043e60b64fbc861102cb542e4943c6c0dc..913136d642bc96c2520d0734887449d53c62c8a4 100644
|
| --- a/base/process/memory_mac.mm
|
| +++ b/base/allocator/allocator_interception_mac.mm
|
| @@ -1,8 +1,21 @@
|
| -// Copyright (c) 2013 The Chromium Authors. All rights reserved.
|
| +// Copyright 2017 The Chromium Authors. All rights reserved.
|
| // Use of this source code is governed by a BSD-style license that can be
|
| // found in the LICENSE file.
|
|
|
| -#include "base/process/memory.h"
|
| +// This file contains all the logic necessary to intercept allocations on
|
| +// macOS. "malloc zones" are an abstraction that allows the process to intercept
|
| +// all malloc-related functions. There is no good mechanism [short of
|
| +// interposition] to determine when new malloc zones are added, so there's no clean
|
| +// mechanism to intercept all malloc zones. This file contains logic to
|
| +// intercept the default and purgeable zones, which always exist. A cursory
|
| +// review of Chrome seems to imply that non-default zones are almost never used.
|
| +//
|
| +// This file also contains logic to intercept Core Foundation and Objective-C
|
| +// allocations. The implementations forward to the default malloc zone, so the
|
| +// only reason to intercept these calls is to re-label OOM crashes with slightly
|
| +// more details.
|
| +
|
| +#include "base/allocator/allocator_interception_mac.h"
|
|
|
| #include <CoreFoundation/CoreFoundation.h>
|
| #import <Foundation/Foundation.h>
|
| @@ -15,24 +28,17 @@
|
|
|
| #include <new>
|
|
|
| -#include "base/lazy_instance.h"
|
| #include "base/logging.h"
|
| #include "base/mac/mac_util.h"
|
| #include "base/mac/mach_logging.h"
|
| +#include "base/process/memory.h"
|
| #include "base/scoped_clear_errno.h"
|
| #include "build/build_config.h"
|
| #include "third_party/apple_apsl/CFBase.h"
|
| #include "third_party/apple_apsl/malloc.h"
|
|
|
| namespace base {
|
| -
|
| -void EnableTerminationOnHeapCorruption() {
|
| -#if !ARCH_CPU_64_BITS
|
| - DLOG(WARNING) << "EnableTerminationOnHeapCorruption only works on 64-bit";
|
| -#endif
|
| -}
|
| -
|
| -// ------------------------------------------------------------------------
|
| +namespace allocator {
|
|
|
| namespace {
|
|
|
| @@ -58,14 +64,10 @@ void DeprotectMallocZone(ChromeMallocZone* default_zone,
|
| *reprotection_start = reinterpret_cast<mach_vm_address_t>(default_zone);
|
| struct vm_region_basic_info_64 info;
|
| mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
|
| - kern_return_t result =
|
| - mach_vm_region(mach_task_self(),
|
| - reprotection_start,
|
| - reprotection_length,
|
| - VM_REGION_BASIC_INFO_64,
|
| - reinterpret_cast<vm_region_info_t>(&info),
|
| - &count,
|
| - &unused);
|
| + kern_return_t result = mach_vm_region(
|
| + mach_task_self(), reprotection_start, reprotection_length,
|
| + VM_REGION_BASIC_INFO_64, reinterpret_cast<vm_region_info_t>(&info),
|
| + &count, &unused);
|
| MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_region";
|
|
|
| // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
|
| @@ -77,8 +79,9 @@ void DeprotectMallocZone(ChromeMallocZone* default_zone,
|
| // simplification used: using the size of a full version 8 malloc zone rather
|
| // than the actual smaller size if the passed-in zone is not version 8.
|
| CHECK(*reprotection_start <=
|
| - reinterpret_cast<mach_vm_address_t>(default_zone));
|
| - mach_vm_size_t zone_offset = reinterpret_cast<mach_vm_size_t>(default_zone) -
|
| + reinterpret_cast<mach_vm_address_t>(default_zone));
|
| + mach_vm_size_t zone_offset =
|
| + reinterpret_cast<mach_vm_size_t>(default_zone) -
|
| reinterpret_cast<mach_vm_size_t>(*reprotection_start);
|
| CHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);
|
|
|
| @@ -89,10 +92,8 @@ void DeprotectMallocZone(ChromeMallocZone* default_zone,
|
| *reprotection_value = VM_PROT_NONE;
|
| } else {
|
| *reprotection_value = info.protection;
|
| - result = mach_vm_protect(mach_task_self(),
|
| - *reprotection_start,
|
| - *reprotection_length,
|
| - false,
|
| + result = mach_vm_protect(mach_task_self(), *reprotection_start,
|
| + *reprotection_length, false,
|
| info.protection | VM_PROT_WRITE);
|
| MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
|
| }
|
| @@ -100,15 +101,12 @@ void DeprotectMallocZone(ChromeMallocZone* default_zone,
|
|
|
| // === C malloc/calloc/valloc/realloc/posix_memalign ===
|
|
|
| -typedef void* (*malloc_type)(struct _malloc_zone_t* zone,
|
| - size_t size);
|
| +typedef void* (*malloc_type)(struct _malloc_zone_t* zone, size_t size);
|
| typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
|
| size_t num_items,
|
| size_t size);
|
| -typedef void* (*valloc_type)(struct _malloc_zone_t* zone,
|
| - size_t size);
|
| -typedef void (*free_type)(struct _malloc_zone_t* zone,
|
| - void* ptr);
|
| +typedef void* (*valloc_type)(struct _malloc_zone_t* zone, size_t size);
|
| +typedef void (*free_type)(struct _malloc_zone_t* zone, void* ptr);
|
| typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
|
| void* ptr,
|
| size_t size);
|
| @@ -130,8 +128,7 @@ free_type g_old_free_purgeable;
|
| realloc_type g_old_realloc_purgeable;
|
| memalign_type g_old_memalign_purgeable;
|
|
|
| -void* oom_killer_malloc(struct _malloc_zone_t* zone,
|
| - size_t size) {
|
| +void* oom_killer_malloc(struct _malloc_zone_t* zone, size_t size) {
|
| void* result = g_old_malloc(zone, size);
|
| if (!result && size)
|
| TerminateBecauseOutOfMemory(size);
|
| @@ -147,22 +144,18 @@ void* oom_killer_calloc(struct _malloc_zone_t* zone,
|
| return result;
|
| }
|
|
|
| -void* oom_killer_valloc(struct _malloc_zone_t* zone,
|
| - size_t size) {
|
| +void* oom_killer_valloc(struct _malloc_zone_t* zone, size_t size) {
|
| void* result = g_old_valloc(zone, size);
|
| if (!result && size)
|
| TerminateBecauseOutOfMemory(size);
|
| return result;
|
| }
|
|
|
| -void oom_killer_free(struct _malloc_zone_t* zone,
|
| - void* ptr) {
|
| +void oom_killer_free(struct _malloc_zone_t* zone, void* ptr) {
|
| g_old_free(zone, ptr);
|
| }
|
|
|
| -void* oom_killer_realloc(struct _malloc_zone_t* zone,
|
| - void* ptr,
|
| - size_t size) {
|
| +void* oom_killer_realloc(struct _malloc_zone_t* zone, void* ptr, size_t size) {
|
| void* result = g_old_realloc(zone, ptr, size);
|
| if (!result && size)
|
| TerminateBecauseOutOfMemory(size);
|
| @@ -183,8 +176,7 @@ void* oom_killer_memalign(struct _malloc_zone_t* zone,
|
| return result;
|
| }
|
|
|
| -void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone,
|
| - size_t size) {
|
| +void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
|
| void* result = g_old_malloc_purgeable(zone, size);
|
| if (!result && size)
|
| TerminateBecauseOutOfMemory(size);
|
| @@ -200,16 +192,14 @@ void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
|
| return result;
|
| }
|
|
|
| -void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone,
|
| - size_t size) {
|
| +void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
|
| void* result = g_old_valloc_purgeable(zone, size);
|
| if (!result && size)
|
| TerminateBecauseOutOfMemory(size);
|
| return result;
|
| }
|
|
|
| -void oom_killer_free_purgeable(struct _malloc_zone_t* zone,
|
| - void* ptr) {
|
| +void oom_killer_free_purgeable(struct _malloc_zone_t* zone, void* ptr) {
|
| g_old_free_purgeable(zone, ptr);
|
| }
|
|
|
| @@ -229,8 +219,8 @@ void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
|
| // Only die if posix_memalign would have returned ENOMEM, since there are
|
| // other reasons why NULL might be returned (see
|
| // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
|
| - if (!result && size && alignment >= sizeof(void*)
|
| - && (alignment & (alignment - 1)) == 0) {
|
| + if (!result && size && alignment >= sizeof(void*) &&
|
| + (alignment & (alignment - 1)) == 0) {
|
| TerminateBecauseOutOfMemory(size);
|
| }
|
| return result;
|
| @@ -253,9 +243,8 @@ bool CanGetContextForCFAllocator() {
|
| }
|
|
|
| CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
|
| - ChromeCFAllocatorLions* our_allocator =
|
| - const_cast<ChromeCFAllocatorLions*>(
|
| - reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
|
| + ChromeCFAllocatorLions* our_allocator = const_cast<ChromeCFAllocatorLions*>(
|
| + reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
|
| return &our_allocator->_context;
|
| }
|
|
|
| @@ -297,8 +286,7 @@ void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
|
| typedef id (*allocWithZone_t)(id, SEL, NSZone*);
|
| allocWithZone_t g_old_allocWithZone;
|
|
|
| -id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone)
|
| -{
|
| +id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
|
| id result = g_old_allocWithZone(self, _cmd, zone);
|
| if (!result)
|
| TerminateBecauseOutOfMemory(0);
|
| @@ -307,7 +295,7 @@ id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone)
|
|
|
| } // namespace
|
|
|
| -bool UncheckedMalloc(size_t size, void** result) {
|
| +bool UncheckedMallocMac(size_t size, void** result) {
|
| #if defined(ADDRESS_SANITIZER)
|
| *result = malloc(size);
|
| #else
|
| @@ -321,7 +309,7 @@ bool UncheckedMalloc(size_t size, void** result) {
|
| return *result != NULL;
|
| }
|
|
|
| -bool UncheckedCalloc(size_t num_items, size_t size, void** result) {
|
| +bool UncheckedCallocMac(size_t num_items, size_t size, void** result) {
|
| #if defined(ADDRESS_SANITIZER)
|
| *result = calloc(num_items, size);
|
| #else
|
| @@ -335,41 +323,33 @@ bool UncheckedCalloc(size_t num_items, size_t size, void** result) {
|
| return *result != NULL;
|
| }
|
|
|
| -void* UncheckedMalloc(size_t size) {
|
| - void* address;
|
| - return UncheckedMalloc(size, &address) ? address : NULL;
|
| -}
|
| -
|
| -void* UncheckedCalloc(size_t num_items, size_t size) {
|
| - void* address;
|
| - return UncheckedCalloc(num_items, size, &address) ? address : NULL;
|
| -}
|
| -
|
| -void EnableTerminationOnOutOfMemory() {
|
| +void InterceptAllocationsMac() {
|
| if (g_oom_killer_enabled)
|
| return;
|
|
|
| g_oom_killer_enabled = true;
|
|
|
| - // === C malloc/calloc/valloc/realloc/posix_memalign ===
|
| +// === C malloc/calloc/valloc/realloc/posix_memalign ===
|
|
|
| - // This approach is not perfect, as requests for amounts of memory larger than
|
| - // MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will
|
| - // still fail with a NULL rather than dying (see
|
| - // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for details).
|
| - // Unfortunately, it's the best we can do. Also note that this does not affect
|
| - // allocations from non-default zones.
|
| +// This approach is not perfect, as requests for amounts of memory larger than
|
| +// MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will
|
| +// still fail with a NULL rather than dying (see
|
| +// http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for details).
|
| +// Unfortunately, it's the best we can do. Also note that this does not affect
|
| +// allocations from non-default zones.
|
|
|
| #if !defined(ADDRESS_SANITIZER)
|
| // Don't do anything special on OOM for the malloc zones replaced by
|
| // AddressSanitizer, as modifying or protecting them may not work correctly.
|
|
|
| CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc &&
|
| - !g_old_memalign) << "Old allocators unexpectedly non-null";
|
| + !g_old_memalign)
|
| + << "Old allocators unexpectedly non-null";
|
|
|
| CHECK(!g_old_malloc_purgeable && !g_old_calloc_purgeable &&
|
| !g_old_valloc_purgeable && !g_old_realloc_purgeable &&
|
| - !g_old_memalign_purgeable) << "Old allocators unexpectedly non-null";
|
| + !g_old_memalign_purgeable)
|
| + << "Old allocators unexpectedly non-null";
|
|
|
| ChromeMallocZone* default_zone =
|
| reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
|
| @@ -379,8 +359,7 @@ void EnableTerminationOnOutOfMemory() {
|
| mach_vm_address_t default_reprotection_start = 0;
|
| mach_vm_size_t default_reprotection_length = 0;
|
| vm_prot_t default_reprotection_value = VM_PROT_NONE;
|
| - DeprotectMallocZone(default_zone,
|
| - &default_reprotection_start,
|
| + DeprotectMallocZone(default_zone, &default_reprotection_start,
|
| &default_reprotection_length,
|
| &default_reprotection_value);
|
|
|
| @@ -388,8 +367,7 @@ void EnableTerminationOnOutOfMemory() {
|
| mach_vm_size_t purgeable_reprotection_length = 0;
|
| vm_prot_t purgeable_reprotection_value = VM_PROT_NONE;
|
| if (purgeable_zone) {
|
| - DeprotectMallocZone(purgeable_zone,
|
| - &purgeable_reprotection_start,
|
| + DeprotectMallocZone(purgeable_zone, &purgeable_reprotection_start,
|
| &purgeable_reprotection_length,
|
| &purgeable_reprotection_value);
|
| }
|
| @@ -446,20 +424,16 @@ void EnableTerminationOnOutOfMemory() {
|
| // Restore protection if it was active.
|
|
|
| if (default_reprotection_start) {
|
| - kern_return_t result = mach_vm_protect(mach_task_self(),
|
| - default_reprotection_start,
|
| - default_reprotection_length,
|
| - false,
|
| - default_reprotection_value);
|
| + kern_return_t result = mach_vm_protect(
|
| + mach_task_self(), default_reprotection_start,
|
| + default_reprotection_length, false, default_reprotection_value);
|
| MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
|
| }
|
|
|
| if (purgeable_reprotection_start) {
|
| - kern_return_t result = mach_vm_protect(mach_task_self(),
|
| - purgeable_reprotection_start,
|
| - purgeable_reprotection_length,
|
| - false,
|
| - purgeable_reprotection_value);
|
| + kern_return_t result = mach_vm_protect(
|
| + mach_task_self(), purgeable_reprotection_start,
|
| + purgeable_reprotection_length, false, purgeable_reprotection_value);
|
| MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
|
| }
|
| #endif
|
| @@ -535,18 +509,18 @@ void EnableTerminationOnOutOfMemory() {
|
| // Note that both +[NSObject new] and +[NSObject alloc] call through to
|
| // +[NSObject allocWithZone:].
|
|
|
| - CHECK(!g_old_allocWithZone)
|
| - << "Old allocator unexpectedly non-null";
|
| + CHECK(!g_old_allocWithZone) << "Old allocator unexpectedly non-null";
|
|
|
| Class nsobject_class = [NSObject class];
|
| - Method orig_method = class_getClassMethod(nsobject_class,
|
| - @selector(allocWithZone:));
|
| - g_old_allocWithZone = reinterpret_cast<allocWithZone_t>(
|
| - method_getImplementation(orig_method));
|
| + Method orig_method =
|
| + class_getClassMethod(nsobject_class, @selector(allocWithZone:));
|
| + g_old_allocWithZone =
|
| + reinterpret_cast<allocWithZone_t>(method_getImplementation(orig_method));
|
| CHECK(g_old_allocWithZone)
|
| << "Failed to get allocWithZone allocation function.";
|
| method_setImplementation(orig_method,
|
| reinterpret_cast<IMP>(oom_killer_allocWithZone));
|
| }
|
|
|
| +} // namespace allocator
|
| } // namespace base
|
|
|