| Index: base/allocator/allocator_shim_override_mac_symbols.h
|
| diff --git a/base/allocator/allocator_shim_override_mac_symbols.h b/base/allocator/allocator_shim_override_mac_symbols.h
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..75c9b7c88a0c6b7f4a536651c9c6d8d519999fee
|
| --- /dev/null
|
| +++ b/base/allocator/allocator_shim_override_mac_symbols.h
|
| @@ -0,0 +1,225 @@
|
| +// Copyright 2016 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +// This header defines symbols to override the same functions in the macOS
|
| +// default malloc zone (the system allocator implementation on Mac).
|
| +
|
| +#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
|
| +#error This header is meant to be included only once by allocator_shim.cc
|
| +#endif
|
| +#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
|
| +
|
| +#include <pthread.h>
|
| +#include <mach/mach.h>
|
| +#include <mach/mach_vm.h>
|
| +#include <malloc/malloc.h>
|
| +
|
| +#include "base/logging.h"
|
| +#include "base/mac/mach_logging.h"
|
| +#include "third_party/apple_apsl/malloc.h"
|
| +
|
| +namespace {
|
| +
|
| +using base::allocator::AllocatorDispatch;
|
| +
|
| +ChromeMallocZone g_unshimmed_zone;
|
| +
|
| +template <class F, class ...Args>
|
| +auto CallUnshimmed(F ChromeMallocZone::*m, Args... args)
|
| + -> decltype(std::declval<F>()(nullptr, args...)) {
|
| + F f = g_unshimmed_zone.*m ?
|
| + g_unshimmed_zone.*m :
|
| + reinterpret_cast<ChromeMallocZone*>(malloc_default_zone())->*m;
|
| + return f(malloc_default_zone(), args...);
|
| +}
|
| +
|
| +void* MallocImpl(const AllocatorDispatch*, size_t size) {
|
| + return CallUnshimmed(&ChromeMallocZone::malloc, size);
|
| +}
|
| +
|
| +void* CallocImpl(const AllocatorDispatch*, size_t n, size_t size) {
|
| + return CallUnshimmed(&ChromeMallocZone::calloc, n, size);
|
| +}
|
| +
|
| +void* MemalignImpl(const AllocatorDispatch*, size_t alignment, size_t size) {
|
| +  ChromeMallocZone* default_zone =
|
| +      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
|
| +  if (default_zone->version >= 5) {
|
| + return CallUnshimmed(&ChromeMallocZone::memalign, alignment, size);
|
| + } else if (alignment == GetCachedPageSize()) {
|
| + return CallUnshimmed(&ChromeMallocZone::valloc, size);
|
| + } else {
|
| + CHECK(false) << "memalign() is not implemented.";
|
| + return nullptr;
|
| + }
|
| +}
|
| +
|
| +void* ReallocImpl(const AllocatorDispatch*, void* ptr, size_t size) {
|
| + return CallUnshimmed(&ChromeMallocZone::realloc, ptr, size);
|
| +}
|
| +
|
| +void FreeImpl(const AllocatorDispatch*, void* ptr) {
|
| + CallUnshimmed(&ChromeMallocZone::free, ptr);
|
| +}
|
| +
|
| +size_t GetSizeEstimateImpl(const AllocatorDispatch*, void* ptr) {
|
| + return CallUnshimmed(&ChromeMallocZone::size, ptr);
|
| +}
|
| +
|
| +// Starting with Mac OS X 10.7, the zone allocators set up by the system are
|
| +// read-only, to prevent them from being overwritten in an attack. However,
|
| +// blindly unprotecting and reprotecting the zone allocators fails with
|
| +// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
|
| +// memory in its bss. Explicit saving/restoring of the protection is required.
|
| +//
|
| +// This function takes a pointer to a malloc zone, de-protects it if necessary,
|
| +// and returns (in the out parameters) a region of memory (if any) to be
|
| +// re-protected when modifications are complete. This approach assumes that
|
| +// there is no contention for the protection of this memory.
|
| +void DeprotectMallocZone(ChromeMallocZone* default_zone,
|
| + mach_vm_address_t* reprotection_start,
|
| + mach_vm_size_t* reprotection_length,
|
| + vm_prot_t* reprotection_value) {
|
| + mach_port_t unused;
|
| + *reprotection_start = reinterpret_cast<mach_vm_address_t>(default_zone);
|
| + struct vm_region_basic_info_64 info;
|
| + mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
|
| + kern_return_t result =
|
| + mach_vm_region(mach_task_self(),
|
| + reprotection_start,
|
| + reprotection_length,
|
| + VM_REGION_BASIC_INFO_64,
|
| + reinterpret_cast<vm_region_info_t>(&info),
|
| + &count,
|
| + &unused);
|
| + MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_region";
|
| +
|
| + // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
|
| + // balance it with a deallocate in case this ever changes. See 10.9.2
|
| + // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
|
| + mach_port_deallocate(mach_task_self(), unused);
|
| +
|
| + // Does the region fully enclose the zone pointers? Possibly unwarranted
|
| + // simplification used: using the size of a full version 8 malloc zone rather
|
| + // than the actual smaller size if the passed-in zone is not version 8.
|
| + CHECK(*reprotection_start <=
|
| + reinterpret_cast<mach_vm_address_t>(default_zone));
|
| + mach_vm_size_t zone_offset = reinterpret_cast<mach_vm_size_t>(default_zone) -
|
| + reinterpret_cast<mach_vm_size_t>(*reprotection_start);
|
| + CHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);
|
| +
|
| + if (info.protection & VM_PROT_WRITE) {
|
| + // No change needed; the zone is already writable.
|
| + *reprotection_start = 0;
|
| + *reprotection_length = 0;
|
| + *reprotection_value = VM_PROT_NONE;
|
| + } else {
|
| + *reprotection_value = info.protection;
|
| + result = mach_vm_protect(mach_task_self(),
|
| + *reprotection_start,
|
| + *reprotection_length,
|
| + false,
|
| + info.protection | VM_PROT_WRITE);
|
| + MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
|
| + }
|
| +}
|
| +
|
| +void ShimZoneCallbacks(ChromeMallocZone* zone) {
|
| + zone->size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
|
| + return ShimGetSizeEstimate(ptr);
|
| + };
|
| + zone->malloc = [](malloc_zone_t* zone, size_t size) -> void* {
|
| + return ShimMalloc(size);
|
| + };
|
| + zone->calloc = [](malloc_zone_t* zone, size_t n, size_t size) -> void* {
|
| + return ShimCalloc(n, size);
|
| + };
|
| + zone->valloc = [](malloc_zone_t* zone, size_t size) -> void* {
|
| + return ShimValloc(size);
|
| + };
|
| + zone->free = [](malloc_zone_t* zone, void* ptr) {
|
| + ShimFree(ptr);
|
| + };
|
| + zone->realloc = [](malloc_zone_t* zone, void* ptr, size_t size) -> void* {
|
| + return ShimRealloc(ptr, size);
|
| + };
|
| +
|
| + if (zone->version >= 5) {
|
| + zone->memalign = [](malloc_zone_t* zone,
|
| + size_t alignment, size_t size) -> void* {
|
| + return ShimMemalign(alignment, size);
|
| + };
|
| + }
|
| + if (zone->version >= 6) {
|
| + zone->free_definite_size = [](malloc_zone_t* zone,
|
| + void* ptr, size_t size) {
|
| + ShimFree(ptr);
|
| + };
|
| + }
|
| +}
|
| +
|
| +void InitializeAllocatorShim() {
|
| + ChromeMallocZone* default_zone =
|
| + reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
|
| +
|
| + mach_vm_address_t default_reprotection_start = 0;
|
| + mach_vm_size_t default_reprotection_length = 0;
|
| + vm_prot_t default_reprotection_value = VM_PROT_NONE;
|
| + DeprotectMallocZone(default_zone,
|
| + &default_reprotection_start,
|
| + &default_reprotection_length,
|
| + &default_reprotection_value);
|
| +
|
| + CHECK(default_zone->size &&
|
| + default_zone->malloc &&
|
| + default_zone->calloc &&
|
| + default_zone->valloc &&
|
| + default_zone->free &&
|
| + default_zone->realloc)
|
| + << "Failed to get system allocation functions.";
|
| + g_unshimmed_zone.size = default_zone->size;
|
| + g_unshimmed_zone.malloc = default_zone->malloc;
|
| + g_unshimmed_zone.calloc = default_zone->calloc;
|
| + g_unshimmed_zone.valloc = default_zone->valloc;
|
| + g_unshimmed_zone.free = default_zone->free;
|
| + g_unshimmed_zone.realloc = default_zone->realloc;
|
| +
|
| + g_unshimmed_zone.version = default_zone->version;
|
| + if (default_zone->version >= 5) {
|
| + g_unshimmed_zone.memalign = default_zone->memalign;
|
| + }
|
| + if (default_zone->version >= 6) {
|
| + g_unshimmed_zone.free_definite_size = default_zone->free_definite_size;
|
| + }
|
| +
|
| + ShimZoneCallbacks(default_zone);
|
| +
|
| + if (default_reprotection_start) {
|
| + kern_return_t result = mach_vm_protect(mach_task_self(),
|
| + default_reprotection_start,
|
| + default_reprotection_length,
|
| + false,
|
| + default_reprotection_value);
|
| + MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
|
| + }
|
| +}
|
| +
|
| +struct AllocatorShimInitializer {
|
| + AllocatorShimInitializer() {
|
| + pthread_once_t once = PTHREAD_ONCE_INIT;
|
| + pthread_once(&once, []() { InitializeAllocatorShim(); });
|
| + }
|
| +} g_allocator_shim_initializer;
|
| +
|
| +} // namespace
|
| +
|
| +const AllocatorDispatch AllocatorDispatch::default_dispatch = {
|
| + &MallocImpl, /* alloc_function */
|
| + &CallocImpl, /* alloc_zero_initialized_function */
|
| + &MemalignImpl, /* alloc_aligned_function */
|
| + &ReallocImpl, /* realloc_function */
|
| + &FreeImpl, /* free_function */
|
| + &GetSizeEstimateImpl, /* get_size_estimate_function */
|
| + nullptr, /* next */
|
| +};
|
|
|