// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This header defines symbols that override the functions of the default
// malloc zone on macOS, routing allocations through the allocator shim.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <malloc/malloc.h>
#include <pthread.h>

#include <utility>

#include "base/logging.h"
#include "base/mac/mach_logging.h"
#include "third_party/apple_apsl/malloc.h"

namespace {

using base::allocator::AllocatorDispatch;

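// Holds the original (unshimmed) function pointers of the default malloc
// zone, captured by InitializeAllocatorShim() before the shim callbacks are
// installed.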
ChromeMallocZone g_unshimmed_zone;

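// Invokes the unshimmed version of the zone callback |m| on the default zone.
// If the original callback has not been captured yet (i.e. the shim has not
// been initialized), falls back to whatever the default zone currently
// provides.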
template <class F, class... Args>
auto CallUnshimmed(F ChromeMallocZone::*m, Args... args)
    -> decltype(std::declval<F>()(nullptr, args...)) {
  F f = g_unshimmed_zone.*m
            ? g_unshimmed_zone.*m
            : reinterpret_cast<ChromeMallocZone*>(malloc_default_zone())->*m;
  return f(malloc_default_zone(), args...);
}

void* MallocImpl(const AllocatorDispatch*, size_t size) {
  return CallUnshimmed(&ChromeMallocZone::malloc, size);
}

void* CallocImpl(const AllocatorDispatch*, size_t n, size_t size) {
  return CallUnshimmed(&ChromeMallocZone::calloc, n, size);
}

void* MemalignImpl(const AllocatorDispatch*, size_t alignment, size_t size) {
  // The memalign callback only exists on malloc zones of version >= 5. On
  // older zones, fall back to valloc() for page-aligned requests.
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  if (default_zone->version >= 5) {
    return CallUnshimmed(&ChromeMallocZone::memalign, alignment, size);
  } else if (alignment == GetCachedPageSize()) {
    return CallUnshimmed(&ChromeMallocZone::valloc, size);
  } else {
    CHECK(false) << "memalign() is not implemented.";
    return nullptr;
  }
}

void* ReallocImpl(const AllocatorDispatch*, void* ptr, size_t size) {
  return CallUnshimmed(&ChromeMallocZone::realloc, ptr, size);
}

void FreeImpl(const AllocatorDispatch*, void* ptr) {
  CallUnshimmed(&ChromeMallocZone::free, ptr);
}

size_t GetSizeEstimateImpl(const AllocatorDispatch*, void* ptr) {
  return CallUnshimmed(&ChromeMallocZone::size, ptr);
}

// Starting with Mac OS X 10.7, the zone allocators set up by the system are
// read-only, to prevent them from being overwritten in an attack. However,
// blindly unprotecting and reprotecting the zone allocators fails with
// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
// memory in its bss. Explicit saving/restoring of the protection is required.
//
// This function takes a pointer to a malloc zone, de-protects it if necessary,
// and returns (in the out parameters) a region of memory (if any) to be
// re-protected when modifications are complete. This approach assumes that
// there is no contention for the protection of this memory.
void DeprotectMallocZone(ChromeMallocZone* default_zone,
                         mach_vm_address_t* reprotection_start,
                         mach_vm_size_t* reprotection_length,
                         vm_prot_t* reprotection_value) {
  mach_port_t unused;
  *reprotection_start = reinterpret_cast<mach_vm_address_t>(default_zone);
  struct vm_region_basic_info_64 info;
  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
  kern_return_t result =
      mach_vm_region(mach_task_self(),
                     reprotection_start,
                     reprotection_length,
                     VM_REGION_BASIC_INFO_64,
                     reinterpret_cast<vm_region_info_t>(&info),
                     &count,
                     &unused);
  MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_region";

  // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
  // balance it with a deallocate in case this ever changes. See 10.9.2
  // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
  mach_port_deallocate(mach_task_self(), unused);

  // Does the region fully enclose the zone pointers? Possibly unwarranted
  // simplification: this uses the size of a full version 8 malloc zone rather
  // than the actual smaller size if the passed-in zone is not version 8.
  CHECK(*reprotection_start <=
        reinterpret_cast<mach_vm_address_t>(default_zone));
  mach_vm_size_t zone_offset =
      reinterpret_cast<mach_vm_address_t>(default_zone) - *reprotection_start;
  CHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);

  if (info.protection & VM_PROT_WRITE) {
    // No change needed; the zone is already writable.
    *reprotection_start = 0;
    *reprotection_length = 0;
    *reprotection_value = VM_PROT_NONE;
  } else {
    *reprotection_value = info.protection;
    result = mach_vm_protect(mach_task_self(),
                             *reprotection_start,
                             *reprotection_length,
                             false,
                             info.protection | VM_PROT_WRITE);
    MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
  }
}

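// Replaces the callbacks of |zone| with the shim entry points (ShimMalloc(),
// ShimFree(), etc., provided by allocator_shim.cc), so that every allocation
// on the zone flows through the allocator shim's dispatch chain.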
void ShimZoneCallbacks(ChromeMallocZone* zone) {
  zone->size = [](malloc_zone_t*, const void* ptr) -> size_t {
    return ShimGetSizeEstimate(ptr);
  };
  zone->malloc = [](malloc_zone_t*, size_t size) -> void* {
    return ShimMalloc(size);
  };
  zone->calloc = [](malloc_zone_t*, size_t n, size_t size) -> void* {
    return ShimCalloc(n, size);
  };
  zone->valloc = [](malloc_zone_t*, size_t size) -> void* {
    return ShimValloc(size);
  };
  zone->free = [](malloc_zone_t*, void* ptr) {
    ShimFree(ptr);
  };
  zone->realloc = [](malloc_zone_t*, void* ptr, size_t size) -> void* {
    return ShimRealloc(ptr, size);
  };

  if (zone->version >= 5) {
    zone->memalign = [](malloc_zone_t*, size_t alignment,
                        size_t size) -> void* {
      return ShimMemalign(alignment, size);
    };
  }
  if (zone->version >= 6) {
    zone->free_definite_size = [](malloc_zone_t*, void* ptr, size_t) {
      ShimFree(ptr);
    };
  }
}

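// Installs the shim on the default malloc zone: temporarily makes the zone
// writable, saves its original callbacks into g_unshimmed_zone, swaps in the
// shim callbacks, and then restores the original memory protection.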
void InitializeAllocatorShim() {
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());

  mach_vm_address_t default_reprotection_start = 0;
  mach_vm_size_t default_reprotection_length = 0;
  vm_prot_t default_reprotection_value = VM_PROT_NONE;
  DeprotectMallocZone(default_zone,
                      &default_reprotection_start,
                      &default_reprotection_length,
                      &default_reprotection_value);

  CHECK(default_zone->size &&
        default_zone->malloc &&
        default_zone->calloc &&
        default_zone->valloc &&
        default_zone->free &&
        default_zone->realloc)
      << "Failed to get system allocation functions.";
  g_unshimmed_zone.size = default_zone->size;
  g_unshimmed_zone.malloc = default_zone->malloc;
  g_unshimmed_zone.calloc = default_zone->calloc;
  g_unshimmed_zone.valloc = default_zone->valloc;
  g_unshimmed_zone.free = default_zone->free;
  g_unshimmed_zone.realloc = default_zone->realloc;

  g_unshimmed_zone.version = default_zone->version;
  if (default_zone->version >= 5) {
    g_unshimmed_zone.memalign = default_zone->memalign;
  }
  if (default_zone->version >= 6) {
    g_unshimmed_zone.free_definite_size = default_zone->free_definite_size;
  }

  ShimZoneCallbacks(default_zone);

  if (default_reprotection_start) {
    kern_return_t result = mach_vm_protect(mach_task_self(),
                                           default_reprotection_start,
                                           default_reprotection_length,
                                           false,
                                           default_reprotection_value);
    MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
  }
}

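// Static initializer whose constructor runs when allocator_shim.cc is
// initialized, installing the shim exactly once.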
struct AllocatorShimInitializer {
  AllocatorShimInitializer() {
    static pthread_once_t once = PTHREAD_ONCE_INIT;
    pthread_once(&once, []() { InitializeAllocatorShim(); });
  }
} g_allocator_shim_initializer;

}  // namespace

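// The dispatch table consumed by the allocator shim. With next set to nullptr
// it terminates the shim chain, routing each operation to the saved unshimmed
// zone functions.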
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &MallocImpl,          /* alloc_function */
    &CallocImpl,          /* alloc_zero_initialized_function */
    &MemalignImpl,        /* alloc_aligned_function */
    &ReallocImpl,         /* realloc_function */
    &FreeImpl,            /* free_function */
    &GetSizeEstimateImpl, /* get_size_estimate_function */
    nullptr,              /* next */
};