Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2017 The Chromium Authors. All rights reserved. | 1 // Copyright 2017 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // This file contains all the logic necessary to intercept allocations on | 5 // This file contains all the logic necessary to intercept allocations on |
| 6 // macOS. "malloc zones" are an abstraction that allows the process to intercept | 6 // macOS. "malloc zones" are an abstraction that allows the process to intercept |
| 7 // all malloc-related functions. There is no good mechanism [short of | 7 // all malloc-related functions. There is no good mechanism [short of |
| 8 // interposition] to determine new malloc zones are added, so there's no clean | 8 // interposition] to determine new malloc zones are added, so there's no clean |
| 9 // mechanism to intercept all malloc zones. This file contains logic to | 9 // mechanism to intercept all malloc zones. This file contains logic to |
| 10 // intercept the default and purgeable zones, which always exist. A cursory | 10 // intercept the default and purgeable zones, which always exist. A cursory |
| 11 // review of Chrome seems to imply that non-default zones are almost never used. | 11 // review of Chrome seems to imply that non-default zones are almost never used. |
| 12 // | 12 // |
| 13 // This file also contains logic to intercept Core Foundation and Objective-C | 13 // This file also contains logic to intercept Core Foundation and Objective-C |
| 14 // allocations. The implementations forward to the default malloc zone, so the | 14 // allocations. The implementations forward to the default malloc zone, so the |
| 15 // only reason to intercept these calls is to re-label OOM crashes with slightly | 15 // only reason to intercept these calls is to re-label OOM crashes with slightly |
| 16 // more details. | 16 // more details. |
| 17 | 17 |
| 18 #include "base/allocator/allocator_interception_mac.h" | 18 #include "base/allocator/allocator_interception_mac.h" |
| 19 | 19 |
| 20 #include <CoreFoundation/CoreFoundation.h> | 20 #include <CoreFoundation/CoreFoundation.h> |
| 21 #import <Foundation/Foundation.h> | 21 #import <Foundation/Foundation.h> |
| 22 #include <errno.h> | 22 #include <errno.h> |
| 23 #include <mach/mach.h> | 23 #include <mach/mach.h> |
| 24 #include <mach/mach_vm.h> | 24 #include <mach/mach_vm.h> |
| 25 #include <malloc/malloc.h> | |
| 26 #import <objc/runtime.h> | 25 #import <objc/runtime.h> |
| 27 #include <stddef.h> | 26 #include <stddef.h> |
| 28 | 27 |
| 29 #include <new> | 28 #include <new> |
| 30 | 29 |
| 31 #include "base/logging.h" | 30 #include "base/logging.h" |
| 32 #include "base/mac/mac_util.h" | 31 #include "base/mac/mac_util.h" |
| 33 #include "base/mac/mach_logging.h" | 32 #include "base/mac/mach_logging.h" |
| 34 #include "base/process/memory.h" | 33 #include "base/process/memory.h" |
| 35 #include "base/scoped_clear_errno.h" | 34 #include "base/scoped_clear_errno.h" |
| 36 #include "build/build_config.h" | 35 #include "build/build_config.h" |
| 37 #include "third_party/apple_apsl/CFBase.h" | 36 #include "third_party/apple_apsl/CFBase.h" |
| 38 #include "third_party/apple_apsl/malloc.h" | |
| 39 | 37 |
| 40 namespace base { | 38 namespace base { |
| 41 namespace allocator { | 39 namespace allocator { |
| 42 | 40 |
| 43 namespace { | 41 namespace { |
| 44 | 42 |
| 45 bool g_oom_killer_enabled; | 43 bool g_oom_killer_enabled; |
| 46 | 44 |
| 47 #if !defined(ADDRESS_SANITIZER) | 45 #if !defined(ADDRESS_SANITIZER) |
| 48 | 46 |
| (...skipping 43 matching lines...) | |
| 92 *reprotection_value = VM_PROT_NONE; | 90 *reprotection_value = VM_PROT_NONE; |
| 93 } else { | 91 } else { |
| 94 *reprotection_value = info.protection; | 92 *reprotection_value = info.protection; |
| 95 result = mach_vm_protect(mach_task_self(), *reprotection_start, | 93 result = mach_vm_protect(mach_task_self(), *reprotection_start, |
| 96 *reprotection_length, false, | 94 *reprotection_length, false, |
| 97 info.protection | VM_PROT_WRITE); | 95 info.protection | VM_PROT_WRITE); |
| 98 MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect"; | 96 MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect"; |
| 99 } | 97 } |
| 100 } | 98 } |
| 101 | 99 |
| 102 // === C malloc/calloc/valloc/realloc/posix_memalign === | 100 MallocZoneFunctions g_old_zone; |
| 103 | 101 MallocZoneFunctions g_old_purgeable_zone; |
| 104 typedef void* (*malloc_type)(struct _malloc_zone_t* zone, size_t size); | |
| 105 typedef void* (*calloc_type)(struct _malloc_zone_t* zone, | |
| 106 size_t num_items, | |
| 107 size_t size); | |
| 108 typedef void* (*valloc_type)(struct _malloc_zone_t* zone, size_t size); | |
| 109 typedef void (*free_type)(struct _malloc_zone_t* zone, void* ptr); | |
| 110 typedef void* (*realloc_type)(struct _malloc_zone_t* zone, | |
| 111 void* ptr, | |
| 112 size_t size); | |
| 113 typedef void* (*memalign_type)(struct _malloc_zone_t* zone, | |
| 114 size_t alignment, | |
| 115 size_t size); | |
| 116 | |
| 117 malloc_type g_old_malloc; | |
| 118 calloc_type g_old_calloc; | |
| 119 valloc_type g_old_valloc; | |
| 120 free_type g_old_free; | |
| 121 realloc_type g_old_realloc; | |
| 122 memalign_type g_old_memalign; | |
| 123 | |
| 124 malloc_type g_old_malloc_purgeable; | |
| 125 calloc_type g_old_calloc_purgeable; | |
| 126 valloc_type g_old_valloc_purgeable; | |
| 127 free_type g_old_free_purgeable; | |
| 128 realloc_type g_old_realloc_purgeable; | |
| 129 memalign_type g_old_memalign_purgeable; | |
| 130 | 102 |
| 131 void* oom_killer_malloc(struct _malloc_zone_t* zone, size_t size) { | 103 void* oom_killer_malloc(struct _malloc_zone_t* zone, size_t size) { |
| 132 void* result = g_old_malloc(zone, size); | 104 void* result = g_old_zone.malloc(zone, size); |
| 133 if (!result && size) | 105 if (!result && size) |
| 134 TerminateBecauseOutOfMemory(size); | 106 TerminateBecauseOutOfMemory(size); |
| 135 return result; | 107 return result; |
| 136 } | 108 } |
| 137 | 109 |
| 138 void* oom_killer_calloc(struct _malloc_zone_t* zone, | 110 void* oom_killer_calloc(struct _malloc_zone_t* zone, |
| 139 size_t num_items, | 111 size_t num_items, |
| 140 size_t size) { | 112 size_t size) { |
| 141 void* result = g_old_calloc(zone, num_items, size); | 113 void* result = g_old_zone.calloc(zone, num_items, size); |
| 142 if (!result && num_items && size) | 114 if (!result && num_items && size) |
| 143 TerminateBecauseOutOfMemory(num_items * size); | 115 TerminateBecauseOutOfMemory(num_items * size); |
| 144 return result; | 116 return result; |
| 145 } | 117 } |
| 146 | 118 |
| 147 void* oom_killer_valloc(struct _malloc_zone_t* zone, size_t size) { | 119 void* oom_killer_valloc(struct _malloc_zone_t* zone, size_t size) { |
| 148 void* result = g_old_valloc(zone, size); | 120 void* result = g_old_zone.valloc(zone, size); |
| 149 if (!result && size) | 121 if (!result && size) |
| 150 TerminateBecauseOutOfMemory(size); | 122 TerminateBecauseOutOfMemory(size); |
| 151 return result; | 123 return result; |
| 152 } | 124 } |
| 153 | 125 |
| 154 void oom_killer_free(struct _malloc_zone_t* zone, void* ptr) { | 126 void oom_killer_free(struct _malloc_zone_t* zone, void* ptr) { |
| 155 g_old_free(zone, ptr); | 127 g_old_zone.free(zone, ptr); |
| 156 } | 128 } |
| 157 | 129 |
| 158 void* oom_killer_realloc(struct _malloc_zone_t* zone, void* ptr, size_t size) { | 130 void* oom_killer_realloc(struct _malloc_zone_t* zone, void* ptr, size_t size) { |
| 159 void* result = g_old_realloc(zone, ptr, size); | 131 void* result = g_old_zone.realloc(zone, ptr, size); |
| 160 if (!result && size) | 132 if (!result && size) |
| 161 TerminateBecauseOutOfMemory(size); | 133 TerminateBecauseOutOfMemory(size); |
| 162 return result; | 134 return result; |
| 163 } | 135 } |
| 164 | 136 |
| 165 void* oom_killer_memalign(struct _malloc_zone_t* zone, | 137 void* oom_killer_memalign(struct _malloc_zone_t* zone, |
| 166 size_t alignment, | 138 size_t alignment, |
| 167 size_t size) { | 139 size_t size) { |
| 168 void* result = g_old_memalign(zone, alignment, size); | 140 void* result = g_old_zone.memalign(zone, alignment, size); |
| 169 // Only die if posix_memalign would have returned ENOMEM, since there are | 141 // Only die if posix_memalign would have returned ENOMEM, since there are |
| 170 // other reasons why NULL might be returned (see | 142 // other reasons why NULL might be returned (see |
| 171 // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ). | 143 // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ). |
| 172 if (!result && size && alignment >= sizeof(void*) && | 144 if (!result && size && alignment >= sizeof(void*) && |
| 173 (alignment & (alignment - 1)) == 0) { | 145 (alignment & (alignment - 1)) == 0) { |
| 174 TerminateBecauseOutOfMemory(size); | 146 TerminateBecauseOutOfMemory(size); |
| 175 } | 147 } |
| 176 return result; | 148 return result; |
| 177 } | 149 } |
| 178 | 150 |
| 179 void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone, size_t size) { | 151 void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone, size_t size) { |
| 180 void* result = g_old_malloc_purgeable(zone, size); | 152 void* result = g_old_purgeable_zone.malloc(zone, size); |
| 181 if (!result && size) | 153 if (!result && size) |
| 182 TerminateBecauseOutOfMemory(size); | 154 TerminateBecauseOutOfMemory(size); |
| 183 return result; | 155 return result; |
| 184 } | 156 } |
| 185 | 157 |
| 186 void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone, | 158 void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone, |
| 187 size_t num_items, | 159 size_t num_items, |
| 188 size_t size) { | 160 size_t size) { |
| 189 void* result = g_old_calloc_purgeable(zone, num_items, size); | 161 void* result = g_old_purgeable_zone.calloc(zone, num_items, size); |
| 190 if (!result && num_items && size) | 162 if (!result && num_items && size) |
| 191 TerminateBecauseOutOfMemory(num_items * size); | 163 TerminateBecauseOutOfMemory(num_items * size); |
| 192 return result; | 164 return result; |
| 193 } | 165 } |
| 194 | 166 |
| 195 void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone, size_t size) { | 167 void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone, size_t size) { |
| 196 void* result = g_old_valloc_purgeable(zone, size); | 168 void* result = g_old_purgeable_zone.valloc(zone, size); |
| 197 if (!result && size) | 169 if (!result && size) |
| 198 TerminateBecauseOutOfMemory(size); | 170 TerminateBecauseOutOfMemory(size); |
| 199 return result; | 171 return result; |
| 200 } | 172 } |
| 201 | 173 |
| 202 void oom_killer_free_purgeable(struct _malloc_zone_t* zone, void* ptr) { | 174 void oom_killer_free_purgeable(struct _malloc_zone_t* zone, void* ptr) { |
| 203 g_old_free_purgeable(zone, ptr); | 175 g_old_purgeable_zone.free(zone, ptr); |
| 204 } | 176 } |
| 205 | 177 |
| 206 void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone, | 178 void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone, |
| 207 void* ptr, | 179 void* ptr, |
| 208 size_t size) { | 180 size_t size) { |
| 209 void* result = g_old_realloc_purgeable(zone, ptr, size); | 181 void* result = g_old_purgeable_zone.realloc(zone, ptr, size); |
| 210 if (!result && size) | 182 if (!result && size) |
| 211 TerminateBecauseOutOfMemory(size); | 183 TerminateBecauseOutOfMemory(size); |
| 212 return result; | 184 return result; |
| 213 } | 185 } |
| 214 | 186 |
| 215 void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone, | 187 void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone, |
| 216 size_t alignment, | 188 size_t alignment, |
| 217 size_t size) { | 189 size_t size) { |
| 218 void* result = g_old_memalign_purgeable(zone, alignment, size); | 190 void* result = g_old_purgeable_zone.memalign(zone, alignment, size); |
| 219 // Only die if posix_memalign would have returned ENOMEM, since there are | 191 // Only die if posix_memalign would have returned ENOMEM, since there are |
| 220 // other reasons why NULL might be returned (see | 192 // other reasons why NULL might be returned (see |
| 221 // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ). | 193 // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ). |
| 222 if (!result && size && alignment >= sizeof(void*) && | 194 if (!result && size && alignment >= sizeof(void*) && |
| 223 (alignment & (alignment - 1)) == 0) { | 195 (alignment & (alignment - 1)) == 0) { |
| 224 TerminateBecauseOutOfMemory(size); | 196 TerminateBecauseOutOfMemory(size); |
| 225 } | 197 } |
| 226 return result; | 198 return result; |
| 227 } | 199 } |
| 228 | 200 |
| (...skipping 63 matching lines...) | |
| 292 TerminateBecauseOutOfMemory(0); | 264 TerminateBecauseOutOfMemory(0); |
| 293 return result; | 265 return result; |
| 294 } | 266 } |
| 295 | 267 |
| 296 } // namespace | 268 } // namespace |
| 297 | 269 |
| 298 bool UncheckedMallocMac(size_t size, void** result) { | 270 bool UncheckedMallocMac(size_t size, void** result) { |
| 299 #if defined(ADDRESS_SANITIZER) | 271 #if defined(ADDRESS_SANITIZER) |
| 300 *result = malloc(size); | 272 *result = malloc(size); |
| 301 #else | 273 #else |
| 302 if (g_old_malloc) { | 274 if (g_old_zone.malloc) { |
| 303 *result = g_old_malloc(malloc_default_zone(), size); | 275 *result = g_old_zone.malloc(malloc_default_zone(), size); |
| 304 } else { | 276 } else { |
| 305 *result = malloc(size); | 277 *result = malloc(size); |
| 306 } | 278 } |
| 307 #endif // defined(ADDRESS_SANITIZER) | 279 #endif // defined(ADDRESS_SANITIZER) |
| 308 | 280 |
| 309 return *result != NULL; | 281 return *result != NULL; |
| 310 } | 282 } |
| 311 | 283 |
| 312 bool UncheckedCallocMac(size_t num_items, size_t size, void** result) { | 284 bool UncheckedCallocMac(size_t num_items, size_t size, void** result) { |
| 313 #if defined(ADDRESS_SANITIZER) | 285 #if defined(ADDRESS_SANITIZER) |
| 314 *result = calloc(num_items, size); | 286 *result = calloc(num_items, size); |
| 315 #else | 287 #else |
| 316 if (g_old_calloc) { | 288 if (g_old_zone.calloc) { |
| 317 *result = g_old_calloc(malloc_default_zone(), num_items, size); | 289 *result = g_old_zone.calloc(malloc_default_zone(), num_items, size); |
| 318 } else { | 290 } else { |
| 319 *result = calloc(num_items, size); | 291 *result = calloc(num_items, size); |
| 320 } | 292 } |
| 321 #endif // defined(ADDRESS_SANITIZER) | 293 #endif // defined(ADDRESS_SANITIZER) |
| 322 | 294 |
| 323 return *result != NULL; | 295 return *result != NULL; |
| 324 } | 296 } |
| 325 | 297 |
| 298 void StoreZoneFunctions(ChromeMallocZone* zone, | |
| 299 MallocZoneFunctions* functions) { | |
| 300 functions->malloc = zone->malloc; | |

Primiano Tucci (use gerrit) 2017/01/26 01:45:14: not sure if it makes a difference, just checking:

erikchen 2017/01/26 01:59:14: correct.

| 301 functions->calloc = zone->calloc; | |
| 302 functions->valloc = zone->valloc; | |
| 303 functions->free = zone->free; | |
| 304 functions->realloc = zone->realloc; | |
| 305 if (zone->version >= 5) { | |
| 306 functions->memalign = zone->memalign; | |
| 307 } | |
| 308 } | |
| 309 | |
| 310 void ReplaceZoneFunctions(ChromeMallocZone* zone, | |
| 311 MallocZoneFunctions* functions) { | |
| 312 // Remove protection. | |
| 313 mach_vm_address_t reprotection_start = 0; | |
| 314 mach_vm_size_t reprotection_length = 0; | |
| 315 vm_prot_t reprotection_value = VM_PROT_NONE; | |
| 316 DeprotectMallocZone(zone, &reprotection_start, &reprotection_length, | |
| 317 &reprotection_value); | |
| 318 | |
| 319 zone->malloc = functions->malloc; | |
| 320 zone->calloc = functions->calloc; | |
| 321 zone->valloc = functions->valloc; | |
| 322 zone->free = functions->free; | |
| 323 zone->realloc = functions->realloc; | |
| 324 if (zone->version >= 5) { | |
| 325 zone->memalign = functions->memalign; | |
| 326 } | |
| 327 | |
| 328 // Restore protection if it was active. | |
| 329 if (reprotection_start) { | |
| 330 kern_return_t result = | |
| 331 mach_vm_protect(mach_task_self(), reprotection_start, | |
| 332 reprotection_length, false, reprotection_value); | |
| 333 MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect"; | |
| 334 } | |
| 335 } | |
| 336 | |
| 326 void InterceptAllocationsMac() { | 337 void InterceptAllocationsMac() { |
| 327 if (g_oom_killer_enabled) | 338 if (g_oom_killer_enabled) |
| 328 return; | 339 return; |
| 329 | 340 |
| 330 g_oom_killer_enabled = true; | 341 g_oom_killer_enabled = true; |
| 331 | 342 |
| 332 // === C malloc/calloc/valloc/realloc/posix_memalign === | 343 // === C malloc/calloc/valloc/realloc/posix_memalign === |
| 333 | 344 |
| 334 // This approach is not perfect, as requests for amounts of memory larger than | 345 // This approach is not perfect, as requests for amounts of memory larger than |
| 335 // MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will | 346 // MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will |
| 336 // still fail with a NULL rather than dying (see | 347 // still fail with a NULL rather than dying (see |
| 337 // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for details). | 348 // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for details). |
| 338 // Unfortunately, it's the best we can do. Also note that this does not affect | 349 // Unfortunately, it's the best we can do. Also note that this does not affect |
| 339 // allocations from non-default zones. | 350 // allocations from non-default zones. |
| 340 | 351 |
| 341 #if !defined(ADDRESS_SANITIZER) | 352 #if !defined(ADDRESS_SANITIZER) |
| 342 // Don't do anything special on OOM for the malloc zones replaced by | 353 // Don't do anything special on OOM for the malloc zones replaced by |
| 343 // AddressSanitizer, as modifying or protecting them may not work correctly. | 354 // AddressSanitizer, as modifying or protecting them may not work correctly. |
| 344 | |
| 345 CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc && | |

Primiano Tucci (use gerrit) 2017/01/26 01:45:14: shouldn't we keep these checks somewhere?

erikchen 2017/01/26 01:59:14: These checks are pointless, since these values are

| 346 !g_old_memalign) | |
| 347 << "Old allocators unexpectedly non-null"; | |
| 348 | |
| 349 CHECK(!g_old_malloc_purgeable && !g_old_calloc_purgeable && | |
| 350 !g_old_valloc_purgeable && !g_old_realloc_purgeable && | |
| 351 !g_old_memalign_purgeable) | |
| 352 << "Old allocators unexpectedly non-null"; | |
| 353 | |
| 354 ChromeMallocZone* default_zone = | 355 ChromeMallocZone* default_zone = |
| 355 reinterpret_cast<ChromeMallocZone*>(malloc_default_zone()); | 356 reinterpret_cast<ChromeMallocZone*>(malloc_default_zone()); |
| 357 StoreZoneFunctions(default_zone, &g_old_zone); | |
| 358 MallocZoneFunctions new_functions; | |
| 359 new_functions.malloc = oom_killer_malloc; | |
| 360 new_functions.calloc = oom_killer_calloc; | |
| 361 new_functions.valloc = oom_killer_valloc; | |
| 362 new_functions.free = oom_killer_free; | |
| 363 new_functions.realloc = oom_killer_realloc; | |
| 364 new_functions.memalign = oom_killer_memalign; | |
| 365 ReplaceZoneFunctions(default_zone, &new_functions); | |
| 366 | |
| 356 ChromeMallocZone* purgeable_zone = | 367 ChromeMallocZone* purgeable_zone = |
| 357 reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone()); | 368 reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone()); |
| 358 | |
| 359 mach_vm_address_t default_reprotection_start = 0; | |
| 360 mach_vm_size_t default_reprotection_length = 0; | |
| 361 vm_prot_t default_reprotection_value = VM_PROT_NONE; | |
| 362 DeprotectMallocZone(default_zone, &default_reprotection_start, | |
| 363 &default_reprotection_length, | |
| 364 &default_reprotection_value); | |
| 365 | |
| 366 mach_vm_address_t purgeable_reprotection_start = 0; | |
| 367 mach_vm_size_t purgeable_reprotection_length = 0; | |
| 368 vm_prot_t purgeable_reprotection_value = VM_PROT_NONE; | |
| 369 if (purgeable_zone) { | 369 if (purgeable_zone) { |
| 370 DeprotectMallocZone(purgeable_zone, &purgeable_reprotection_start, | 370 StoreZoneFunctions(purgeable_zone, &g_old_purgeable_zone); |
| 371 &purgeable_reprotection_length, | 371 MallocZoneFunctions new_functions; |
| 372 &purgeable_reprotection_value); | 372 new_functions.malloc = oom_killer_malloc_purgeable; |
| 373 } | 373 new_functions.calloc = oom_killer_calloc_purgeable; |
| 374 | 374 new_functions.valloc = oom_killer_valloc_purgeable; |
| 375 // Default zone | 375 new_functions.free = oom_killer_free_purgeable; |
| 376 | 376 new_functions.realloc = oom_killer_realloc_purgeable; |
| 377 g_old_malloc = default_zone->malloc; | 377 new_functions.memalign = oom_killer_memalign_purgeable; |
| 378 g_old_calloc = default_zone->calloc; | 378 ReplaceZoneFunctions(purgeable_zone, &new_functions); |
| 379 g_old_valloc = default_zone->valloc; | |
| 380 g_old_free = default_zone->free; | |
| 381 g_old_realloc = default_zone->realloc; | |
| 382 CHECK(g_old_malloc && g_old_calloc && g_old_valloc && g_old_free && | |

Primiano Tucci (use gerrit) 2017/01/26 01:45:14: same here?

erikchen 2017/01/26 01:59:14: There's more value to keeping these checks. I've r

| 383 g_old_realloc) | |
| 384 << "Failed to get system allocation functions."; | |
| 385 | |
| 386 default_zone->malloc = oom_killer_malloc; | |
| 387 default_zone->calloc = oom_killer_calloc; | |
| 388 default_zone->valloc = oom_killer_valloc; | |
| 389 default_zone->free = oom_killer_free; | |
| 390 default_zone->realloc = oom_killer_realloc; | |
| 391 | |
| 392 if (default_zone->version >= 5) { | |
| 393 g_old_memalign = default_zone->memalign; | |
| 394 if (g_old_memalign) | |
| 395 default_zone->memalign = oom_killer_memalign; | |
| 396 } | |
| 397 | |
| 398 // Purgeable zone (if it exists) | |
| 399 | |
| 400 if (purgeable_zone) { | |
| 401 g_old_malloc_purgeable = purgeable_zone->malloc; | |
| 402 g_old_calloc_purgeable = purgeable_zone->calloc; | |
| 403 g_old_valloc_purgeable = purgeable_zone->valloc; | |
| 404 g_old_free_purgeable = purgeable_zone->free; | |
| 405 g_old_realloc_purgeable = purgeable_zone->realloc; | |
| 406 CHECK(g_old_malloc_purgeable && g_old_calloc_purgeable && | |
| 407 g_old_valloc_purgeable && g_old_free_purgeable && | |
| 408 g_old_realloc_purgeable) | |
| 409 << "Failed to get system allocation functions."; | |
| 410 | |
| 411 purgeable_zone->malloc = oom_killer_malloc_purgeable; | |
| 412 purgeable_zone->calloc = oom_killer_calloc_purgeable; | |
| 413 purgeable_zone->valloc = oom_killer_valloc_purgeable; | |
| 414 purgeable_zone->free = oom_killer_free_purgeable; | |
| 415 purgeable_zone->realloc = oom_killer_realloc_purgeable; | |
| 416 | |
| 417 if (purgeable_zone->version >= 5) { | |
| 418 g_old_memalign_purgeable = purgeable_zone->memalign; | |
| 419 if (g_old_memalign_purgeable) | |
| 420 purgeable_zone->memalign = oom_killer_memalign_purgeable; | |
| 421 } | |
| 422 } | |
| 423 | |
| 424 // Restore protection if it was active. | |
| 425 | |
| 426 if (default_reprotection_start) { | |
| 427 kern_return_t result = mach_vm_protect( | |
| 428 mach_task_self(), default_reprotection_start, | |
| 429 default_reprotection_length, false, default_reprotection_value); | |
| 430 MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect"; | |
| 431 } | |
| 432 | |
| 433 if (purgeable_reprotection_start) { | |
| 434 kern_return_t result = mach_vm_protect( | |
| 435 mach_task_self(), purgeable_reprotection_start, | |
| 436 purgeable_reprotection_length, false, purgeable_reprotection_value); | |
| 437 MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect"; | |
| 438 } | 379 } |
| 439 #endif | 380 #endif |
| 440 | 381 |
| 441 // === C malloc_zone_batch_malloc === | 382 // === C malloc_zone_batch_malloc === |
| 442 | 383 |
| 443 // batch_malloc is omitted because the default malloc zone's implementation | 384 // batch_malloc is omitted because the default malloc zone's implementation |
| 444 // only supports batch_malloc for "tiny" allocations from the free list. It | 385 // only supports batch_malloc for "tiny" allocations from the free list. It |
| 445 // will fail for allocations larger than "tiny", and will only allocate as | 386 // will fail for allocations larger than "tiny", and will only allocate as |
| 446 // many blocks as it's able to from the free list. These factors mean that it | 387 // many blocks as it's able to from the free list. These factors mean that it |
| 447 // can return less than the requested memory even in a non-out-of-memory | 388 // can return less than the requested memory even in a non-out-of-memory |
| (...skipping 69 matching lines...) | |
| 517 g_old_allocWithZone = | 458 g_old_allocWithZone = |
| 518 reinterpret_cast<allocWithZone_t>(method_getImplementation(orig_method)); | 459 reinterpret_cast<allocWithZone_t>(method_getImplementation(orig_method)); |
| 519 CHECK(g_old_allocWithZone) | 460 CHECK(g_old_allocWithZone) |
| 520 << "Failed to get allocWithZone allocation function."; | 461 << "Failed to get allocWithZone allocation function."; |
| 521 method_setImplementation(orig_method, | 462 method_setImplementation(orig_method, |
| 522 reinterpret_cast<IMP>(oom_killer_allocWithZone)); | 463 reinterpret_cast<IMP>(oom_killer_allocWithZone)); |
| 523 } | 464 } |
| 524 | 465 |
| 525 } // namespace allocator | 466 } // namespace allocator |
| 526 } // namespace base | 467 } // namespace base |
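The `MallocZoneFunctions` struct and the `StoreZoneFunctions`/`ReplaceZoneFunctions` helpers that this change introduces are declared in `allocator_interception_mac.h`, which is not shown in this diff. A minimal sketch of what the new `g_old_zone`/`g_old_purgeable_zone` globals consolidate, assuming the struct is a plain collection of zone entry points (the field names come from the diff above; everything else is illustrative):

```cpp
#include <stddef.h>

// Opaque zone type; the real definition comes from the system
// <malloc/malloc.h> header (via third_party/apple_apsl/malloc.h).
struct _malloc_zone_t;

// Sketch of the struct declared in allocator_interception_mac.h: one slot per
// zone entry point, mirroring the per-function typedefs this CL deletes.
struct MallocZoneFunctions {
  void* (*malloc)(_malloc_zone_t* zone, size_t size) = nullptr;
  void* (*calloc)(_malloc_zone_t* zone, size_t num_items, size_t size) = nullptr;
  void* (*valloc)(_malloc_zone_t* zone, size_t size) = nullptr;
  void (*free)(_malloc_zone_t* zone, void* ptr) = nullptr;
  void* (*realloc)(_malloc_zone_t* zone, void* ptr, size_t size) = nullptr;
  void* (*memalign)(_malloc_zone_t* zone, size_t alignment, size_t size) = nullptr;
};
```

With that shape, interception becomes a save/replace pair per zone: `StoreZoneFunctions(zone, &g_old_zone)` captures the original pointers, a local `MallocZoneFunctions` is filled with the `oom_killer_*` wrappers, and `ReplaceZoneFunctions(zone, &new_functions)` patches the zone in place under a temporary `mach_vm_protect` window, as the rewritten `InterceptAllocationsMac` above shows.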
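`UncheckedMallocMac` and `UncheckedCallocMac` deliberately bypass the OOM killer: they call the saved `g_old_zone` entry points (or plain `malloc`/`calloc` before interception, or under ASan) so that a failed allocation reports `false` instead of terminating the process. A hypothetical call site, with the helper name, size, and fallback purely illustrative:

```cpp
#include <stdlib.h>

#include "base/allocator/allocator_interception_mac.h"

// Hypothetical helper: attempt a large, optional allocation without tripping
// the OOM killer installed by InterceptAllocationsMac().
bool TryAllocateScratchBuffer(size_t bytes, void** out_buffer) {
  void* buffer = nullptr;
  if (!base::allocator::UncheckedMallocMac(bytes, &buffer))
    return false;  // Allocation failed; the caller degrades gracefully.
  *out_buffer = buffer;  // Comes from the default zone; release with free().
  return true;
}
```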