Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1044)

Side by Side Diff: base/process/memory_mac.mm

Issue 2658083002: Revert of Move logic from memory_mac to allocator_interception_mac. (Closed)
Patch Set: Created 3 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « base/allocator/allocator_interception_mac.mm ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/process/memory.h" 5 #include "base/process/memory.h"
6 6
7 #include "base/allocator/allocator_interception_mac.h" 7 #include <CoreFoundation/CoreFoundation.h>
8 #import <Foundation/Foundation.h>
9 #include <errno.h>
10 #include <mach/mach.h>
11 #include <mach/mach_vm.h>
12 #include <malloc/malloc.h>
13 #import <objc/runtime.h>
14 #include <stddef.h>
15
16 #include <new>
17
18 #include "base/lazy_instance.h"
19 #include "base/logging.h"
20 #include "base/mac/mac_util.h"
21 #include "base/mac/mach_logging.h"
22 #include "base/scoped_clear_errno.h"
8 #include "build/build_config.h" 23 #include "build/build_config.h"
24 #include "third_party/apple_apsl/CFBase.h"
25 #include "third_party/apple_apsl/malloc.h"
9 26
10 namespace base { 27 namespace base {
11 28
// Intentionally a no-op on this platform: nothing is wired up here. On
// 32-bit builds, emit a warning that the feature is unavailable (the message
// implies the behavior exists only on 64-bit — presumably provided by the
// system allocator itself; TODO confirm before relying on it).
void EnableTerminationOnHeapCorruption() {
#if !ARCH_CPU_64_BITS
  DLOG(WARNING) << "EnableTerminationOnHeapCorruption only works on 64-bit";
#endif
}
17 34
35 // ------------------------------------------------------------------------
36
37 namespace {
38
// Set once EnableTerminationOnOutOfMemory() has installed the OOM-killer
// hooks; guards against installing them twice.
bool g_oom_killer_enabled;
40
41 #if !defined(ADDRESS_SANITIZER)
42
// Starting with Mac OS X 10.7, the zone allocators set up by the system are
// read-only, to prevent them from being overwritten in an attack. However,
// blindly unprotecting and reprotecting the zone allocators fails with
// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
// memory in its bss. Explicit saving/restoring of the protection is required.
//
// This function takes a pointer to a malloc zone, de-protects it if necessary,
// and returns (in the out parameters) a region of memory (if any) to be
// re-protected when modifications are complete. This approach assumes that
// there is no contention for the protection of this memory.
//
// On return, if *reprotection_start is 0 the zone was already writable and
// nothing needs to be restored; otherwise the caller must mach_vm_protect
// the returned range back to *reprotection_value when done.
void DeprotectMallocZone(ChromeMallocZone* default_zone,
                         mach_vm_address_t* reprotection_start,
                         mach_vm_size_t* reprotection_length,
                         vm_prot_t* reprotection_value) {
  mach_port_t unused;
  // Query the VM region containing the zone to learn its current protection.
  // mach_vm_region may round *reprotection_start down to the region's base.
  *reprotection_start = reinterpret_cast<mach_vm_address_t>(default_zone);
  struct vm_region_basic_info_64 info;
  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
  kern_return_t result =
      mach_vm_region(mach_task_self(),
                     reprotection_start,
                     reprotection_length,
                     VM_REGION_BASIC_INFO_64,
                     reinterpret_cast<vm_region_info_t>(&info),
                     &count,
                     &unused);
  MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_region";

  // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
  // balance it with a deallocate in case this ever changes. See 10.9.2
  // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
  mach_port_deallocate(mach_task_self(), unused);

  // Does the region fully enclose the zone pointers? Possibly unwarranted
  // simplification used: using the size of a full version 8 malloc zone rather
  // than the actual smaller size if the passed-in zone is not version 8.
  CHECK(*reprotection_start <=
        reinterpret_cast<mach_vm_address_t>(default_zone));
  mach_vm_size_t zone_offset = reinterpret_cast<mach_vm_size_t>(default_zone) -
      reinterpret_cast<mach_vm_size_t>(*reprotection_start);
  CHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);

  if (info.protection & VM_PROT_WRITE) {
    // No change needed; the zone is already writable. Signal this to the
    // caller with an empty reprotection range.
    *reprotection_start = 0;
    *reprotection_length = 0;
    *reprotection_value = VM_PROT_NONE;
  } else {
    // Save the original protection for the caller to restore, then add write
    // permission so the zone's function pointers can be swapped.
    *reprotection_value = info.protection;
    result = mach_vm_protect(mach_task_self(),
                             *reprotection_start,
                             *reprotection_length,
                             false,
                             info.protection | VM_PROT_WRITE);
    MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
  }
}
100
// === C malloc/calloc/valloc/realloc/posix_memalign ===

// Signatures of the allocation entry points in a malloc zone that are
// intercepted below; they mirror the function-pointer fields of
// malloc_zone_t.
typedef void* (*malloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
                             size_t num_items,
                             size_t size);
typedef void* (*valloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void (*free_type)(struct _malloc_zone_t* zone,
                          void* ptr);
typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
                              void* ptr,
                              size_t size);
typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
                               size_t alignment,
                               size_t size);

// Original implementations from the default malloc zone. Null until
// EnableTerminationOnOutOfMemory() installs the oom_killer_* hooks.
malloc_type g_old_malloc;
calloc_type g_old_calloc;
valloc_type g_old_valloc;
free_type g_old_free;
realloc_type g_old_realloc;
memalign_type g_old_memalign;

// Original implementations from the default purgeable zone; these remain
// null if that zone does not exist (see the purgeable_zone check in
// EnableTerminationOnOutOfMemory()).
malloc_type g_old_malloc_purgeable;
calloc_type g_old_calloc_purgeable;
valloc_type g_old_valloc_purgeable;
free_type g_old_free_purgeable;
realloc_type g_old_realloc_purgeable;
memalign_type g_old_memalign_purgeable;
132
// Default-zone malloc replacement: forwards to the saved implementation and
// terminates the process when a non-empty request fails.
void* oom_killer_malloc(struct _malloc_zone_t* zone,
                        size_t size) {
  void* result = g_old_malloc(zone, size);
  if (result != NULL || size == 0)
    return result;
  TerminateBecauseOutOfMemory(size);
  return result;
}
140
// Default-zone calloc replacement: forwards to the saved implementation and
// terminates the process when a non-empty request fails.
void* oom_killer_calloc(struct _malloc_zone_t* zone,
                        size_t num_items,
                        size_t size) {
  void* result = g_old_calloc(zone, num_items, size);
  if (result != NULL || num_items == 0 || size == 0)
    return result;
  TerminateBecauseOutOfMemory(num_items * size);
  return result;
}
149
// Default-zone valloc replacement: forwards to the saved implementation and
// terminates the process when a non-empty request fails.
void* oom_killer_valloc(struct _malloc_zone_t* zone,
                        size_t size) {
  void* result = g_old_valloc(zone, size);
  if (result != NULL || size == 0)
    return result;
  TerminateBecauseOutOfMemory(size);
  return result;
}
157
// Default-zone free replacement: a pure forwarder to the saved
// implementation. Freeing cannot run out of memory, so no termination logic
// is needed here.
void oom_killer_free(struct _malloc_zone_t* zone,
                     void* ptr) {
  g_old_free(zone, ptr);
}
162
// Default-zone realloc replacement: forwards to the saved implementation and
// terminates the process when a non-empty request fails.
void* oom_killer_realloc(struct _malloc_zone_t* zone,
                         void* ptr,
                         size_t size) {
  void* result = g_old_realloc(zone, ptr, size);
  if (result != NULL || size == 0)
    return result;
  TerminateBecauseOutOfMemory(size);
  return result;
}
171
// Default-zone memalign replacement. Only terminates when posix_memalign
// would have returned ENOMEM, since there are other reasons why NULL might be
// returned (see http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c).
void* oom_killer_memalign(struct _malloc_zone_t* zone,
                          size_t alignment,
                          size_t size) {
  void* result = g_old_memalign(zone, alignment, size);
  if (result != NULL || size == 0)
    return result;
  // A null result with an invalid alignment (not a power of two, or smaller
  // than a pointer) is an EINVAL-style failure, not memory exhaustion.
  const bool alignment_is_valid =
      alignment >= sizeof(void*) && (alignment & (alignment - 1)) == 0;
  if (alignment_is_valid) {
    TerminateBecauseOutOfMemory(size);
  }
  return result;
}
185
// Purgeable-zone malloc replacement: forwards to the saved implementation
// and terminates the process when a non-empty request fails.
void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
  void* result = g_old_malloc_purgeable(zone, size);
  if (result != NULL || size == 0)
    return result;
  TerminateBecauseOutOfMemory(size);
  return result;
}
193
// Purgeable-zone calloc replacement: forwards to the saved implementation
// and terminates the process when a non-empty request fails.
void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t num_items,
                                  size_t size) {
  void* result = g_old_calloc_purgeable(zone, num_items, size);
  if (result != NULL || num_items == 0 || size == 0)
    return result;
  TerminateBecauseOutOfMemory(num_items * size);
  return result;
}
202
// Purgeable-zone valloc replacement: forwards to the saved implementation
// and terminates the process when a non-empty request fails.
void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
  void* result = g_old_valloc_purgeable(zone, size);
  if (result != NULL || size == 0)
    return result;
  TerminateBecauseOutOfMemory(size);
  return result;
}
210
// Purgeable-zone free replacement: a pure forwarder to the saved
// implementation. Freeing cannot run out of memory, so no termination logic
// is needed here.
void oom_killer_free_purgeable(struct _malloc_zone_t* zone,
                               void* ptr) {
  g_old_free_purgeable(zone, ptr);
}
215
// Purgeable-zone realloc replacement: forwards to the saved implementation
// and terminates the process when a non-empty request fails.
void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
                                   void* ptr,
                                   size_t size) {
  void* result = g_old_realloc_purgeable(zone, ptr, size);
  if (result != NULL || size == 0)
    return result;
  TerminateBecauseOutOfMemory(size);
  return result;
}
224
// Purgeable-zone memalign replacement. Only die if posix_memalign would have
// returned ENOMEM, since there are other reasons why NULL might be returned
// (see http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
                                    size_t alignment,
                                    size_t size) {
  void* result = g_old_memalign_purgeable(zone, alignment, size);
  // Consistency fix: the && operator trails the line, matching
  // oom_killer_memalign and the file's prevailing style (the original had a
  // leading && on the continuation line).
  if (!result && size && alignment >= sizeof(void*) &&
      (alignment & (alignment - 1)) == 0) {
    TerminateBecauseOutOfMemory(size);
  }
  return result;
}
238
239 #endif // !defined(ADDRESS_SANITIZER)
240
// === C++ operator new ===

// std::new_handler installed by EnableTerminationOnOutOfMemory(); the C++
// runtime invokes it when operator new fails. The requested size is not
// available to a new_handler, so 0 is reported.
void oom_killer_new() {
  TerminateBecauseOutOfMemory(0);
}
246
247 #if !defined(ADDRESS_SANITIZER)
248
249 // === Core Foundation CFAllocators ===
250
// Returns true if the internal layout of the system CFAllocators can be
// relied upon on the running OS, i.e. the OS is no newer than 10.12.
// On newer, unvalidated releases the CFAllocator interception below is
// skipped (see the DLOG in EnableTerminationOnOutOfMemory()).
bool CanGetContextForCFAllocator() {
  return !base::mac::IsOSLaterThan10_12_DontCallThis();
}
254
// Returns a writable pointer to the CFAllocatorContext embedded inside a
// system CFAllocator, by reinterpreting the allocator as the private
// CoreFoundation layout mirrored in third_party/apple_apsl/CFBase.h.
// Callers gate this on CanGetContextForCFAllocator(), since the layout is
// version-dependent.
CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
  ChromeCFAllocatorLions* our_allocator =
      const_cast<ChromeCFAllocatorLions*>(
          reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
  return &our_allocator->_context;
}
261
// Original allocate callbacks of the three system CFAllocators, saved before
// the oom_killer_cfallocator_* hooks are swapped in.
CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;
265
// Allocate hook for kCFAllocatorSystemDefault: any failed allocation
// terminates the process.
void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
                                            CFOptionFlags hint,
                                            void* info) {
  void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
  if (result != NULL)
    return result;
  TerminateBecauseOutOfMemory(alloc_size);
  return result;
}
274
// Allocate hook for kCFAllocatorMalloc: any failed allocation terminates the
// process.
void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
                                    CFOptionFlags hint,
                                    void* info) {
  void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
  if (result != NULL)
    return result;
  TerminateBecauseOutOfMemory(alloc_size);
  return result;
}
283
// Allocate hook for kCFAllocatorMallocZone: any failed allocation terminates
// the process.
void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
                                         CFOptionFlags hint,
                                         void* info) {
  void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
  if (result != NULL)
    return result;
  TerminateBecauseOutOfMemory(alloc_size);
  return result;
}
292
293 #endif // !defined(ADDRESS_SANITIZER)
294
// === Cocoa NSObject allocation ===

// Signature of +[NSObject allocWithZone:] as a C function pointer, and the
// saved original IMP (null until the hook is installed).
typedef id (*allocWithZone_t)(id, SEL, NSZone*);
allocWithZone_t g_old_allocWithZone;
299
// Replacement IMP for +[NSObject allocWithZone:]: forwards to the original
// implementation and terminates the process if it returns nil. The requested
// size is unknown at this level, so 0 is reported.
id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
  id result = g_old_allocWithZone(self, _cmd, zone);
  if (result)
    return result;
  TerminateBecauseOutOfMemory(0);
  return result;
}
307
308 } // namespace
309
// Attempts to allocate |size| bytes WITHOUT triggering the OOM killer: once
// the hooks are installed, goes straight to the saved pre-interception
// malloc so a failure reports false instead of terminating. Under ASan the
// zones are left untouched, so plain malloc is used.
bool UncheckedMalloc(size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
  *result = malloc(size);
#else
  *result = g_old_malloc ? g_old_malloc(malloc_default_zone(), size)
                         : malloc(size);
#endif  // defined(ADDRESS_SANITIZER)
  return *result != NULL;
}
21 323
// Attempts to allocate zeroed memory WITHOUT triggering the OOM killer: once
// the hooks are installed, goes straight to the saved pre-interception
// calloc so a failure reports false instead of terminating. Under ASan the
// zones are left untouched, so plain calloc is used.
bool UncheckedCalloc(size_t num_items, size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
  *result = calloc(num_items, size);
#else
  *result = g_old_calloc ? g_old_calloc(malloc_default_zone(), num_items, size)
                         : calloc(num_items, size);
#endif  // defined(ADDRESS_SANITIZER)
  return *result != NULL;
}
337
// Pointer-returning convenience overload: NULL on failure.
void* UncheckedMalloc(size_t size) {
  void* address;
  if (!UncheckedMalloc(size, &address))
    return NULL;
  return address;
}
342
// Pointer-returning convenience overload: NULL on failure.
void* UncheckedCalloc(size_t num_items, size_t size) {
  void* address;
  if (!UncheckedCalloc(num_items, size, &address))
    return NULL;
  return address;
}
25 347
// Installs the "OOM killer": hooks every system allocation path that can
// report failure (malloc zones, operator new, CFAllocators, and
// +[NSObject allocWithZone:]) so that a genuine out-of-memory condition
// terminates the process via TerminateBecauseOutOfMemory() instead of
// returning NULL. Idempotent: calls after the first are no-ops.
//
// Fixes applied vs. the original: the second ASan guard used
// `#ifndef ADDRESS_SANITIZER` while the rest of the file uses
// `#if !defined(ADDRESS_SANITIZER)` — now consistent; the long conditionals'
// bare `#endif`s gained trailing comments. All other code is unchanged.
void EnableTerminationOnOutOfMemory() {
  if (g_oom_killer_enabled)
    return;

  g_oom_killer_enabled = true;

  // === C malloc/calloc/valloc/realloc/posix_memalign ===

  // This approach is not perfect, as requests for amounts of memory larger
  // than MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE))
  // will still fail with a NULL rather than dying (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for
  // details). Unfortunately, it's the best we can do. Also note that this
  // does not affect allocations from non-default zones.

#if !defined(ADDRESS_SANITIZER)
  // Don't do anything special on OOM for the malloc zones replaced by
  // AddressSanitizer, as modifying or protecting them may not work correctly.

  CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc &&
        !g_old_memalign) << "Old allocators unexpectedly non-null";

  CHECK(!g_old_malloc_purgeable && !g_old_calloc_purgeable &&
        !g_old_valloc_purgeable && !g_old_realloc_purgeable &&
        !g_old_memalign_purgeable) << "Old allocators unexpectedly non-null";

  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());

  // The zones may live in read-only memory; make them writable for the
  // duration of the pointer swap, remembering what to restore afterwards.
  mach_vm_address_t default_reprotection_start = 0;
  mach_vm_size_t default_reprotection_length = 0;
  vm_prot_t default_reprotection_value = VM_PROT_NONE;
  DeprotectMallocZone(default_zone,
                      &default_reprotection_start,
                      &default_reprotection_length,
                      &default_reprotection_value);

  mach_vm_address_t purgeable_reprotection_start = 0;
  mach_vm_size_t purgeable_reprotection_length = 0;
  vm_prot_t purgeable_reprotection_value = VM_PROT_NONE;
  if (purgeable_zone) {
    DeprotectMallocZone(purgeable_zone,
                        &purgeable_reprotection_start,
                        &purgeable_reprotection_length,
                        &purgeable_reprotection_value);
  }

  // Default zone

  g_old_malloc = default_zone->malloc;
  g_old_calloc = default_zone->calloc;
  g_old_valloc = default_zone->valloc;
  g_old_free = default_zone->free;
  g_old_realloc = default_zone->realloc;
  CHECK(g_old_malloc && g_old_calloc && g_old_valloc && g_old_free &&
        g_old_realloc)
      << "Failed to get system allocation functions.";

  default_zone->malloc = oom_killer_malloc;
  default_zone->calloc = oom_killer_calloc;
  default_zone->valloc = oom_killer_valloc;
  default_zone->free = oom_killer_free;
  default_zone->realloc = oom_killer_realloc;

  // The memalign entry point only exists in zone version 5 and later.
  if (default_zone->version >= 5) {
    g_old_memalign = default_zone->memalign;
    if (g_old_memalign)
      default_zone->memalign = oom_killer_memalign;
  }

  // Purgeable zone (if it exists)

  if (purgeable_zone) {
    g_old_malloc_purgeable = purgeable_zone->malloc;
    g_old_calloc_purgeable = purgeable_zone->calloc;
    g_old_valloc_purgeable = purgeable_zone->valloc;
    g_old_free_purgeable = purgeable_zone->free;
    g_old_realloc_purgeable = purgeable_zone->realloc;
    CHECK(g_old_malloc_purgeable && g_old_calloc_purgeable &&
          g_old_valloc_purgeable && g_old_free_purgeable &&
          g_old_realloc_purgeable)
        << "Failed to get system allocation functions.";

    purgeable_zone->malloc = oom_killer_malloc_purgeable;
    purgeable_zone->calloc = oom_killer_calloc_purgeable;
    purgeable_zone->valloc = oom_killer_valloc_purgeable;
    purgeable_zone->free = oom_killer_free_purgeable;
    purgeable_zone->realloc = oom_killer_realloc_purgeable;

    if (purgeable_zone->version >= 5) {
      g_old_memalign_purgeable = purgeable_zone->memalign;
      if (g_old_memalign_purgeable)
        purgeable_zone->memalign = oom_killer_memalign_purgeable;
    }
  }

  // Restore protection if it was active.

  if (default_reprotection_start) {
    kern_return_t result = mach_vm_protect(mach_task_self(),
                                           default_reprotection_start,
                                           default_reprotection_length,
                                           false,
                                           default_reprotection_value);
    MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
  }

  if (purgeable_reprotection_start) {
    kern_return_t result = mach_vm_protect(mach_task_self(),
                                           purgeable_reprotection_start,
                                           purgeable_reprotection_length,
                                           false,
                                           purgeable_reprotection_value);
    MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
  }
#endif  // !defined(ADDRESS_SANITIZER)

  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that
  // it can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure
  // is due to these other factors, or due to genuine memory or address space
  // exhaustion. The fact that it only allocates space from the "tiny" free
  // list means that it's likely that a failure will not be due to memory
  // exhaustion. Similarly, these constraints on batch_malloc mean that
  // callers must always be expecting to receive less memory than was
  // requested, even in situations where memory pressure is not a concern.
  // Finally, the only public interface to batch_malloc is
  // malloc_zone_batch_malloc, which is specific to the system's malloc
  // implementation. It's unlikely that anyone's even heard of it.

  // === C++ operator new ===

  // Yes, operator new does call through to malloc, but this will catch
  // failures that our imperfect handling of malloc cannot.

  std::set_new_handler(oom_killer_new);

#if !defined(ADDRESS_SANITIZER)
  // === Core Foundation CFAllocators ===

  // This will not catch allocation done by custom allocators, but will catch
  // all allocation done by system-provided ones.

  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
        !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";

  bool cf_allocator_internals_known = CanGetContextForCFAllocator();

  if (cf_allocator_internals_known) {
    CFAllocatorContext* context =
        ContextForCFAllocator(kCFAllocatorSystemDefault);
    CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
    g_old_cfallocator_system_default = context->allocate;
    CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    context->allocate = oom_killer_cfallocator_system_default;

    context = ContextForCFAllocator(kCFAllocatorMalloc);
    CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
    g_old_cfallocator_malloc = context->allocate;
    CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    context->allocate = oom_killer_cfallocator_malloc;

    context = ContextForCFAllocator(kCFAllocatorMallocZone);
    CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
    g_old_cfallocator_malloc_zone = context->allocate;
    CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    context->allocate = oom_killer_cfallocator_malloc_zone;
  } else {
    DLOG(WARNING) << "Internals of CFAllocator not known; out-of-memory "
                     "failures via CFAllocator will not result in termination. "
                     "http://crbug.com/45650";
  }
#endif  // !defined(ADDRESS_SANITIZER)

  // === Cocoa NSObject allocation ===

  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].

  CHECK(!g_old_allocWithZone)
      << "Old allocator unexpectedly non-null";

  Class nsobject_class = [NSObject class];
  Method orig_method = class_getClassMethod(nsobject_class,
                                            @selector(allocWithZone:));
  g_old_allocWithZone = reinterpret_cast<allocWithZone_t>(
      method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}
29 551
30 } // namespace base 552 } // namespace base
OLDNEW
« no previous file with comments | « base/allocator/allocator_interception_mac.mm ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698