OLD | NEW |
---|---|
1 // Copyright (c) 2008 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2008 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 | 5 |
6 #include "base/process_util.h" | 6 #include "base/process_util.h" |
7 | 7 |
8 #import <Cocoa/Cocoa.h> | 8 #import <Cocoa/Cocoa.h> |
9 #include <crt_externs.h> | 9 #include <crt_externs.h> |
10 #include <mach/mach.h> | 10 #include <mach/mach.h> |
(...skipping 339 matching lines...)
350 | 350 |
351 return (data.active_count * page_size) / 1024; | 351 return (data.active_count * page_size) / 1024; |
352 } | 352 } |
353 | 353 |
354 // ------------------------------------------------------------------------ | 354 // ------------------------------------------------------------------------ |
355 | 355 |
356 namespace { | 356 namespace { |
357 | 357 |
358 bool g_oom_killer_enabled; | 358 bool g_oom_killer_enabled; |
359 | 359 |
360 // === C malloc/calloc/valloc/realloc === | 360 // === C malloc/calloc/valloc/realloc/posix_memalign === |
361 | |
362 // The extended version of malloc_zone_t from the 10.6 SDK's <malloc/malloc.h>, | |
363 // included here to allow for compilation in 10.5. (10.5 has version 3 zone | |
364 // allocators, while 10.6 has version 6 allocators.) | |
365 struct ChromeMallocZone { | |
366 void* reserved1; | |
367 void* reserved2; | |
368 size_t (*size)(struct _malloc_zone_t* zone, const void* ptr); | |
369 void* (*malloc)(struct _malloc_zone_t* zone, size_t size); | |
370 void* (*calloc)(struct _malloc_zone_t* zone, size_t num_items, size_t size); | |
371 void* (*valloc)(struct _malloc_zone_t* zone, size_t size); | |
372 void (*free)(struct _malloc_zone_t* zone, void* ptr); | |
373 void* (*realloc)(struct _malloc_zone_t* zone, void* ptr, size_t size); | |
374 void (*destroy)(struct _malloc_zone_t* zone); | |
375 const char* zone_name; | |
376 unsigned (*batch_malloc)(struct _malloc_zone_t* zone, size_t size, | |
377 void** results, unsigned num_requested); | |
378 void (*batch_free)(struct _malloc_zone_t* zone, void** to_be_freed, | |
379 unsigned num_to_be_freed); | |
380 struct malloc_introspection_t* introspect; | |
381 unsigned version; | |
382 void* (*memalign)(struct _malloc_zone_t* zone, size_t alignment, | |
383 size_t size); // version >= 5 | |
384 void (*free_definite_size)(struct _malloc_zone_t* zone, void* ptr, | |
385 size_t size); // version >= 6 | |
386 }; | |
361 | 387 |
362 typedef void* (*malloc_type)(struct _malloc_zone_t* zone, | 388 typedef void* (*malloc_type)(struct _malloc_zone_t* zone, |
363 size_t size); | 389 size_t size); |
364 typedef void* (*calloc_type)(struct _malloc_zone_t* zone, | 390 typedef void* (*calloc_type)(struct _malloc_zone_t* zone, |
365 size_t num_items, | 391 size_t num_items, |
366 size_t size); | 392 size_t size); |
367 typedef void* (*valloc_type)(struct _malloc_zone_t* zone, | 393 typedef void* (*valloc_type)(struct _malloc_zone_t* zone, |
368 size_t size); | 394 size_t size); |
369 typedef void* (*realloc_type)(struct _malloc_zone_t* zone, | 395 typedef void* (*realloc_type)(struct _malloc_zone_t* zone, |
370 void* ptr, | 396 void* ptr, |
371 size_t size); | 397 size_t size); |
398 typedef void* (*memalign_type)(struct _malloc_zone_t* zone, | |
399 size_t alignment, | |
400 size_t size); | |
372 | 401 |
373 malloc_type g_old_malloc; | 402 malloc_type g_old_malloc; |
374 calloc_type g_old_calloc; | 403 calloc_type g_old_calloc; |
375 valloc_type g_old_valloc; | 404 valloc_type g_old_valloc; |
376 realloc_type g_old_realloc; | 405 realloc_type g_old_realloc; |
406 memalign_type g_old_memalign; | |
377 | 407 |
378 void* oom_killer_malloc(struct _malloc_zone_t* zone, | 408 void* oom_killer_malloc(struct _malloc_zone_t* zone, |
379 size_t size) { | 409 size_t size) { |
380 void* result = g_old_malloc(zone, size); | 410 void* result = g_old_malloc(zone, size); |
381 if (size && !result) | 411 if (!result && size) |
382 DebugUtil::BreakDebugger(); | 412 DebugUtil::BreakDebugger(); |
383 return result; | 413 return result; |
384 } | 414 } |
385 | 415 |
386 void* oom_killer_calloc(struct _malloc_zone_t* zone, | 416 void* oom_killer_calloc(struct _malloc_zone_t* zone, |
387 size_t num_items, | 417 size_t num_items, |
388 size_t size) { | 418 size_t size) { |
389 void* result = g_old_calloc(zone, num_items, size); | 419 void* result = g_old_calloc(zone, num_items, size); |
390 if (num_items && size && !result) | 420 if (!result && num_items && size) |
391 DebugUtil::BreakDebugger(); | 421 DebugUtil::BreakDebugger(); |
392 return result; | 422 return result; |
393 } | 423 } |
394 | 424 |
395 void* oom_killer_valloc(struct _malloc_zone_t* zone, | 425 void* oom_killer_valloc(struct _malloc_zone_t* zone, |
396 size_t size) { | 426 size_t size) { |
397 void* result = g_old_valloc(zone, size); | 427 void* result = g_old_valloc(zone, size); |
398 if (size && !result) | 428 if (!result && size) |
399 DebugUtil::BreakDebugger(); | 429 DebugUtil::BreakDebugger(); |
400 return result; | 430 return result; |
401 } | 431 } |
402 | 432 |
403 void* oom_killer_realloc(struct _malloc_zone_t* zone, | 433 void* oom_killer_realloc(struct _malloc_zone_t* zone, |
404 void* ptr, | 434 void* ptr, |
405 size_t size) { | 435 size_t size) { |
406 void* result = g_old_realloc(zone, ptr, size); | 436 void* result = g_old_realloc(zone, ptr, size); |
407 if (size && !result) | 437 if (!result && size) |
438 DebugUtil::BreakDebugger(); | |
439 return result; | |
440 } | |
441 | |
442 void* oom_killer_memalign(struct _malloc_zone_t* zone, | |
443 size_t alignment, | |
444 size_t size) { | |
445 void* result = g_old_memalign(zone, alignment, size); | |
446 // Only die if posix_memalign would have returned ENOMEM, since there are | |
447 // other reasons why NULL might be returned (see | |
448 // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ). | |
449 if (!result && size && alignment >= sizeof(void*) | |
450 && 0 == (alignment & (alignment - 1))) | |
Mark Mentovai
2010/05/19 20:11:17
Use {}. That rule should be applied if the condition spans multiple lines.
Mark Mentovai
2010/05/19 20:11:17
Googlers generally write |expression == 0| and not |0 == expression|.
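A sketch of how the new check might read with both of those suggestions applied (braces around the body of the multi-line condition, constant on the right-hand side); this only illustrates the review feedback and is not the committed code:

void* oom_killer_memalign(struct _malloc_zone_t* zone,
                          size_t alignment,
                          size_t size) {
  void* result = g_old_memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM: a valid alignment
  // is at least sizeof(void*) and a power of two.
  if (!result && size && alignment >= sizeof(void*) &&
      (alignment & (alignment - 1)) == 0) {
    DebugUtil::BreakDebugger();
  }
  return result;
}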
408 DebugUtil::BreakDebugger(); | 451 DebugUtil::BreakDebugger(); |
409 return result; | 452 return result; |
410 } | 453 } |
411 | 454 |
412 // === C++ operator new === | 455 // === C++ operator new === |
413 | 456 |
414 void oom_killer_new() { | 457 void oom_killer_new() { |
415 DebugUtil::BreakDebugger(); | 458 DebugUtil::BreakDebugger(); |
416 } | 459 } |
417 | 460 |
(...skipping 77 matching lines...)
495 } | 538 } |
496 | 539 |
497 } // namespace | 540 } // namespace |
498 | 541 |
499 void EnableTerminationOnOutOfMemory() { | 542 void EnableTerminationOnOutOfMemory() { |
500 if (g_oom_killer_enabled) | 543 if (g_oom_killer_enabled) |
501 return; | 544 return; |
502 | 545 |
503 g_oom_killer_enabled = true; | 546 g_oom_killer_enabled = true; |
504 | 547 |
505 // === C malloc/calloc/valloc/realloc === | 548 // === C malloc/calloc/valloc/realloc/posix_memalign === |
506 | 549 |
507 // This approach is not perfect, as requests for amounts of memory larger than | 550 // This approach is not perfect, as requests for amounts of memory larger than |
508 // MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will | 551 // MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will |
509 // still fail with a NULL rather than dying (see | 552 // still fail with a NULL rather than dying (see |
510 // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for details). | 553 // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for details). |
511 // Unfortunately, it's the best we can do. Also note that this does not affect | 554 // Unfortunately, it's the best we can do. Also note that this does not affect |
512 // allocations from non-default zones. | 555 // allocations from non-default zones. |
513 | 556 |
514 CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc) | 557 CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc && |
515 << "Old allocators unexpectedly non-null"; | 558 !g_old_memalign) << "Old allocators unexpectedly non-null"; |
516 | 559 |
517 int32 major; | 560 int32 major; |
518 int32 minor; | 561 int32 minor; |
519 int32 bugfix; | 562 int32 bugfix; |
520 SysInfo::OperatingSystemVersionNumbers(&major, &minor, &bugfix); | 563 SysInfo::OperatingSystemVersionNumbers(&major, &minor, &bugfix); |
521 bool zone_allocators_protected = ((major == 10 && minor > 6) || major > 10); | 564 bool zone_allocators_protected = ((major == 10 && minor > 6) || major > 10); |
522 | 565 |
523 malloc_zone_t* default_zone = malloc_default_zone(); | 566 ChromeMallocZone* default_zone = |
567 reinterpret_cast<ChromeMallocZone*>(malloc_default_zone()); | |
524 | 568 |
525 vm_address_t page_start = NULL; | 569 vm_address_t page_start = NULL; |
526 vm_size_t len = 0; | 570 vm_size_t len = 0; |
527 if (zone_allocators_protected) { | 571 if (zone_allocators_protected) { |
528 // See http://trac.webkit.org/changeset/53362/trunk/WebKitTools/DumpRenderTree/mac | 572 // See http://trac.webkit.org/changeset/53362/trunk/WebKitTools/DumpRenderTree/mac |
529 page_start = reinterpret_cast<vm_address_t>(default_zone) & | 573 page_start = reinterpret_cast<vm_address_t>(default_zone) & |
530 static_cast<vm_size_t>(~(getpagesize() - 1)); | 574 static_cast<vm_size_t>(~(getpagesize() - 1)); |
531 len = reinterpret_cast<vm_address_t>(default_zone) - | 575 len = reinterpret_cast<vm_address_t>(default_zone) - |
532 page_start + sizeof(malloc_zone_t); | 576 page_start + sizeof(malloc_zone_t); |
533 mprotect(reinterpret_cast<void*>(page_start), len, PROT_READ | PROT_WRITE); | 577 mprotect(reinterpret_cast<void*>(page_start), len, PROT_READ | PROT_WRITE); |
534 } | 578 } |
535 | 579 |
536 g_old_malloc = default_zone->malloc; | 580 g_old_malloc = default_zone->malloc; |
537 g_old_calloc = default_zone->calloc; | 581 g_old_calloc = default_zone->calloc; |
538 g_old_valloc = default_zone->valloc; | 582 g_old_valloc = default_zone->valloc; |
539 g_old_realloc = default_zone->realloc; | 583 g_old_realloc = default_zone->realloc; |
540 CHECK(g_old_malloc && g_old_calloc && g_old_valloc && g_old_realloc) | 584 CHECK(g_old_malloc && g_old_calloc && g_old_valloc && g_old_realloc) |
541 << "Failed to get system allocation functions."; | 585 << "Failed to get system allocation functions."; |
542 | 586 |
543 default_zone->malloc = oom_killer_malloc; | 587 default_zone->malloc = oom_killer_malloc; |
544 default_zone->calloc = oom_killer_calloc; | 588 default_zone->calloc = oom_killer_calloc; |
545 default_zone->valloc = oom_killer_valloc; | 589 default_zone->valloc = oom_killer_valloc; |
546 default_zone->realloc = oom_killer_realloc; | 590 default_zone->realloc = oom_killer_realloc; |
547 | 591 |
592 if (default_zone->version >= 5) { | |
593 g_old_memalign = default_zone->memalign; | |
594 if (g_old_memalign) | |
595 default_zone->memalign = oom_killer_memalign; | |
596 } | |
597 | |
548 if (zone_allocators_protected) { | 598 if (zone_allocators_protected) { |
549 mprotect(reinterpret_cast<void*>(page_start), len, PROT_READ); | 599 mprotect(reinterpret_cast<void*>(page_start), len, PROT_READ); |
550 } | 600 } |
551 | 601 |
602 // === C malloc_zone_batch_malloc === | |
603 | |
604 // batch_malloc is omitted because the default malloc zone's implementation | |
605 // only supports batch_malloc for "tiny" allocations from the free list. It | |
606 // will fail for allocations larger than "tiny", and will only allocate as | |
607 // many blocks as it's able to from the free list. These factors mean that it | |
608 // can return less than the requested memory even in a non-out-of-memory | |
609 // situation. There's no good way to detect whether a batch_malloc failure is | |
610 // due to these other factors, or due to genuine memory or address space | |
611 // exhaustion. The fact that it only allocates space from the "tiny" free list | |
612 // means that it's likely that a failure will not be due to memory exhaustion. | |
613 // Similarly, these constraints on batch_malloc mean that callers must always | |
614 // be expecting to receive less memory than was requested, even in situations | |
615 // where memory pressure is not a concern. Finally, the only public interface | |
616 // to batch_malloc is malloc_zone_batch_malloc, which is specific to the | |
617 // system's malloc implementation. It's unlikely that anyone's even heard of | |
618 // it. | |
619 | |
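For reference, malloc_zone_batch_malloc is the public interface declared in <malloc/malloc.h>; a minimal hypothetical caller (not part of this change) shows why a short return count is ambiguous:

#include <malloc/malloc.h>

void BatchMallocExample() {
  void* blocks[100];
  // Ask the default zone for 100 blocks of 32 bytes each.
  unsigned obtained =
      malloc_zone_batch_malloc(malloc_default_zone(), 32, blocks, 100);
  // |obtained| can be less than 100 even with plenty of memory available,
  // e.g. when the "tiny" free list has fewer cached blocks, so a shortfall
  // here cannot be treated as an out-of-memory condition.
  malloc_zone_batch_free(malloc_default_zone(), blocks, obtained);
}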
552 // === C++ operator new === | 620 // === C++ operator new === |
553 | 621 |
554 // Yes, operator new does call through to malloc, but this will catch failures | 622 // Yes, operator new does call through to malloc, but this will catch failures |
555 // that our imperfect handling of malloc cannot. | 623 // that our imperfect handling of malloc cannot. |
556 | 624 |
557 std::set_new_handler(oom_killer_new); | 625 std::set_new_handler(oom_killer_new); |
558 | 626 |
559 // === Core Foundation CFAllocators === | 627 // === Core Foundation CFAllocators === |
560 | 628 |
561 // This will not catch allocation done by custom allocators, but will catch | 629 // This will not catch allocation done by custom allocators, but will catch |
(...skipping 37 matching lines...)
599 @selector(allocWithZone:)); | 667 @selector(allocWithZone:)); |
600 g_old_allocWithZone = reinterpret_cast<allocWithZone_t>( | 668 g_old_allocWithZone = reinterpret_cast<allocWithZone_t>( |
601 method_getImplementation(orig_method)); | 669 method_getImplementation(orig_method)); |
602 CHECK(g_old_allocWithZone) | 670 CHECK(g_old_allocWithZone) |
603 << "Failed to get allocWithZone allocation function."; | 671 << "Failed to get allocWithZone allocation function."; |
604 method_setImplementation(orig_method, | 672 method_setImplementation(orig_method, |
605 reinterpret_cast<IMP>(oom_killer_allocWithZone)); | 673 reinterpret_cast<IMP>(oom_killer_allocWithZone)); |
606 } | 674 } |
607 | 675 |
608 } // namespace base | 676 } // namespace base |
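A minimal sketch of how a process might opt in at startup, assuming the declaration exposed through base/process_util.h (illustrative only):

#include "base/process_util.h"

int main(int argc, char* argv[]) {
  // Install the handlers as early as possible so that a failed allocation in
  // the default malloc zone, operator new, the default CFAllocators, or
  // -[NSObject allocWithZone:] crashes instead of returning NULL/nil.
  base::EnableTerminationOnOutOfMemory();
  // ... remainder of process startup ...
  return 0;
}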