Chromium Code Reviews

Diff: base/process/memory_mac.mm

Issue 278923002: Use the new ScopedMachVM class and the MACH_LOG family of logging macros (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 6 years, 7 months ago
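
In this file the change is mechanical: bare CHECK(result == KERN_SUCCESS) assertions around Mach calls are replaced with MACH_CHECK from the newly included base/mac/mach_logging.h, which folds the failing kern_return_t into the log message. A minimal sketch of the pattern, with address and size as illustrative placeholders rather than names from this file:

  // Sketch of the CHECK -> MACH_CHECK pattern; address and size are
  // hypothetical placeholders for this example.
  kern_return_t kr = mach_vm_protect(mach_task_self(), address, size,
                                     false /* set_maximum */,
                                     VM_PROT_READ | VM_PROT_WRITE);
  // A bare CHECK(kr == KERN_SUCCESS) aborts without saying which Mach call
  // failed or why; MACH_CHECK also logs the kern_return_t value.
  MACH_CHECK(kr == KERN_SUCCESS, kr) << "mach_vm_protect";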
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/process/memory.h"

#include <CoreFoundation/CoreFoundation.h>
#include <errno.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <malloc/malloc.h>
#import <objc/runtime.h>

#include <new>

#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/mac/mac_util.h"
+#include "base/mac/mach_logging.h"
#include "base/scoped_clear_errno.h"
#include "third_party/apple_apsl/CFBase.h"
#include "third_party/apple_apsl/malloc.h"

#if ARCH_CPU_32_BITS
#include <dlfcn.h>
#include <mach-o/nlist.h>

#include "base/threading/thread_local.h"
#include "third_party/mach_override/mach_override.h"
(...skipping 186 matching lines...)
  struct vm_region_basic_info_64 info;
  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
  kern_return_t result =
      mach_vm_region(mach_task_self(),
                     reprotection_start,
                     reprotection_length,
                     VM_REGION_BASIC_INFO_64,
                     reinterpret_cast<vm_region_info_t>(&info),
                     &count,
                     &unused);
-  CHECK(result == KERN_SUCCESS);
+  MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_region";

-  result = mach_port_deallocate(mach_task_self(), unused);
+  mach_port_deallocate(mach_task_self(), unused);
Robert Sesek 2014/05/09 20:40:01 Technically this _can_ fail. But it's not likely.
Mark Mentovai 2014/05/09 21:09:35 rsesek wrote:
-  CHECK(result == KERN_SUCCESS);
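Picking up the thread above: if a mach_port_deallocate failure is worth surfacing without being fatal, one option is to keep the return value and assert on it only in debug builds. A sketch, assuming the mach_logging.h family also provides a debug-only MACH_DCHECK counterpart to MACH_CHECK:

  // Sketch only: report mach_port_deallocate failures in debug builds.
  // MACH_DCHECK is assumed to exist alongside MACH_CHECK in mach_logging.h.
  kern_return_t kr = mach_port_deallocate(mach_task_self(), unused);
  MACH_DCHECK(kr == KERN_SUCCESS, kr) << "mach_port_deallocate";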

  // Does the region fully enclose the zone pointers? Possibly unwarranted
  // simplification used: using the size of a full version 8 malloc zone rather
  // than the actual smaller size if the passed-in zone is not version 8.
  CHECK(*reprotection_start <=
        reinterpret_cast<mach_vm_address_t>(default_zone));
  mach_vm_size_t zone_offset = reinterpret_cast<mach_vm_size_t>(default_zone) -
      reinterpret_cast<mach_vm_size_t>(*reprotection_start);
  CHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);

  if (info.protection & VM_PROT_WRITE) {
    // No change needed; the zone is already writable.
    *reprotection_start = 0;
    *reprotection_length = 0;
    *reprotection_value = VM_PROT_NONE;
  } else {
    *reprotection_value = info.protection;
    result = mach_vm_protect(mach_task_self(),
                             *reprotection_start,
                             *reprotection_length,
                             false,
                             info.protection | VM_PROT_WRITE);
-    CHECK(result == KERN_SUCCESS);
+    MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
  }
}

// === C malloc/calloc/valloc/realloc/posix_memalign ===

typedef void* (*malloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
                             size_t num_items,
                             size_t size);
(...skipping 377 matching lines...)
  }

  // Restore protection if it was active.

  if (default_reprotection_start) {
    kern_return_t result = mach_vm_protect(mach_task_self(),
                                           default_reprotection_start,
                                           default_reprotection_length,
                                           false,
                                           default_reprotection_value);
-    CHECK(result == KERN_SUCCESS);
+    MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
  }

  if (purgeable_reprotection_start) {
    kern_return_t result = mach_vm_protect(mach_task_self(),
                                           purgeable_reprotection_start,
                                           purgeable_reprotection_length,
                                           false,
                                           purgeable_reprotection_value);
-    CHECK(result == KERN_SUCCESS);
+    MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
  }
#endif

  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that it
  // can return less than the requested memory even in a non-out-of-memory
(...skipping 68 matching lines...)
                                               @selector(allocWithZone:));
  g_old_allocWithZone = reinterpret_cast<allocWithZone_t>(
      method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}

}  // namespace base