OLD | NEW |
1 /* Copyright (c) 2006, Google Inc. | 1 /* Copyright (c) 2006, Google Inc. |
2 * All rights reserved. | 2 * All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 93 matching lines...)
104 #include <inttypes.h> | 104 #include <inttypes.h> |
105 #endif | 105 #endif |
106 #ifdef HAVE_MMAP | 106 #ifdef HAVE_MMAP |
107 #include <sys/mman.h> | 107 #include <sys/mman.h> |
108 #elif !defined(MAP_FAILED) | 108 #elif !defined(MAP_FAILED) |
109 #define MAP_FAILED -1 // the only thing we need from mman.h | 109 #define MAP_FAILED -1 // the only thing we need from mman.h |
110 #endif | 110 #endif |
111 #ifdef HAVE_PTHREAD | 111 #ifdef HAVE_PTHREAD |
112 #include <pthread.h> // for pthread_t, pthread_self() | 112 #include <pthread.h> // for pthread_t, pthread_self() |
113 #endif | 113 #endif |
| 114 #include <stddef.h> |
114 | 115 |
115 #include <algorithm> | 116 #include <algorithm> |
116 #include <set> | 117 #include <set> |
117 | 118 |
118 #include "memory_region_map.h" | 119 #include "memory_region_map.h" |
119 | 120 |
120 #include "base/linux_syscall_support.h" | |
121 #include "base/logging.h" | 121 #include "base/logging.h" |
122 #include "base/low_level_alloc.h" | 122 #include "base/low_level_alloc.h" |
123 #include "malloc_hook-inl.h" | 123 #include "malloc_hook-inl.h" |
124 | 124 |
125 #include <google/stacktrace.h> | 125 #include <google/stacktrace.h> |
126 #include <google/malloc_hook.h> | 126 #include <google/malloc_hook.h> |
127 | 127 |
128 // MREMAP_FIXED is a Linux extension. Given how it's used in this | 128 // MREMAP_FIXED is a Linux extension. Given how it's used in this |
129 // file, setting it to 0 is equivalent to saying "this feature isn't | 129 // file, setting it to 0 is equivalent to saying "this feature isn't |
130 // supported", which is correct. | 130 // supported", which is correct. |
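The comment above already tells the whole story: when the platform's headers don't provide MREMAP_FIXED, defining it to 0 simply means the mremap-to-fixed-address case is never taken. The guard in the elided lines presumably looks roughly like the sketch below (not copied from the file):

  #ifndef MREMAP_FIXED
  #define MREMAP_FIXED 0  // flag never matches, so the "fixed" mremap path is never used
  #endif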
(...skipping 57 matching lines...)
188 "need to increase kMaxStackDepth?"); | 188 "need to increase kMaxStackDepth?"); |
189 Lock(); | 189 Lock(); |
190 client_count_ += 1; | 190 client_count_ += 1; |
191 max_stack_depth_ = max(max_stack_depth_, max_stack_depth); | 191 max_stack_depth_ = max(max_stack_depth_, max_stack_depth); |
192 if (client_count_ > 1) { | 192 if (client_count_ > 1) { |
193 // not first client: already did initialization-proper | 193 // not first client: already did initialization-proper |
194 Unlock(); | 194 Unlock(); |
195 RAW_VLOG(10, "MemoryRegionMap Init increment done"); | 195 RAW_VLOG(10, "MemoryRegionMap Init increment done"); |
196 return; | 196 return; |
197 } | 197 } |
198 // Set our hooks and make sure no other hooks existed: | 198 // Set our hooks and make sure they were installed: |
199 if (MallocHook::SetMmapHook(MmapHook) != NULL || | 199 RAW_CHECK(MallocHook::AddMmapHook(&MmapHook), ""); |
200 MallocHook::SetMremapHook(MremapHook) != NULL || | 200 RAW_CHECK(MallocHook::AddMremapHook(&MremapHook), ""); |
201 MallocHook::SetSbrkHook(SbrkHook) != NULL || | 201 RAW_CHECK(MallocHook::AddSbrkHook(&SbrkHook), ""); |
202 MallocHook::SetMunmapHook(MunmapHook) != NULL) { | 202 RAW_CHECK(MallocHook::AddMunmapHook(&MunmapHook), ""); |
203 RAW_LOG(FATAL, "Had other mmap/mremap/munmap/sbrk MallocHook-s set. " | |
204 "Make sure only one of MemoryRegionMap and the other " | |
205 "client is active."); | |
206 } | |
207 // We need to set recursive_insert since the NewArena call itself | 203 // We need to set recursive_insert since the NewArena call itself |
208 // will already do some allocations with mmap which our hooks will catch | 204 // will already do some allocations with mmap which our hooks will catch |
209 // recursive_insert allows us to buffer info about these mmap calls. | 205 // recursive_insert allows us to buffer info about these mmap calls. |
210 // Note that Init() can be (and is) sometimes called | 206 // Note that Init() can be (and is) sometimes called |
211 // already from within an mmap/sbrk hook. | 207 // already from within an mmap/sbrk hook. |
212 recursive_insert = true; | 208 recursive_insert = true; |
213 arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena()); | 209 arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena()); |
214 recursive_insert = false; | 210 recursive_insert = false; |
215 HandleSavedRegionsLocked(&InsertRegionLocked); // flush the buffered ones | 211 HandleSavedRegionsLocked(&InsertRegionLocked); // flush the buffered ones |
216 // Can't instead use HandleSavedRegionsLocked(&DoInsertRegionLocked) before | 212 // Can't instead use HandleSavedRegionsLocked(&DoInsertRegionLocked) before |
217 // recursive_insert = false; as InsertRegionLocked will also construct | 213 // recursive_insert = false; as InsertRegionLocked will also construct |
218 // regions_ on demand for us. | 214 // regions_ on demand for us. |
219 Unlock(); | 215 Unlock(); |
220 RAW_VLOG(10, "MemoryRegionMap Init done"); | 216 RAW_VLOG(10, "MemoryRegionMap Init done"); |
221 } | 217 } |
222 | 218 |
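The recursive_insert flag set around NewArena above is a reentrancy guard: NewArena itself calls mmap, which re-enters MmapHook before regions_ and the arena are usable, so the hook buffers those regions and Init() flushes them afterwards with HandleSavedRegionsLocked. Below is a minimal, self-contained sketch of that buffer-while-recursing pattern; all names are invented for the example, and the sketch uses a fixed-size array because nothing can be allocated while the guard is set:

  // Hypothetical illustration only; names do not match the file's.
  #include <cstddef>
  #include <cstdio>

  struct Region { void* start; size_t len; };

  static bool   recursing   = false;  // plays the role of recursive_insert
  static Region saved[32];            // fixed-size buffer: must not allocate while recursing
  static int    saved_count = 0;

  static void InsertForReal(const Region& r) {
    std::printf("recording region %p + %zu\n", r.start, r.len);
  }

  static void OnMmap(void* start, size_t len) {  // what an mmap hook would do
    Region r = { start, len };
    if (recursing) {                  // the real map isn't usable yet: just remember it
      if (saved_count < 32) saved[saved_count++] = r;
      return;
    }
    InsertForReal(r);
  }

  static void Init() {
    recursing = true;                 // the allocation below re-enters OnMmap
    OnMmap(nullptr, 4096);            // stands in for the arena's own internal mmap
    recursing = false;
    for (int i = 0; i < saved_count; ++i)  // flush what was buffered
      InsertForReal(saved[i]);
    saved_count = 0;
  }

  int main() { Init(); }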
223 bool MemoryRegionMap::Shutdown() { | 219 bool MemoryRegionMap::Shutdown() { |
224 RAW_VLOG(10, "MemoryRegionMap Shutdown"); | 220 RAW_VLOG(10, "MemoryRegionMap Shutdown"); |
225 Lock(); | 221 Lock(); |
226 RAW_CHECK(client_count_ > 0, ""); | 222 RAW_CHECK(client_count_ > 0, ""); |
227 client_count_ -= 1; | 223 client_count_ -= 1; |
228 if (client_count_ != 0) { // not last client; need not really shutdown | 224 if (client_count_ != 0) { // not last client; need not really shutdown |
229 Unlock(); | 225 Unlock(); |
230 RAW_VLOG(10, "MemoryRegionMap Shutdown decrement done"); | 226 RAW_VLOG(10, "MemoryRegionMap Shutdown decrement done"); |
231 return true; | 227 return true; |
232 } | 228 } |
233 CheckMallocHooks(); // we assume no other hooks | 229 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), ""); |
234 MallocHook::SetMmapHook(NULL); | 230 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), ""); |
235 MallocHook::SetMremapHook(NULL); | 231 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), ""); |
236 MallocHook::SetSbrkHook(NULL); | 232 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), ""); |
237 MallocHook::SetMunmapHook(NULL); | |
238 if (regions_) regions_->~RegionSet(); | 233 if (regions_) regions_->~RegionSet(); |
239 regions_ = NULL; | 234 regions_ = NULL; |
240 bool deleted_arena = LowLevelAlloc::DeleteArena(arena_); | 235 bool deleted_arena = LowLevelAlloc::DeleteArena(arena_); |
241 if (deleted_arena) { | 236 if (deleted_arena) { |
242 arena_ = 0; | 237 arena_ = 0; |
243 } else { | 238 } else { |
244 RAW_LOG(WARNING, "Can't delete LowLevelAlloc arena: it's being used"); | 239 RAW_LOG(WARNING, "Can't delete LowLevelAlloc arena: it's being used"); |
245 } | 240 } |
246 Unlock(); | 241 Unlock(); |
247 RAW_VLOG(10, "MemoryRegionMap Shutdown done"); | 242 RAW_VLOG(10, "MemoryRegionMap Shutdown done"); |
248 return deleted_arena; | 243 return deleted_arena; |
249 } | 244 } |
250 | 245 |
251 void MemoryRegionMap::CheckMallocHooks() { | |
252 if (MallocHook::GetMmapHook() != MmapHook || | |
253 MallocHook::GetMunmapHook() != MunmapHook || | |
254 MallocHook::GetMremapHook() != MremapHook || | |
255 MallocHook::GetSbrkHook() != SbrkHook) { | |
256 RAW_LOG(FATAL, "Our mmap/mremap/munmap/sbrk MallocHook-s got changed."); | |
257 } | |
258 } | |
259 | |
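The substance of this change is the move from MallocHook's single-slot Set*Hook interface, which returned the previously installed hook and forced MemoryRegionMap to demand exclusivity (the removed RAW_LOG(FATAL, ...) branch and CheckMallocHooks()), to the Add*/Remove* interface, where several hooks can coexist and each client registers and later unregisters only its own. The following toy, single-file illustration contrasts the two styles; the types and registry here are invented for the example, and only the shape of the change mirrors the diff:

  #include <cassert>
  #include <cstddef>
  #include <vector>

  typedef void (*Hook)(void* addr, long size);

  // Old style: one global slot.  Installing returns the previous hook, so a
  // client wanting exclusivity has to check the return value, as the removed
  // RAW_LOG(FATAL, ...) branch did.
  static Hook g_single_hook = nullptr;
  static Hook SetHook(Hook h) { Hook prev = g_single_hook; g_single_hook = h; return prev; }

  // New style: a list of hooks.  Each client adds its own and later removes
  // exactly that one; other clients are unaffected.
  static std::vector<Hook> g_hooks;
  static bool AddHook(Hook h) { g_hooks.push_back(h); return true; }
  static bool RemoveHook(Hook h) {
    for (size_t i = 0; i < g_hooks.size(); ++i) {
      if (g_hooks[i] == h) { g_hooks.erase(g_hooks.begin() + i); return true; }
    }
    return false;
  }

  static void MyHook(void*, long) {}

  int main() {
    assert(SetHook(&MyHook) == nullptr);  // old style: must be the only client
    assert(AddHook(&MyHook));             // new style: coexists with other clients
    assert(RemoveHook(&MyHook));          // tears down only its own registration
  }

This is also why CheckMallocHooks() disappears above: "no other hooks are installed" is no longer an invariant anyone can rely on, so Shutdown() simply RAW_CHECKs that removing its own four hooks succeeds.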
260 // Invariants (once libpthread_initialized is true): | 246 // Invariants (once libpthread_initialized is true): |
261 // * While lock_ is not held, recursion_count_ is 0 (and | 247 // * While lock_ is not held, recursion_count_ is 0 (and |
262 // lock_owner_tid_ is the previous owner, but we don't rely on | 248 // lock_owner_tid_ is the previous owner, but we don't rely on |
263 // that). | 249 // that). |
264 // * recursion_count_ and lock_owner_tid_ are only written while | 250 // * recursion_count_ and lock_owner_tid_ are only written while |
265 // both lock_ and owner_lock_ are held. They may be read under | 251 // both lock_ and owner_lock_ are held. They may be read under |
266 // just owner_lock_. | 252 // just owner_lock_. |
267 // * At entry and exit of Lock() and Unlock(), the current thread | 253 // * At entry and exit of Lock() and Unlock(), the current thread |
268 // owns lock_ iff pthread_equal(lock_owner_tid_, pthread_self()) | 254 // owns lock_ iff pthread_equal(lock_owner_tid_, pthread_self()) |
269 // && recursion_count_ > 0. | 255 // && recursion_count_ > 0. |
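These invariants describe a hand-rolled recursive lock whose ownership is tracked explicitly, since a plain mutex can't be asked who holds it. The real Lock()/Unlock() bodies sit in the elided lines and use the file's own spinlocks; what follows is only a sketch that satisfies the invariants as stated, using plain pthread mutexes and hypothetical globals:

  #include <pthread.h>

  static pthread_mutex_t lock_       = PTHREAD_MUTEX_INITIALIZER;  // the "big" lock
  static pthread_mutex_t owner_lock_ = PTHREAD_MUTEX_INITIALIZER;  // protects the two fields below
  static int       recursion_count_  = 0;
  static pthread_t lock_owner_tid_;

  static bool IsOwner() {                       // may read under just owner_lock_
    pthread_mutex_lock(&owner_lock_);
    bool own = recursion_count_ > 0 && pthread_equal(lock_owner_tid_, pthread_self());
    pthread_mutex_unlock(&owner_lock_);
    return own;
  }

  void Lock() {
    if (IsOwner()) {                            // re-entry by the owning thread
      pthread_mutex_lock(&owner_lock_);
      ++recursion_count_;                       // written with both locks held (we own lock_)
      pthread_mutex_unlock(&owner_lock_);
      return;
    }
    pthread_mutex_lock(&lock_);                 // first entry: take the big lock
    pthread_mutex_lock(&owner_lock_);
    lock_owner_tid_ = pthread_self();
    recursion_count_ = 1;
    pthread_mutex_unlock(&owner_lock_);
  }

  void Unlock() {
    pthread_mutex_lock(&owner_lock_);
    int remaining = --recursion_count_;         // reaches 0 only on the outermost Unlock()
    pthread_mutex_unlock(&owner_lock_);
    if (remaining == 0) pthread_mutex_unlock(&lock_);
  }

  int main() {
    Lock();
    Lock();     // re-entry by the same thread is fine
    Unlock();
    Unlock();   // the outermost Unlock() releases lock_
  }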
(...skipping 383 matching lines...)
653 r != regions_->end(); ++r) { | 639 r != regions_->end(); ++r) { |
654 RAW_LOG(INFO, "Memory region 0x%"PRIxPTR"..0x%"PRIxPTR" " | 640 RAW_LOG(INFO, "Memory region 0x%"PRIxPTR"..0x%"PRIxPTR" " |
655 "from 0x%"PRIxPTR" stack=%d", | 641 "from 0x%"PRIxPTR" stack=%d", |
656 r->start_addr, r->end_addr, r->caller(), r->is_stack); | 642 r->start_addr, r->end_addr, r->caller(), r->is_stack); |
657 RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order"); | 643 RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order"); |
658 // this must be caused by uncontrolled recursive operations on regions_ | 644 // this must be caused by uncontrolled recursive operations on regions_ |
659 previous = r->end_addr; | 645 previous = r->end_addr; |
660 } | 646 } |
661 RAW_LOG(INFO, "End of regions list"); | 647 RAW_LOG(INFO, "End of regions list"); |
662 } | 648 } |