OLD | NEW |
(Empty) | |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "base/allocator/allocator_shim.h" |
| 6 |
| 7 #include <errno.h> |
| 8 #include <unistd.h> |
| 9 |
| 10 #include <new> |
| 11 |
| 12 #include "base/atomicops.h" |
| 13 #include "base/logging.h" |
| 14 #include "base/macros.h" |
| 15 #include "base/threading/platform_thread.h" |
| 16 #include "build/build_config.h" |
| 17 |
// No calls to malloc / new in this file. They would cause re-entrancy of
// the shim, which is hard to deal with. Keep this code as simple as possible
// and don't use any external C++ object here, not even //base ones. Even if
// they are safe to use today, in future they might be refactored.
| 22 |
| 23 namespace { |
| 24 |
using namespace base;

// Head of the singly-linked dispatch chain. Stored as an AtomicWord so the
// malloc fast path can read it without locks; initially points at the
// default dispatch.
subtle::AtomicWord g_chain_head = reinterpret_cast<subtle::AtomicWord>(
    &allocator::AllocatorDispatch::default_dispatch);

// When true, failed malloc()/calloc()/realloc()/memalign() calls invoke the
// std::new_handler before giving up (see SetCallNewHandlerOnMallocFailure()).
bool g_call_new_handler_on_malloc_failure = false;
// Spinlock (0 = unlocked, 1 = locked) guarding the get/restore dance on the
// std::new_handler in CallNewHandler().
subtle::Atomic32 g_new_handler_lock = 0;
| 32 |
// In theory this should be just base::ThreadChecker. But we can't afford
// the luxury of a LazyInstance<ThreadChecker> here as it would cause a new().
// Returns true on the first ever call (latching the caller's thread id via a
// compare-and-swap) and on every later call made from that same thread.
bool CalledOnValidThread() {
  using subtle::Atomic32;
  const Atomic32 kInvalidTID = static_cast<Atomic32>(kInvalidThreadId);
  static Atomic32 g_tid = kInvalidTID;
  Atomic32 cur_tid = static_cast<Atomic32>(PlatformThread::CurrentId());
  // Atomically claim g_tid for the current thread iff it is still unset.
  Atomic32 prev_tid =
      subtle::NoBarrier_CompareAndSwap(&g_tid, kInvalidTID, cur_tid);
  return prev_tid == kInvalidTID || prev_tid == cur_tid;
}
| 44 |
// Returns the system page size, querying sysconf() only once.
// Uses a function-local static ("magic static"): C++11 guarantees its
// initialization is thread-safe, unlike the previous check-then-write lazy
// init, which was a data race when first called from multiple threads.
inline size_t GetPageSize() {
  static const size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  return pagesize;
}
| 51 |
| 52 // Calls the std::new handler thread-safely. Returns true if a new_handler was |
| 53 // set and called, false if no new_handler was set. |
| 54 bool CallNewHandler() { |
| 55 // TODO(primiano): C++11 has introduced ::get_new_handler() which is supposed |
| 56 // to be thread safe and would avoid the spinlock boilerplate here. However |
| 57 // it doesn't seem to be available yet in the Linux chroot headers yet. |
| 58 std::new_handler nh; |
| 59 { |
| 60 while (subtle::Acquire_CompareAndSwap(&g_new_handler_lock, 0, 1)) |
| 61 PlatformThread::YieldCurrentThread(); |
| 62 nh = std::set_new_handler(0); |
| 63 ignore_result(std::set_new_handler(nh)); |
| 64 subtle::Release_Store(&g_new_handler_lock, 0); |
| 65 } |
| 66 if (!nh) |
| 67 return false; |
| 68 (*nh)(); |
| 69 // Assume the new_handler will abort if it fails. Exception are disabled and |
| 70 // we don't support the case of a new_handler throwing std::bad_balloc. |
| 71 return true; |
| 72 } |
| 73 |
// Reads the current head of the dispatch chain. The NoBarrier load is
// intentional: see the comment in InsertAllocatorDispatch() about avoiding an
// acquire-load on the malloc() fast path.
inline const allocator::AllocatorDispatch* GetChainHead() {
  return reinterpret_cast<const allocator::AllocatorDispatch*>(
      subtle::NoBarrier_Load(&g_chain_head));
}
| 78 |
| 79 } // namespace |
| 80 |
| 81 namespace base { |
| 82 namespace allocator { |
| 83 |
// When |value| is true, a failed malloc()-family allocation will invoke the
// std::new_handler before returning nullptr (see the Shim* functions below).
void SetCallNewHandlerOnMallocFailure(bool value) {
  g_call_new_handler_on_malloc_failure = value;
}
| 87 |
| 88 void* UncheckedAlloc(size_t size) { |
| 89 const allocator::AllocatorDispatch* const chain_head = GetChainHead(); |
| 90 return chain_head->alloc_function(chain_head, size); |
| 91 } |
| 92 |
// Prepends |dispatch| to the chain, making it the new head. Must always be
// called from the same thread (DCHECK'd below); not thread-safe w.r.t.
// concurrent insertions.
void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
  // Ensure this is always called on the same thread.
  DCHECK(CalledOnValidThread());

  dispatch->next = GetChainHead();

  // This function does not guarantee to be thread-safe w.r.t. concurrent
  // insertions, but still has to guarantee that all the threads always
  // see a consistent chain, hence the MemoryBarrier() below.
  // InsertAllocatorDispatch() is NOT a fastpath, as opposed to malloc(), so
  // we don't really want this to be a release-store with a corresponding
  // acquire-load during malloc().
  subtle::MemoryBarrier();

  subtle::NoBarrier_Store(&g_chain_head,
                          reinterpret_cast<subtle::AtomicWord>(dispatch));
}
| 110 |
// Unlinks |dispatch| from the chain; only the current head can be removed
// (DCHECK'd below). Test-only, and restricted to the same thread that
// inserted it.
void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
  DCHECK(CalledOnValidThread());
  DCHECK_EQ(GetChainHead(), dispatch);
  subtle::NoBarrier_Store(&g_chain_head,
                          reinterpret_cast<subtle::AtomicWord>(dispatch->next));
}
| 117 |
| 118 } // namespace allocator |
| 119 } // namespace base |
| 120 |
| 121 // The Shim* functions below are the entry-points into the shim-layer and |
| 122 // are supposed to be invoked / aliased by the allocator_shim_override_* |
| 123 // headers to route the malloc / new symbols through the shim layer. |
| 124 extern "C" { |
| 125 |
// The general pattern for allocations is:
// - Try to allocate; if it succeeded, return the pointer.
// - If the allocation failed:
//   - Call the std::new_handler if it was a C++ allocation.
//   - Call the std::new_handler if it was a malloc() (or calloc() or similar)
//     AND SetCallNewHandlerOnMallocFailure(true).
//   - If the std::new_handler is NOT set just return nullptr.
//   - If the std::new_handler is set:
//     - Assume it will abort() if it fails (very likely the new_handler will
//       just abort, printing a message).
//     - Assume it did succeed if it returns, in which case reattempt the alloc.
| 137 |
| 138 void* ShimCppNew(size_t size) { |
| 139 const allocator::AllocatorDispatch* const chain_head = GetChainHead(); |
| 140 void* ptr; |
| 141 do { |
| 142 ptr = chain_head->alloc_function(chain_head, size); |
| 143 } while (!ptr && CallNewHandler()); |
| 144 return ptr; |
| 145 } |
| 146 |
| 147 void ShimCppDelete(void* address) { |
| 148 const allocator::AllocatorDispatch* const chain_head = GetChainHead(); |
| 149 return chain_head->free_function(chain_head, address); |
| 150 } |
| 151 |
| 152 void* ShimMalloc(size_t size) { |
| 153 const allocator::AllocatorDispatch* const chain_head = GetChainHead(); |
| 154 void* ptr; |
| 155 do { |
| 156 ptr = chain_head->alloc_function(chain_head, size); |
| 157 } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler()); |
| 158 return ptr; |
| 159 } |
| 160 |
| 161 void* ShimCalloc(size_t n, size_t size) { |
| 162 const allocator::AllocatorDispatch* const chain_head = GetChainHead(); |
| 163 void* ptr; |
| 164 do { |
| 165 ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size); |
| 166 } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler()); |
| 167 return ptr; |
| 168 } |
| 169 |
| 170 void* ShimRealloc(void* address, size_t size) { |
| 171 // realloc(size == 0) means free() and might return a nullptr. We should |
| 172 // not call the std::new_handler in that case, though. |
| 173 const allocator::AllocatorDispatch* const chain_head = GetChainHead(); |
| 174 void* ptr; |
| 175 do { |
| 176 ptr = chain_head->realloc_function(chain_head, address, size); |
| 177 } while (!ptr && size && g_call_new_handler_on_malloc_failure && |
| 178 CallNewHandler()); |
| 179 return ptr; |
| 180 } |
| 181 |
| 182 void* ShimMemalign(size_t alignment, size_t size) { |
| 183 const allocator::AllocatorDispatch* const chain_head = GetChainHead(); |
| 184 void* ptr; |
| 185 do { |
| 186 ptr = chain_head->alloc_aligned_function(chain_head, alignment, size); |
| 187 } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler()); |
| 188 return ptr; |
| 189 } |
| 190 |
| 191 int ShimPosixMemalign(void** res, size_t alignment, size_t size) { |
| 192 // posix_memalign is supposed to check the arguments. See tc_posix_memalign() |
| 193 // in tc_malloc.cc. |
| 194 if (((alignment % sizeof(void*)) != 0) || |
| 195 ((alignment & (alignment - 1)) != 0) || (alignment == 0)) { |
| 196 return EINVAL; |
| 197 } |
| 198 void* ptr = ShimMemalign(alignment, size); |
| 199 *res = ptr; |
| 200 return ptr ? 0 : ENOMEM; |
| 201 } |
| 202 |
// valloc() entry point: a page-aligned allocation of |size| bytes.
void* ShimValloc(size_t size) {
  return ShimMemalign(GetPageSize(), size);
}
| 206 |
| 207 void* ShimPvalloc(size_t size) { |
| 208 // pvalloc(0) should allocate one page, according to its man page. |
| 209 if (size == 0) { |
| 210 size = GetPageSize(); |
| 211 } else { |
| 212 size = (size + GetPageSize() - 1) & ~(GetPageSize() - 1); |
| 213 } |
| 214 return ShimMemalign(GetPageSize(), size); |
| 215 } |
| 216 |
| 217 void ShimFree(void* address) { |
| 218 const allocator::AllocatorDispatch* const chain_head = GetChainHead(); |
| 219 return chain_head->free_function(chain_head, address); |
| 220 } |
| 221 |
| 222 } // extern "C" |
| 223 |
| 224 // Cpp symbols (new / delete) should always be routed through the shim layer. |
| 225 #include "base/allocator/allocator_shim_override_cpp_symbols.h" |
| 226 |
| 227 // Ditto for plain malloc() / calloc() / free() etc. symbols. |
| 228 #include "base/allocator/allocator_shim_override_libc_symbols.h" |
| 229 |
// In the case of tcmalloc we also want to plumb into the glibc hooks, so that
// allocations made inside glibc itself (e.g., strdup()) are not accidentally
// performed on the glibc heap instead of the tcmalloc one.
| 233 #if defined(USE_TCMALLOC) |
| 234 #include "base/allocator/allocator_shim_override_glibc_weak_symbols.h" |
| 235 #endif |
| 236 |
| 237 // Cross-checks. |
| 238 |
| 239 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 240 #error The allocator shim should not be compiled when building for memory tools. |
| 241 #endif |
| 242 |
| 243 #if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \ |
| 244 (defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS) |
| 245 #error This code cannot be used when exceptions are turned on. |
| 246 #endif |
OLD | NEW |