Index: base/allocator/allocator_shim.cc
diff --git a/base/allocator/allocator_shim.cc b/base/allocator/allocator_shim.cc
index 48b13501fe443ddc2062a1fab5e14a43a3a19649..7a5cfd6c6eeb1983553e9757a284b5ef96d52765 100644
--- a/base/allocator/allocator_shim.cc
+++ b/base/allocator/allocator_shim.cc
@@ -142,8 +142,10 @@ void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
 }  // namespace base

 // The Shim* functions below are the entry-points into the shim-layer and
-// are supposed to be invoked / aliased by the allocator_shim_override_*
+// are supposed to be invoked by the allocator_shim_override_*
 // headers to route the malloc / new symbols through the shim layer.
+// They are defined as ALWAYS_INLINE in order to remove a level of indirection
+// between the system-defined entry points and the shim implementations.
 extern "C" {

 // The general pattern for allocations is:
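
The comment added in the hunk above says the Shim* entry points are now ALWAYS_INLINE so that the override headers do not add a level of indirection. As a minimal sketch of that arrangement, assuming hypothetical names (ShimMallocSketch, my_malloc) rather than the real allocator_shim_override_* headers and the real ALWAYS_INLINE macro from base/compiler_specific.h, an override can simply forward the system-facing symbol into the force-inlined shim function:

#include <stddef.h>
#include <stdlib.h>

// Stand-in for the ALWAYS_INLINE macro from base/compiler_specific.h.
#define ALWAYS_INLINE inline __attribute__((__always_inline__))

// Hypothetical stand-in for ShimMalloc; the real shim walks the
// AllocatorDispatch chain here instead of calling malloc directly.
ALWAYS_INLINE void* ShimMallocSketch(size_t size, void* context) {
  (void)context;
  return malloc(size);
}

extern "C" {
// Stand-in for the malloc() override an allocator_shim_override_* header
// would provide. Because ShimMallocSketch is force-inlined, this wrapper is
// the only extra call frame between the caller and the underlying allocator.
void* my_malloc(size_t size) {
  return ShimMallocSketch(size, /*context=*/nullptr);
}
}  // extern "C"
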
@@ -158,7 +160,7 @@ extern "C" {
 //       just suicide priting a message).
 //     - Assume it did succeed if it returns, in which case reattempt the alloc.

-void* ShimCppNew(size_t size) {
+ALWAYS_INLINE void* ShimCppNew(size_t size) {
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
   void* ptr;
   do {
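
The comment spanning the two hunks above describes the allocation retry pattern that ShimCppNew and the other allocating entry points follow. A minimal sketch of that loop, using hypothetical helpers (DispatchAlloc, CallNewHandlerIfSet) in place of the real dispatch chain and new-handler plumbing, looks like this:

#include <stddef.h>
#include <stdlib.h>
#include <new>

// Hypothetical stand-in for chain_head->alloc_function(chain_head, size, ctx).
static void* DispatchAlloc(size_t size) {
  return malloc(size);
}

// Returns true if a std::new_handler is installed and was invoked, in which
// case the caller should retry; returns false if no handler is set.
static bool CallNewHandlerIfSet() {
  std::new_handler handler = std::get_new_handler();
  if (!handler)
    return false;
  handler();  // Assumed to free memory or abort; if it returns, retry.
  return true;
}

// The retry loop: keep attempting the allocation until it succeeds or no
// std::new_handler is installed.
static void* AllocWithRetry(size_t size) {
  void* ptr;
  do {
    ptr = DispatchAlloc(size);
  } while (!ptr && CallNewHandlerIfSet());
  return ptr;
}
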
@@ -171,7 +173,7 @@ void* ShimCppNew(size_t size) {
   return ptr;
 }

-void ShimCppDelete(void* address) {
+ALWAYS_INLINE void ShimCppDelete(void* address) {
   void* context = nullptr;
 #if defined(OS_MACOSX)
   context = malloc_default_zone();
@@ -180,7 +182,7 @@ void ShimCppDelete(void* address) {
   return chain_head->free_function(chain_head, address, context);
 }

-void* ShimMalloc(size_t size, void* context) {
+ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
   void* ptr;
   do {
@@ -190,7 +192,7 @@ void* ShimMalloc(size_t size, void* context) {
   return ptr;
 }

-void* ShimCalloc(size_t n, size_t size, void* context) {
+ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
   void* ptr;
   do {
@@ -201,7 +203,7 @@ void* ShimCalloc(size_t n, size_t size, void* context) {
   return ptr;
 }

-void* ShimRealloc(void* address, size_t size, void* context) {
+ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
   // realloc(size == 0) means free() and might return a nullptr. We should
   // not call the std::new_handler in that case, though.
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
@@ -213,7 +215,7 @@ void* ShimRealloc(void* address, size_t size, void* context) {
   return ptr;
 }

-void* ShimMemalign(size_t alignment, size_t size, void* context) {
+ALWAYS_INLINE void* ShimMemalign(size_t alignment, size_t size, void* context) {
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
   void* ptr;
   do {
@@ -224,7 +226,7 @@ void* ShimMemalign(size_t alignment, size_t size, void* context) {
   return ptr;
 }

-int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
+ALWAYS_INLINE int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
   // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
   // in tc_malloc.cc.
   if (((alignment % sizeof(void*)) != 0) ||
@@ -236,11 +238,11 @@ int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
   return ptr ? 0 : ENOMEM;
 }

-void* ShimValloc(size_t size, void* context) {
+ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
   return ShimMemalign(GetCachedPageSize(), size, context);
 }

-void* ShimPvalloc(size_t size) {
+ALWAYS_INLINE void* ShimPvalloc(size_t size) {
   // pvalloc(0) should allocate one page, according to its man page.
   if (size == 0) {
     size = GetCachedPageSize();
@@ -252,35 +254,35 @@ void* ShimPvalloc(size_t size) {
   return ShimMemalign(GetCachedPageSize(), size, nullptr);
 }

-void ShimFree(void* address, void* context) {
+ALWAYS_INLINE void ShimFree(void* address, void* context) {
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
   return chain_head->free_function(chain_head, address, context);
 }

-size_t ShimGetSizeEstimate(const void* address, void* context) {
+ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address, void* context) {
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
   return chain_head->get_size_estimate_function(
       chain_head, const_cast<void*>(address), context);
 }

-unsigned ShimBatchMalloc(size_t size,
-                         void** results,
-                         unsigned num_requested,
-                         void* context) {
+ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
+                                       void** results,
+                                       unsigned num_requested,
+                                       void* context) {
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
   return chain_head->batch_malloc_function(chain_head, size, results,
                                            num_requested, context);
 }

-void ShimBatchFree(void** to_be_freed,
-                   unsigned num_to_be_freed,
-                   void* context) {
+ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
+                                 unsigned num_to_be_freed,
+                                 void* context) {
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
   return chain_head->batch_free_function(chain_head, to_be_freed,
                                          num_to_be_freed, context);
 }

-void ShimFreeDefiniteSize(void* ptr, size_t size, void* context) {
+ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr, size_t size, void* context) {
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
   return chain_head->free_definite_size_function(chain_head, ptr, size,
                                                  context);