Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(489)

Side by Side Diff: base/allocator/allocator_shim.cc

Issue 2556563002: Make InsertAllocatorDispatch thread safe. (Closed)
Patch Set: Remove the now-redundant single-thread check. Created 4 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « base/allocator/allocator_shim.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/allocator/allocator_shim.h" 5 #include "base/allocator/allocator_shim.h"
6 6
7 #include <errno.h> 7 #include <errno.h>
8 8
9 #include <new> 9 #include <new>
10 10
(...skipping 21 matching lines...) Expand all
32 32
// Head of the singly-linked dispatch chain. Stored as an AtomicWord so it can
// be read/written with base::subtle atomic ops; it initially points at the
// default dispatch.
33 subtle::AtomicWord g_chain_head = reinterpret_cast<subtle::AtomicWord>( 33 subtle::AtomicWord g_chain_head = reinterpret_cast<subtle::AtomicWord>(
34 &allocator::AllocatorDispatch::default_dispatch); 34 &allocator::AllocatorDispatch::default_dispatch);
35 35
// When true, a failed allocation invokes the std::new_handler before giving up
// (see SetCallNewHandlerOnMallocFailure below).
36 bool g_call_new_handler_on_malloc_failure = false; 36 bool g_call_new_handler_on_malloc_failure = false;
37 37
38 #if !defined(OS_WIN) 38 #if !defined(OS_WIN)
// Spin-lock word guarding access to the std::new_handler on non-Windows
// platforms (Windows presumably has its own path — see CallNewHandler).
39 subtle::Atomic32 g_new_handler_lock = 0; 39 subtle::Atomic32 g_new_handler_lock = 0;
40 #endif 40 #endif
41 41
42 // In theory this should be just base::ThreadChecker. But we can't afford
43 // the luxury of a LazyInstance<ThreadChecker> here as it would cause a new().
44 bool CalledOnValidThread() {
45 using subtle::Atomic32;
46 const Atomic32 kInvalidTID = static_cast<Atomic32>(kInvalidThreadId);
47 static Atomic32 g_tid = kInvalidTID;
48 Atomic32 cur_tid = static_cast<Atomic32>(PlatformThread::CurrentId());
49 Atomic32 prev_tid =
50 subtle::NoBarrier_CompareAndSwap(&g_tid, kInvalidTID, cur_tid);
51 return prev_tid == kInvalidTID || prev_tid == cur_tid;
52 }
53
54 inline size_t GetCachedPageSize() { 42 inline size_t GetCachedPageSize() {
55 static size_t pagesize = 0; 43 static size_t pagesize = 0;
56 if (!pagesize) 44 if (!pagesize)
57 pagesize = base::GetPageSize(); 45 pagesize = base::GetPageSize();
58 return pagesize; 46 return pagesize;
59 } 47 }
60 48
61 // Calls the std::new handler thread-safely. Returns true if a new_handler was 49 // Calls the std::new handler thread-safely. Returns true if a new_handler was
62 // set and called, false if no new_handler was set. 50 // set and called, false if no new_handler was set.
63 bool CallNewHandler(size_t size) { 51 bool CallNewHandler(size_t size) {
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
105 void SetCallNewHandlerOnMallocFailure(bool value) { 93 void SetCallNewHandlerOnMallocFailure(bool value) {
106 g_call_new_handler_on_malloc_failure = value; 94 g_call_new_handler_on_malloc_failure = value;
107 } 95 }
108 96
109 void* UncheckedAlloc(size_t size) { 97 void* UncheckedAlloc(size_t size) {
110 const allocator::AllocatorDispatch* const chain_head = GetChainHead(); 98 const allocator::AllocatorDispatch* const chain_head = GetChainHead();
111 return chain_head->alloc_function(chain_head, size); 99 return chain_head->alloc_function(chain_head, size);
112 } 100 }
113 101
114 void InsertAllocatorDispatch(AllocatorDispatch* dispatch) { 102 void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
115 // Ensure this is always called on the same thread. 103 // Loop in case of (an unlikely) race on setting the list head.
116 DCHECK(CalledOnValidThread()); 104 size_t kMaxRetries = 7;
105 for (size_t i = 0; i < kMaxRetries; ++i) {
106 const AllocatorDispatch* chain_head = GetChainHead();
107 dispatch->next = chain_head;
117 108
118 dispatch->next = GetChainHead(); 109 // This function guarantees to be thread-safe w.r.t. concurrent
110 // insertions. It also has to guarantee that all the threads always
111 // see a consistent chain, hence the MemoryBarrier() below.
112 // InsertAllocatorDispatch() is NOT a fastpath, as opposite to malloc(), so
113 // we don't really want this to be a release-store with a corresponding
114 // acquire-load during malloc().
115 subtle::MemoryBarrier();
116 subtle::AtomicWord old_value =
117 reinterpret_cast<subtle::AtomicWord>(chain_head);
118 // Set the chain head to the new dispatch atomically. If we lose the race,
119 // the comparison will fail, and the new head of chain will be returned.
120 if (subtle::NoBarrier_CompareAndSwap(
121 &g_chain_head, old_value,
122 reinterpret_cast<subtle::AtomicWord>(dispatch)) == old_value) {
123 // Success.
124 return;
125 }
126 }
119 127
120 // This function does not guarantee to be thread-safe w.r.t. concurrent 128 CHECK(false); // Too many retries, this shouldn't happen.
121 // insertions, but still has to guarantee that all the threads always
122 // see a consistent chain, hence the MemoryBarrier() below.
123 // InsertAllocatorDispatch() is NOT a fastpath, as opposite to malloc(), so
124 // we don't really want this to be a release-store with a corresponding
125 // acquire-load during malloc().
126 subtle::MemoryBarrier();
127
128 subtle::NoBarrier_Store(&g_chain_head,
129 reinterpret_cast<subtle::AtomicWord>(dispatch));
130 } 129 }
131 130
132 void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) { 131 void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
133 DCHECK(CalledOnValidThread());
134 DCHECK_EQ(GetChainHead(), dispatch); 132 DCHECK_EQ(GetChainHead(), dispatch);
135 subtle::NoBarrier_Store(&g_chain_head, 133 subtle::NoBarrier_Store(&g_chain_head,
136 reinterpret_cast<subtle::AtomicWord>(dispatch->next)); 134 reinterpret_cast<subtle::AtomicWord>(dispatch->next));
137 } 135 }
138 136
139 } // namespace allocator 137 } // namespace allocator
140 } // namespace base 138 } // namespace base
141 139
142 // The Shim* functions below are the entry-points into the shim-layer and 140 // The Shim* functions below are the entry-points into the shim-layer and
143 // are supposed to be invoked / aliased by the allocator_shim_override_* 141 // are supposed to be invoked / aliased by the allocator_shim_override_*
(...skipping 129 matching lines...) Expand 10 before | Expand all | Expand 10 after
273 // Cross-checks. 271 // Cross-checks.
274 272
275 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 273 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
276 #error The allocator shim should not be compiled when building for memory tools. 274 #error The allocator shim should not be compiled when building for memory tools.
277 #endif 275 #endif
278 276
279 #if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \ 277 #if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
280 (defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS) 278 (defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS)
281 #error This code cannot be used when exceptions are turned on. 279 #error This code cannot be used when exceptions are turned on.
282 #endif 280 #endif
OLDNEW
« no previous file with comments | « base/allocator/allocator_shim.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698