Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(618)

Side by Side Diff: base/threading/thread_local_storage.cc

Issue 2395043002: Revert of Add Reclaim Support to ThreadLocalStorage (Closed)
Patch Set: Created 4 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | base/threading/thread_local_storage_unittest.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/threading/thread_local_storage.h" 5 #include "base/threading/thread_local_storage.h"
6 6
7 #include "base/atomicops.h" 7 #include "base/atomicops.h"
8 #include "base/lazy_instance.h"
9 #include "base/logging.h" 8 #include "base/logging.h"
10 #include "base/synchronization/lock.h"
11 #include "build/build_config.h" 9 #include "build/build_config.h"
12 10
13 using base::internal::PlatformThreadLocalStorage; 11 using base::internal::PlatformThreadLocalStorage;
14 12
15 namespace { 13 namespace {
16 // In order to make TLS destructors work, we need to keep around a function 14 // In order to make TLS destructors work, we need to keep around a function
17 // pointer to the destructor for each slot. We keep this array of pointers in a 15 // pointer to the destructor for each slot. We keep this array of pointers in a
18 // global (static) array. 16 // global (static) array.
19 // We use the single OS-level TLS slot (giving us one pointer per thread) to 17 // We use the single OS-level TLS slot (giving us one pointer per thread) to
20 // hold a pointer to a per-thread array (table) of slots that we allocate to 18 // hold a pointer to a per-thread array (table) of slots that we allocate to
21 // Chromium consumers. 19 // Chromium consumers.
22 20
23 // g_native_tls_key is the one native TLS that we use. It stores our table. 21 // g_native_tls_key is the one native TLS that we use. It stores our table.
24 base::subtle::Atomic32 g_native_tls_key = 22 base::subtle::Atomic32 g_native_tls_key =
25 PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES; 23 PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES;
26 24
27 // The maximum number of slots in our thread local storage stack. 25 // g_last_used_tls_key is the high-water-mark of allocated thread local storage.
28 constexpr int kThreadLocalStorageSize = 256; 26 // Each allocation is an index into our g_tls_destructors[]. Each such index is
29 constexpr int kInvalidSlotValue = -1; 27 // assigned to the instance variable slot_ in a ThreadLocalStorage::Slot
28 // instance. We reserve the value slot_ == 0 to indicate that the corresponding
29 // instance of ThreadLocalStorage::Slot has been freed (i.e., destructor called,
30 // etc.). This reserved use of 0 is then stated as the initial value of
31 // g_last_used_tls_key, so that the first issued index will be 1.
32 base::subtle::Atomic32 g_last_used_tls_key = 0;
30 33
31 enum TlsStatus { 34 // The maximum number of 'slots' in our thread local storage stack.
32 FREE, 35 const int kThreadLocalStorageSize = 256;
33 IN_USE,
34 };
35
36 struct TlsMetadata {
37 TlsStatus status;
38 base::ThreadLocalStorage::TLSDestructorFunc destructor;
39 };
40
41 // This LazyInstance isn't needed until after we've constructed the per-thread
42 // TLS vector, so it's safe to use.
43 base::LazyInstance<base::Lock>::Leaky g_tls_metadata_lock;
44 TlsMetadata g_tls_metadata[kThreadLocalStorageSize];
45 size_t g_last_assigned_slot = 0;
46 36
47 // The maximum number of times to try to clear slots by calling destructors. 37 // The maximum number of times to try to clear slots by calling destructors.
48 // Use pthread naming convention for clarity. 38 // Use pthread naming convention for clarity.
49 constexpr int kMaxDestructorIterations = kThreadLocalStorageSize; 39 const int kMaxDestructorIterations = kThreadLocalStorageSize;
40
41 // An array of destructor function pointers for the slots. If a slot has a
42 // destructor, it will be stored in its corresponding entry in this array.
43 // The elements are volatile to ensure that when the compiler reads the value
44 // to potentially call the destructor, it does so once, and that value is tested
45 // for null-ness and then used. Yes, that would be a weird de-optimization,
46 // but I can imagine some register machines where it was just as easy to
47 // re-fetch an array element, and I want to be sure a call to free the key
48 // (i.e., null out the destructor entry) that happens on a separate thread can't
49 // hurt the racy calls to the destructors on another thread.
50 volatile base::ThreadLocalStorage::TLSDestructorFunc
51 g_tls_destructors[kThreadLocalStorageSize];
50 52
51 // This function is called to initialize our entire Chromium TLS system. 53 // This function is called to initialize our entire Chromium TLS system.
52 // It may be called very early, and we need to complete most all of the setup 54 // It may be called very early, and we need to complete most all of the setup
53 // (initialization) before calling *any* memory allocator functions, which may 55 // (initialization) before calling *any* memory allocator functions, which may
54 // recursively depend on this initialization. 56 // recursively depend on this initialization.
55 // As a result, we use Atomics, and avoid anything (like a singleton) that might 57 // As a result, we use Atomics, and avoid anything (like a singleton) that might
56 // require memory allocations. 58 // require memory allocations.
57 void** ConstructTlsVector() { 59 void** ConstructTlsVector() {
58 PlatformThreadLocalStorage::TLSKey key = 60 PlatformThreadLocalStorage::TLSKey key =
59 base::subtle::NoBarrier_Load(&g_native_tls_key); 61 base::subtle::NoBarrier_Load(&g_native_tls_key);
60 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) { 62 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) {
61 CHECK(PlatformThreadLocalStorage::AllocTLS(&key)); 63 CHECK(PlatformThreadLocalStorage::AllocTLS(&key));
62 64
63 // The TLS_KEY_OUT_OF_INDEXES is used to find out whether the key is set or 65 // The TLS_KEY_OUT_OF_INDEXES is used to find out whether the key is set or
64 // not in NoBarrier_CompareAndSwap, but Posix doesn't have invalid key, we 66 // not in NoBarrier_CompareAndSwap, but Posix doesn't have invalid key, we
65 // define an almost impossible value for it. 67 // define an almost impossible value for it.
66 // If we really get TLS_KEY_OUT_OF_INDEXES as value of key, just alloc 68 // If we really get TLS_KEY_OUT_OF_INDEXES as value of key, just alloc
67 // another TLS slot. 69 // another TLS slot.
68 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) { 70 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) {
69 PlatformThreadLocalStorage::TLSKey tmp = key; 71 PlatformThreadLocalStorage::TLSKey tmp = key;
70 CHECK(PlatformThreadLocalStorage::AllocTLS(&key) && 72 CHECK(PlatformThreadLocalStorage::AllocTLS(&key) &&
71 key != PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES); 73 key != PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES);
72 PlatformThreadLocalStorage::FreeTLS(tmp); 74 PlatformThreadLocalStorage::FreeTLS(tmp);
73 } 75 }
74 // Atomically test-and-set the tls_key. If the key is 76 // Atomically test-and-set the tls_key. If the key is
75 // TLS_KEY_OUT_OF_INDEXES, go ahead and set it. Otherwise, do nothing, as 77 // TLS_KEY_OUT_OF_INDEXES, go ahead and set it. Otherwise, do nothing, as
76 // another thread already did our dirty work. 78 // another thread already did our dirty work.
77 if (PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES != 79 if (PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES !=
78 static_cast<PlatformThreadLocalStorage::TLSKey>( 80 static_cast<PlatformThreadLocalStorage::TLSKey>(
79 base::subtle::NoBarrier_CompareAndSwap( 81 base::subtle::NoBarrier_CompareAndSwap(
80 &g_native_tls_key, 82 &g_native_tls_key,
81 PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES, key))) { 83 PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES, key))) {
82 // We've been shortcut. Another thread replaced g_native_tls_key first so 84 // We've been shortcut. Another thread replaced g_native_tls_key first so
83 // we need to destroy our index and use the one the other thread got 85 // we need to destroy our index and use the one the other thread got
84 // first. 86 // first.
85 PlatformThreadLocalStorage::FreeTLS(key); 87 PlatformThreadLocalStorage::FreeTLS(key);
86 key = base::subtle::NoBarrier_Load(&g_native_tls_key); 88 key = base::subtle::NoBarrier_Load(&g_native_tls_key);
87 } 89 }
88 } 90 }
89 CHECK(!PlatformThreadLocalStorage::GetTLSValue(key)); 91 CHECK(!PlatformThreadLocalStorage::GetTLSValue(key));
90 92
91 // Some allocators, such as TCMalloc, make use of thread local storage. As a 93 // Some allocators, such as TCMalloc, make use of thread local storage.
92 // result, any attempt to call new (or malloc) will lazily cause such a system 94 // As a result, any attempt to call new (or malloc) will lazily cause such a
93 // to initialize, which will include registering for a TLS key. If we are not 95 // system to initialize, which will include registering for a TLS key. If we
94 // careful here, then that request to create a key will call new back, and 96 // are not careful here, then that request to create a key will call new back,
95 // we'll have an infinite loop. We avoid that as follows: Use a stack 97 // and we'll have an infinite loop. We avoid that as follows:
96 // allocated vector, so that we don't have dependence on our allocator until 98 // Use a stack allocated vector, so that we don't have dependence on our
97 // our service is in place. (i.e., don't even call new until after we're 99 // allocator until our service is in place. (i.e., don't even call new until
98 // setup) 100 // after we're setup)
99 void* stack_allocated_tls_data[kThreadLocalStorageSize]; 101 void* stack_allocated_tls_data[kThreadLocalStorageSize];
100 memset(stack_allocated_tls_data, 0, sizeof(stack_allocated_tls_data)); 102 memset(stack_allocated_tls_data, 0, sizeof(stack_allocated_tls_data));
101 // Ensure that any re-entrant calls change the temp version. 103 // Ensure that any re-entrant calls change the temp version.
102 PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data); 104 PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data);
103 105
104 // Allocate an array to store our data. 106 // Allocate an array to store our data.
105 void** tls_data = new void*[kThreadLocalStorageSize]; 107 void** tls_data = new void*[kThreadLocalStorageSize];
106 memcpy(tls_data, stack_allocated_tls_data, sizeof(stack_allocated_tls_data)); 108 memcpy(tls_data, stack_allocated_tls_data, sizeof(stack_allocated_tls_data));
107 PlatformThreadLocalStorage::SetTLSValue(key, tls_data); 109 PlatformThreadLocalStorage::SetTLSValue(key, tls_data);
108 return tls_data; 110 return tls_data;
109 } 111 }
110 112
111 void OnThreadExitInternal(void* value) { 113 void OnThreadExitInternal(void* value) {
112 DCHECK(value); 114 DCHECK(value);
113 void** tls_data = static_cast<void**>(value); 115 void** tls_data = static_cast<void**>(value);
114 // Some allocators, such as TCMalloc, use TLS. As a result, when a thread 116 // Some allocators, such as TCMalloc, use TLS. As a result, when a thread
115 // terminates, one of the destructor calls we make may be to shut down an 117 // terminates, one of the destructor calls we make may be to shut down an
116 // allocator. We have to be careful that after we've shutdown all of the known 118 // allocator. We have to be careful that after we've shutdown all of the
117 // destructors (perchance including an allocator), that we don't call the 119 // known destructors (perchance including an allocator), that we don't call
118 // allocator and cause it to resurrect itself (with no possible destructor 120 // the allocator and cause it to resurrect itself (with no possible destructor
119 // call to follow). We handle this problem as follows: Switch to using a stack 121 // call to follow). We handle this problem as follows:
120 // allocated vector, so that we don't have dependence on our allocator after 122 // Switch to using a stack allocated vector, so that we don't have dependence
121 // we have called all g_tls_metadata destructors. (i.e., don't even call 123 // on our allocator after we have called all g_tls_destructors. (i.e., don't
122 // delete[] after we're done with destructors.) 124 // even call delete[] after we're done with destructors.)
123 void* stack_allocated_tls_data[kThreadLocalStorageSize]; 125 void* stack_allocated_tls_data[kThreadLocalStorageSize];
124 memcpy(stack_allocated_tls_data, tls_data, sizeof(stack_allocated_tls_data)); 126 memcpy(stack_allocated_tls_data, tls_data, sizeof(stack_allocated_tls_data));
125 // Ensure that any re-entrant calls change the temp version. 127 // Ensure that any re-entrant calls change the temp version.
126 PlatformThreadLocalStorage::TLSKey key = 128 PlatformThreadLocalStorage::TLSKey key =
127 base::subtle::NoBarrier_Load(&g_native_tls_key); 129 base::subtle::NoBarrier_Load(&g_native_tls_key);
128 PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data); 130 PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data);
129 delete[] tls_data; // Our last dependence on an allocator. 131 delete[] tls_data; // Our last dependence on an allocator.
130 132
131 // Snapshot the TLS Metadata so we don't have to lock on every access.
132 TlsMetadata tls_metadata[kThreadLocalStorageSize];
133 {
134 base::AutoLock auto_lock(g_tls_metadata_lock.Get());
135 memcpy(tls_metadata, g_tls_metadata, sizeof(g_tls_metadata));
136 }
137
138 int remaining_attempts = kMaxDestructorIterations; 133 int remaining_attempts = kMaxDestructorIterations;
139 bool need_to_scan_destructors = true; 134 bool need_to_scan_destructors = true;
140 while (need_to_scan_destructors) { 135 while (need_to_scan_destructors) {
141 need_to_scan_destructors = false; 136 need_to_scan_destructors = false;
142 // Try to destroy the first-created-slot (which is slot 1) in our last 137 // Try to destroy the first-created-slot (which is slot 1) in our last
143 // destructor call. That user was able to function, and define a slot with 138 // destructor call. That user was able to function, and define a slot with
144 // no other services running, so perhaps it is a basic service (like an 139 // no other services running, so perhaps it is a basic service (like an
145 // allocator) and should also be destroyed last. If we get the order wrong, 140 // allocator) and should also be destroyed last. If we get the order wrong,
146 // then we'll iterate several more times, so it is really not that critical 141 // then we'll iterate several more times, so it is really not that
147 // (but it might help). 142 // critical (but it might help).
148 for (int slot = 0; slot < kThreadLocalStorageSize ; ++slot) { 143 base::subtle::Atomic32 last_used_tls_key =
144 base::subtle::NoBarrier_Load(&g_last_used_tls_key);
145 for (int slot = last_used_tls_key; slot > 0; --slot) {
149 void* tls_value = stack_allocated_tls_data[slot]; 146 void* tls_value = stack_allocated_tls_data[slot];
150 if (!tls_value || tls_metadata[slot].status == TlsStatus::FREE) 147 if (tls_value == NULL)
151 continue; 148 continue;
152 149
153 base::ThreadLocalStorage::TLSDestructorFunc destructor = 150 base::ThreadLocalStorage::TLSDestructorFunc destructor =
154 tls_metadata[slot].destructor; 151 g_tls_destructors[slot];
155 if (!destructor) 152 if (destructor == NULL)
156 continue; 153 continue;
157 stack_allocated_tls_data[slot] = nullptr; // pre-clear the slot. 154 stack_allocated_tls_data[slot] = NULL; // pre-clear the slot.
158 destructor(tls_value); 155 destructor(tls_value);
159 // Any destructor might have called a different service, which then set a 156 // Any destructor might have called a different service, which then set
160 // different slot to a non-null value. Hence we need to check the whole 157 // a different slot to a non-NULL value. Hence we need to check
161 // vector again. This is a pthread standard. 158 // the whole vector again. This is a pthread standard.
162 need_to_scan_destructors = true; 159 need_to_scan_destructors = true;
163 } 160 }
164 if (--remaining_attempts <= 0) { 161 if (--remaining_attempts <= 0) {
165 NOTREACHED(); // Destructors might not have been called. 162 NOTREACHED(); // Destructors might not have been called.
166 break; 163 break;
167 } 164 }
168 } 165 }
169 166
170 // Remove our stack allocated vector. 167 // Remove our stack allocated vector.
171 PlatformThreadLocalStorage::SetTLSValue(key, nullptr); 168 PlatformThreadLocalStorage::SetTLSValue(key, NULL);
172 } 169 }
173 170
174 } // namespace 171 } // namespace
175 172
176 namespace base { 173 namespace base {
177 174
178 namespace internal { 175 namespace internal {
179 176
180 #if defined(OS_WIN) 177 #if defined(OS_WIN)
181 void PlatformThreadLocalStorage::OnThreadExit() { 178 void PlatformThreadLocalStorage::OnThreadExit() {
(...skipping 12 matching lines...) Expand all
194 OnThreadExitInternal(value); 191 OnThreadExitInternal(value);
195 } 192 }
196 #endif // defined(OS_WIN) 193 #endif // defined(OS_WIN)
197 194
198 } // namespace internal 195 } // namespace internal
199 196
200 void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) { 197 void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) {
201 PlatformThreadLocalStorage::TLSKey key = 198 PlatformThreadLocalStorage::TLSKey key =
202 base::subtle::NoBarrier_Load(&g_native_tls_key); 199 base::subtle::NoBarrier_Load(&g_native_tls_key);
203 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES || 200 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES ||
204 !PlatformThreadLocalStorage::GetTLSValue(key)) { 201 !PlatformThreadLocalStorage::GetTLSValue(key))
205 ConstructTlsVector(); 202 ConstructTlsVector();
206 }
207 203
208 // Grab a new slot. 204 // Grab a new slot.
209 slot_ = kInvalidSlotValue; 205 slot_ = base::subtle::NoBarrier_AtomicIncrement(&g_last_used_tls_key, 1);
210 { 206 DCHECK_GT(slot_, 0);
211 base::AutoLock auto_lock(g_tls_metadata_lock.Get());
212 for (int i = 0; i < kThreadLocalStorageSize; ++i) {
213 // Tracking the last assigned slot is an attempt to find the next
214 // available slot within one iteration. Under normal usage, slots remain
215 // in use for the lifetime of the process (otherwise before we reclaimed
216 // slots, we would have run out of slots). This makes it highly likely the
217 // next slot is going to be a free slot.
218 size_t slot_candidate =
219 (g_last_assigned_slot + 1 + i) % kThreadLocalStorageSize;
220 if (g_tls_metadata[slot_candidate].status == TlsStatus::FREE) {
221 g_tls_metadata[slot_candidate].status = TlsStatus::IN_USE;
222 g_tls_metadata[slot_candidate].destructor = destructor;
223 g_last_assigned_slot = slot_candidate;
224 slot_ = slot_candidate;
225 break;
226 }
227 }
228 }
229 CHECK_NE(slot_, kInvalidSlotValue);
230 CHECK_LT(slot_, kThreadLocalStorageSize); 207 CHECK_LT(slot_, kThreadLocalStorageSize);
231 208
232 // Setup our destructor. 209 // Setup our destructor.
210 g_tls_destructors[slot_] = destructor;
233 base::subtle::Release_Store(&initialized_, 1); 211 base::subtle::Release_Store(&initialized_, 1);
234 } 212 }
235 213
236 void ThreadLocalStorage::StaticSlot::Free() { 214 void ThreadLocalStorage::StaticSlot::Free() {
237 DCHECK_NE(slot_, kInvalidSlotValue); 215 // At this time, we don't reclaim old indices for TLS slots.
216 // So all we need to do is wipe the destructor.
217 DCHECK_GT(slot_, 0);
238 DCHECK_LT(slot_, kThreadLocalStorageSize); 218 DCHECK_LT(slot_, kThreadLocalStorageSize);
239 { 219 g_tls_destructors[slot_] = NULL;
240 base::AutoLock auto_lock(g_tls_metadata_lock.Get()); 220 slot_ = 0;
241 g_tls_metadata[slot_].status = TlsStatus::FREE;
242 g_tls_metadata[slot_].destructor = nullptr;
243 }
244 slot_ = kInvalidSlotValue;
245 base::subtle::Release_Store(&initialized_, 0); 221 base::subtle::Release_Store(&initialized_, 0);
246 } 222 }
247 223
248 void* ThreadLocalStorage::StaticSlot::Get() const { 224 void* ThreadLocalStorage::StaticSlot::Get() const {
249 void** tls_data = static_cast<void**>( 225 void** tls_data = static_cast<void**>(
250 PlatformThreadLocalStorage::GetTLSValue( 226 PlatformThreadLocalStorage::GetTLSValue(
251 base::subtle::NoBarrier_Load(&g_native_tls_key))); 227 base::subtle::NoBarrier_Load(&g_native_tls_key)));
252 if (!tls_data) 228 if (!tls_data)
253 tls_data = ConstructTlsVector(); 229 tls_data = ConstructTlsVector();
254 DCHECK_NE(slot_, kInvalidSlotValue); 230 DCHECK_GT(slot_, 0);
255 DCHECK_LT(slot_, kThreadLocalStorageSize); 231 DCHECK_LT(slot_, kThreadLocalStorageSize);
256 return tls_data[slot_]; 232 return tls_data[slot_];
257 } 233 }
258 234
259 void ThreadLocalStorage::StaticSlot::Set(void* value) { 235 void ThreadLocalStorage::StaticSlot::Set(void* value) {
260 void** tls_data = static_cast<void**>( 236 void** tls_data = static_cast<void**>(
261 PlatformThreadLocalStorage::GetTLSValue( 237 PlatformThreadLocalStorage::GetTLSValue(
262 base::subtle::NoBarrier_Load(&g_native_tls_key))); 238 base::subtle::NoBarrier_Load(&g_native_tls_key)));
263 if (!tls_data) 239 if (!tls_data)
264 tls_data = ConstructTlsVector(); 240 tls_data = ConstructTlsVector();
265 DCHECK_NE(slot_, kInvalidSlotValue); 241 DCHECK_GT(slot_, 0);
266 DCHECK_LT(slot_, kThreadLocalStorageSize); 242 DCHECK_LT(slot_, kThreadLocalStorageSize);
267 tls_data[slot_] = value; 243 tls_data[slot_] = value;
268 } 244 }
269 245
270 ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) { 246 ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) {
271 tls_slot_.Initialize(destructor); 247 tls_slot_.Initialize(destructor);
272 } 248 }
273 249
274 ThreadLocalStorage::Slot::~Slot() { 250 ThreadLocalStorage::Slot::~Slot() {
275 tls_slot_.Free(); 251 tls_slot_.Free();
276 } 252 }
277 253
278 void* ThreadLocalStorage::Slot::Get() const { 254 void* ThreadLocalStorage::Slot::Get() const {
279 return tls_slot_.Get(); 255 return tls_slot_.Get();
280 } 256 }
281 257
282 void ThreadLocalStorage::Slot::Set(void* value) { 258 void ThreadLocalStorage::Slot::Set(void* value) {
283 tls_slot_.Set(value); 259 tls_slot_.Set(value);
284 } 260 }
285 261
286 } // namespace base 262 } // namespace base
OLDNEW
« no previous file with comments | « no previous file | base/threading/thread_local_storage_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698