Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(293)

Side by Side Diff: base/threading/thread_local_storage.cc

Issue 2383833004: Ensure Freed TLS Slots Contain nullptr on Reallocation (Closed)
Patch Set: Add Banner Doc Created 4 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « base/threading/thread_local_storage.h ('k') | base/threading/thread_local_storage_unittest.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/threading/thread_local_storage.h" 5 #include "base/threading/thread_local_storage.h"
6 6
7 #include "base/atomicops.h" 7 #include "base/atomicops.h"
8 #include "base/lazy_instance.h" 8 #include "base/lazy_instance.h"
9 #include "base/logging.h" 9 #include "base/logging.h"
10 #include "base/synchronization/lock.h" 10 #include "base/synchronization/lock.h"
11 #include "build/build_config.h" 11 #include "build/build_config.h"
12 12
13 using base::internal::PlatformThreadLocalStorage; 13 using base::internal::PlatformThreadLocalStorage;
14 14
15 // Chrome Thread Local Storage (TLS)
brettw 2016/10/04 03:33:57 This comment is awesome! Thanks.
robliao 2016/10/04 19:19:52 Thanks. Now I just need to figure out why this fai
robliao 2016/10/06 22:14:42 With the help of https://codereview.chromium.org/2
16 //
17 // This TLS system allows Chrome to use a single OS level TLS slot process-wide,
18 // and allows us to control the slot limits instead of being at the mercy of the
19 // platform. To do this, Chrome TLS replicates an array commonly found in the OS
20 // thread metadata.
21 //
22 // Overview:
23 //
24 // OS TLS Slots Per-Thread Per-Process Global
25 // ...
26 // [] Chrome TLS Array Chrome TLS Metadata
27 // [] ----------> [][][][][ ][][][][] [][][][][ ][][][][]
28 // [] | |
29 // ... V V
30 // Metadata Version Slot Information
31 // Your Data!
32 //
33 // Using a single OS TLS slot, Chrome TLS allocates an array on demand for the
34 // lifetime of each thread that requests Chrome TLS data. Each per-thread TLS
35 // array matches the length of the per-process global metadata array.
36 //
37 // A per-process global TLS metadata array tracks information about each item in
38 // the per-thread array:
39 // * Status: Tracks if the slot is allocated or free to assign.
40 // * Destructor: An optional destructor to call on thread destruction for that
41 // specific slot.
42 // * Version: Tracks the current version of the TLS slot. Each TLS slot
43 // allocation is associated with a unique version number.
44 //
45 // Most OS TLS APIs guarantee that a newly allocated TLS slot is
46 // initialized to 0 for all threads. The Chrome TLS system provides
47 // this guarantee by tracking the version for each TLS slot here
48 // on each per-thread Chrome TLS array entry. Threads that access
49 // a slot with a mismatched version will receive 0 as their value.
50 // The metadata version is incremented when the client frees a
51 // slot. The per-thread metadata version is updated when a client
52 // writes to the slot. This scheme allows for constant time
53 // invalidation and avoids the need to iterate through each Chrome
54 // TLS array to mark the slot as zero.
55 //
56 // Just like an OS TLS API, clients of the Chrome TLS are responsible for
57 // managing any necessary lifetime of the data in their slots. The only
58 // convenience provided is automatic destruction when a thread ends. If a client
59 // frees a slot, that client is responsible for destroying the data in the slot.
60
15 namespace { 61 namespace {
16 // In order to make TLS destructors work, we need to keep around a function 62 // In order to make TLS destructors work, we need to keep around a function
17 // pointer to the destructor for each slot. We keep this array of pointers in a 63 // pointer to the destructor for each slot. We keep this array of pointers in a
18 // global (static) array. 64 // global (static) array.
19 // We use the single OS-level TLS slot (giving us one pointer per thread) to 65 // We use the single OS-level TLS slot (giving us one pointer per thread) to
20 // hold a pointer to a per-thread array (table) of slots that we allocate to 66 // hold a pointer to a per-thread array (table) of slots that we allocate to
21 // Chromium consumers. 67 // Chromium consumers.
22 68
23 // g_native_tls_key is the one native TLS that we use. It stores our table. 69 // g_native_tls_key is the one native TLS that we use. It stores our table.
24 base::subtle::Atomic32 g_native_tls_key = 70 base::subtle::Atomic32 g_native_tls_key =
25 PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES; 71 PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES;
26 72
27 // The maximum number of slots in our thread local storage stack. 73 // The maximum number of slots in our thread local storage stack.
28 constexpr int kThreadLocalStorageSize = 256; 74 constexpr int kThreadLocalStorageSize = 256;
29 constexpr int kInvalidSlotValue = -1; 75 constexpr int kInvalidSlotValue = -1;
30 76
31 enum TlsStatus { 77 enum TlsStatus {
32 FREE, 78 FREE,
33 IN_USE, 79 IN_USE,
34 }; 80 };
35 81
36 struct TlsMetadata { 82 struct TlsMetadata {
37 TlsStatus status; 83 TlsStatus status;
38 base::ThreadLocalStorage::TLSDestructorFunc destructor; 84 base::ThreadLocalStorage::TLSDestructorFunc destructor;
85 uint32_t version;
86 };
87
88 struct TlsVectorEntry {
89 void* data;
90 uint32_t version;
39 }; 91 };
40 92
41 // This LazyInstance isn't needed until after we've constructed the per-thread 93 // This LazyInstance isn't needed until after we've constructed the per-thread
42 // TLS vector, so it's safe to use. 94 // TLS vector, so it's safe to use.
43 base::LazyInstance<base::Lock>::Leaky g_tls_metadata_lock; 95 base::LazyInstance<base::Lock>::Leaky g_tls_metadata_lock;
44 TlsMetadata g_tls_metadata[kThreadLocalStorageSize]; 96 TlsMetadata g_tls_metadata[kThreadLocalStorageSize];
45 size_t g_last_assigned_slot = 0; 97 size_t g_last_assigned_slot = 0;
46 98
47 // The maximum number of times to try to clear slots by calling destructors. 99 // The maximum number of times to try to clear slots by calling destructors.
48 // Use pthread naming convention for clarity. 100 // Use pthread naming convention for clarity.
49 constexpr int kMaxDestructorIterations = kThreadLocalStorageSize; 101 constexpr int kMaxDestructorIterations = kThreadLocalStorageSize;
50 102
51 // This function is called to initialize our entire Chromium TLS system. 103 // This function is called to initialize our entire Chromium TLS system.
52 // It may be called very early, and we need to complete almost all of the setup 104 // It may be called very early, and we need to complete almost all of the setup
53 // (initialization) before calling *any* memory allocator functions, which may 105 // (initialization) before calling *any* memory allocator functions, which may
54 // recursively depend on this initialization. 106 // recursively depend on this initialization.
55 // As a result, we use Atomics, and avoid anything (like a singleton) that might 107 // As a result, we use Atomics, and avoid anything (like a singleton) that might
56 // require memory allocations. 108 // require memory allocations.
57 void** ConstructTlsVector() { 109 TlsVectorEntry* ConstructTlsVector() {
58 PlatformThreadLocalStorage::TLSKey key = 110 PlatformThreadLocalStorage::TLSKey key =
59 base::subtle::NoBarrier_Load(&g_native_tls_key); 111 base::subtle::NoBarrier_Load(&g_native_tls_key);
60 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) { 112 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) {
61 CHECK(PlatformThreadLocalStorage::AllocTLS(&key)); 113 CHECK(PlatformThreadLocalStorage::AllocTLS(&key));
62 114
63 // The TLS_KEY_OUT_OF_INDEXES is used to find out whether the key is set or 115 // The TLS_KEY_OUT_OF_INDEXES is used to find out whether the key is set or
64 // not in NoBarrier_CompareAndSwap, but Posix doesn't have invalid key, we 116 // not in NoBarrier_CompareAndSwap, but Posix doesn't have invalid key, we
65 // define an almost impossible value to be it. 117 // define an almost impossible value to be it.
66 // If we really get TLS_KEY_OUT_OF_INDEXES as value of key, just alloc 118 // If we really get TLS_KEY_OUT_OF_INDEXES as value of key, just alloc
67 // another TLS slot. 119 // another TLS slot.
(...skipping 21 matching lines...) Expand all
89 CHECK(!PlatformThreadLocalStorage::GetTLSValue(key)); 141 CHECK(!PlatformThreadLocalStorage::GetTLSValue(key));
90 142
91 // Some allocators, such as TCMalloc, make use of thread local storage. As a 143 // Some allocators, such as TCMalloc, make use of thread local storage. As a
92 // result, any attempt to call new (or malloc) will lazily cause such a system 144 // result, any attempt to call new (or malloc) will lazily cause such a system
93 // to initialize, which will include registering for a TLS key. If we are not 145 // to initialize, which will include registering for a TLS key. If we are not
94 // careful here, then that request to create a key will call new back, and 146 // careful here, then that request to create a key will call new back, and
95 // we'll have an infinite loop. We avoid that as follows: Use a stack 147 // we'll have an infinite loop. We avoid that as follows: Use a stack
96 // allocated vector, so that we don't have dependence on our allocator until 148 // allocated vector, so that we don't have dependence on our allocator until
97 // our service is in place. (i.e., don't even call new until after we're 149 // our service is in place. (i.e., don't even call new until after we're
98 // setup) 150 // setup)
99 void* stack_allocated_tls_data[kThreadLocalStorageSize]; 151 TlsVectorEntry stack_allocated_tls_data[kThreadLocalStorageSize];
100 memset(stack_allocated_tls_data, 0, sizeof(stack_allocated_tls_data)); 152 memset(stack_allocated_tls_data, 0, sizeof(stack_allocated_tls_data));
101 // Ensure that any re-entrant calls change the temp version. 153 // Ensure that any re-entrant calls change the temp version.
102 PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data); 154 PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data);
103 155
104 // Allocate an array to store our data. 156 // Allocate an array to store our data.
105 void** tls_data = new void*[kThreadLocalStorageSize]; 157 TlsVectorEntry* tls_data = new TlsVectorEntry[kThreadLocalStorageSize];
106 memcpy(tls_data, stack_allocated_tls_data, sizeof(stack_allocated_tls_data)); 158 memcpy(tls_data, stack_allocated_tls_data, sizeof(stack_allocated_tls_data));
107 PlatformThreadLocalStorage::SetTLSValue(key, tls_data); 159 PlatformThreadLocalStorage::SetTLSValue(key, tls_data);
108 return tls_data; 160 return tls_data;
109 } 161 }
110 162
111 void OnThreadExitInternal(void* value) { 163 void OnThreadExitInternal(TlsVectorEntry* tls_data) {
112 DCHECK(value); 164 DCHECK(tls_data);
113 void** tls_data = static_cast<void**>(value);
114 // Some allocators, such as TCMalloc, use TLS. As a result, when a thread 165 // Some allocators, such as TCMalloc, use TLS. As a result, when a thread
115 // terminates, one of the destructor calls we make may be to shut down an 166 // terminates, one of the destructor calls we make may be to shut down an
116 // allocator. We have to be careful that after we've shutdown all of the known 167 // allocator. We have to be careful that after we've shutdown all of the known
117 // destructors (perchance including an allocator), that we don't call the 168 // destructors (perchance including an allocator), that we don't call the
118 // allocator and cause it to resurrect itself (with no possible destructor 169 // allocator and cause it to resurrect itself (with no possible destructor
119 // call to follow). We handle this problem as follows: Switch to using a stack 170 // call to follow). We handle this problem as follows: Switch to using a stack
120 // allocated vector, so that we don't have dependence on our allocator after 171 // allocated vector, so that we don't have dependence on our allocator after
121 // we have called all g_tls_metadata destructors. (i.e., don't even call 172 // we have called all g_tls_metadata destructors. (i.e., don't even call
122 // delete[] after we're done with destructors.) 173 // delete[] after we're done with destructors.)
123 void* stack_allocated_tls_data[kThreadLocalStorageSize]; 174 TlsVectorEntry stack_allocated_tls_data[kThreadLocalStorageSize];
124 memcpy(stack_allocated_tls_data, tls_data, sizeof(stack_allocated_tls_data)); 175 memcpy(stack_allocated_tls_data, tls_data, sizeof(stack_allocated_tls_data));
125 // Ensure that any re-entrant calls change the temp version. 176 // Ensure that any re-entrant calls change the temp version.
126 PlatformThreadLocalStorage::TLSKey key = 177 PlatformThreadLocalStorage::TLSKey key =
127 base::subtle::NoBarrier_Load(&g_native_tls_key); 178 base::subtle::NoBarrier_Load(&g_native_tls_key);
128 PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data); 179 PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data);
129 delete[] tls_data; // Our last dependence on an allocator. 180 delete[] tls_data; // Our last dependence on an allocator.
130 181
131 // Snapshot the TLS Metadata so we don't have to lock on every access. 182 // Snapshot the TLS Metadata so we don't have to lock on every access.
132 TlsMetadata tls_metadata[kThreadLocalStorageSize]; 183 TlsMetadata tls_metadata[kThreadLocalStorageSize];
133 { 184 {
134 base::AutoLock auto_lock(g_tls_metadata_lock.Get()); 185 base::AutoLock auto_lock(g_tls_metadata_lock.Get());
135 memcpy(tls_metadata, g_tls_metadata, sizeof(g_tls_metadata)); 186 memcpy(tls_metadata, g_tls_metadata, sizeof(g_tls_metadata));
136 } 187 }
137 188
138 int remaining_attempts = kMaxDestructorIterations; 189 int remaining_attempts = kMaxDestructorIterations;
139 bool need_to_scan_destructors = true; 190 bool need_to_scan_destructors = true;
140 while (need_to_scan_destructors) { 191 while (need_to_scan_destructors) {
141 need_to_scan_destructors = false; 192 need_to_scan_destructors = false;
142 // Try to destroy the first-created-slot (which is slot 1) in our last 193 // Try to destroy the first-created-slot (which is slot 1) in our last
143 // destructor call. That user was able to function, and define a slot with 194 // destructor call. That user was able to function, and define a slot with
144 // no other services running, so perhaps it is a basic service (like an 195 // no other services running, so perhaps it is a basic service (like an
145 // allocator) and should also be destroyed last. If we get the order wrong, 196 // allocator) and should also be destroyed last. If we get the order wrong,
146 // then we'll iterate several more times, so it is really not that critical 197 // then we'll iterate several more times, so it is really not that critical
147 // (but it might help). 198 // (but it might help).
148 for (int slot = 0; slot < kThreadLocalStorageSize ; ++slot) { 199 for (int slot = 0; slot < kThreadLocalStorageSize ; ++slot) {
149 void* tls_value = stack_allocated_tls_data[slot]; 200 void* tls_value = stack_allocated_tls_data[slot].data;
150 if (!tls_value || tls_metadata[slot].status == TlsStatus::FREE) 201 if (!tls_value || tls_metadata[slot].status == TlsStatus::FREE ||
202 stack_allocated_tls_data[slot].version != tls_metadata[slot].version)
151 continue; 203 continue;
152 204
153 base::ThreadLocalStorage::TLSDestructorFunc destructor = 205 base::ThreadLocalStorage::TLSDestructorFunc destructor =
154 tls_metadata[slot].destructor; 206 tls_metadata[slot].destructor;
155 if (!destructor) 207 if (!destructor)
156 continue; 208 continue;
157 stack_allocated_tls_data[slot] = nullptr; // pre-clear the slot. 209 stack_allocated_tls_data[slot].data = nullptr; // pre-clear the slot.
158 destructor(tls_value); 210 destructor(tls_value);
159 // Any destructor might have called a different service, which then set a 211 // Any destructor might have called a different service, which then set a
160 // different slot to a non-null value. Hence we need to check the whole 212 // different slot to a non-null value. Hence we need to check the whole
161 // vector again. This is a pthread standard. 213 // vector again. This is a pthread standard.
162 need_to_scan_destructors = true; 214 need_to_scan_destructors = true;
163 } 215 }
164 if (--remaining_attempts <= 0) { 216 if (--remaining_attempts <= 0) {
165 NOTREACHED(); // Destructors might not have been called. 217 NOTREACHED(); // Destructors might not have been called.
166 break; 218 break;
167 } 219 }
(...skipping 12 matching lines...) Expand all
180 #if defined(OS_WIN) 232 #if defined(OS_WIN)
181 void PlatformThreadLocalStorage::OnThreadExit() { 233 void PlatformThreadLocalStorage::OnThreadExit() {
182 PlatformThreadLocalStorage::TLSKey key = 234 PlatformThreadLocalStorage::TLSKey key =
183 base::subtle::NoBarrier_Load(&g_native_tls_key); 235 base::subtle::NoBarrier_Load(&g_native_tls_key);
184 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) 236 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES)
185 return; 237 return;
186 void *tls_data = GetTLSValue(key); 238 void *tls_data = GetTLSValue(key);
187 // Maybe we have never initialized TLS for this thread. 239 // Maybe we have never initialized TLS for this thread.
188 if (!tls_data) 240 if (!tls_data)
189 return; 241 return;
190 OnThreadExitInternal(tls_data); 242 OnThreadExitInternal(static_cast<TlsVectorEntry*>(tls_data));
191 } 243 }
192 #elif defined(OS_POSIX) 244 #elif defined(OS_POSIX)
193 void PlatformThreadLocalStorage::OnThreadExit(void* value) { 245 void PlatformThreadLocalStorage::OnThreadExit(void* value) {
194 OnThreadExitInternal(value); 246 OnThreadExitInternal(static_cast<TlsVectorEntry*>(value));
195 } 247 }
196 #endif // defined(OS_WIN) 248 #endif // defined(OS_WIN)
197 249
198 } // namespace internal 250 } // namespace internal
199 251
200 void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) { 252 void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) {
201 PlatformThreadLocalStorage::TLSKey key = 253 PlatformThreadLocalStorage::TLSKey key =
202 base::subtle::NoBarrier_Load(&g_native_tls_key); 254 base::subtle::NoBarrier_Load(&g_native_tls_key);
203 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES || 255 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES ||
204 !PlatformThreadLocalStorage::GetTLSValue(key)) { 256 !PlatformThreadLocalStorage::GetTLSValue(key)) {
205 ConstructTlsVector(); 257 ConstructTlsVector();
206 } 258 }
207 259
208 // Grab a new slot. 260 // Grab a new slot.
209 slot_ = kInvalidSlotValue; 261 slot_ = kInvalidSlotValue;
262 version_ = 0;
210 { 263 {
211 base::AutoLock auto_lock(g_tls_metadata_lock.Get()); 264 base::AutoLock auto_lock(g_tls_metadata_lock.Get());
212 for (int i = 0; i < kThreadLocalStorageSize; ++i) { 265 for (int i = 0; i < kThreadLocalStorageSize; ++i) {
213 // Tracking the last assigned slot is an attempt to find the next 266 // Tracking the last assigned slot is an attempt to find the next
214 // available slot within one iteration. Under normal usage, slots remain 267 // available slot within one iteration. Under normal usage, slots remain
215 // in use for the lifetime of the process (otherwise before we reclaimed 268 // in use for the lifetime of the process (otherwise before we reclaimed
216 // slots, we would have run out of slots). This makes it highly likely the 269 // slots, we would have run out of slots). This makes it highly likely the
217 // next slot is going to be a free slot. 270 // next slot is going to be a free slot.
218 size_t slot_candidate = 271 size_t slot_candidate =
219 (g_last_assigned_slot + 1 + i) % kThreadLocalStorageSize; 272 (g_last_assigned_slot + 1 + i) % kThreadLocalStorageSize;
220 if (g_tls_metadata[slot_candidate].status == TlsStatus::FREE) { 273 if (g_tls_metadata[slot_candidate].status == TlsStatus::FREE) {
221 g_tls_metadata[slot_candidate].status = TlsStatus::IN_USE; 274 g_tls_metadata[slot_candidate].status = TlsStatus::IN_USE;
222 g_tls_metadata[slot_candidate].destructor = destructor; 275 g_tls_metadata[slot_candidate].destructor = destructor;
223 g_last_assigned_slot = slot_candidate; 276 g_last_assigned_slot = slot_candidate;
224 slot_ = slot_candidate; 277 slot_ = slot_candidate;
278 version_ = g_tls_metadata[slot_candidate].version;
225 break; 279 break;
226 } 280 }
227 } 281 }
228 } 282 }
229 CHECK_NE(slot_, kInvalidSlotValue); 283 CHECK_NE(slot_, kInvalidSlotValue);
230 CHECK_LT(slot_, kThreadLocalStorageSize); 284 CHECK_LT(slot_, kThreadLocalStorageSize);
231 285
232 // Setup our destructor. 286 // Setup our destructor.
233 base::subtle::Release_Store(&initialized_, 1); 287 base::subtle::Release_Store(&initialized_, 1);
234 } 288 }
235 289
236 void ThreadLocalStorage::StaticSlot::Free() { 290 void ThreadLocalStorage::StaticSlot::Free() {
237 DCHECK_NE(slot_, kInvalidSlotValue); 291 DCHECK_NE(slot_, kInvalidSlotValue);
238 DCHECK_LT(slot_, kThreadLocalStorageSize); 292 DCHECK_LT(slot_, kThreadLocalStorageSize);
239 { 293 {
240 base::AutoLock auto_lock(g_tls_metadata_lock.Get()); 294 base::AutoLock auto_lock(g_tls_metadata_lock.Get());
241 g_tls_metadata[slot_].status = TlsStatus::FREE; 295 g_tls_metadata[slot_].status = TlsStatus::FREE;
242 g_tls_metadata[slot_].destructor = nullptr; 296 g_tls_metadata[slot_].destructor = nullptr;
297 ++(g_tls_metadata[slot_].version);
243 } 298 }
244 slot_ = kInvalidSlotValue; 299 slot_ = kInvalidSlotValue;
245 base::subtle::Release_Store(&initialized_, 0); 300 base::subtle::Release_Store(&initialized_, 0);
246 } 301 }
247 302
248 void* ThreadLocalStorage::StaticSlot::Get() const { 303 void* ThreadLocalStorage::StaticSlot::Get() const {
249 void** tls_data = static_cast<void**>( 304 TlsVectorEntry* tls_data = static_cast<TlsVectorEntry*>(
250 PlatformThreadLocalStorage::GetTLSValue( 305 PlatformThreadLocalStorage::GetTLSValue(
251 base::subtle::NoBarrier_Load(&g_native_tls_key))); 306 base::subtle::NoBarrier_Load(&g_native_tls_key)));
252 if (!tls_data) 307 if (!tls_data)
253 tls_data = ConstructTlsVector(); 308 tls_data = ConstructTlsVector();
254 DCHECK_NE(slot_, kInvalidSlotValue); 309 DCHECK_NE(slot_, kInvalidSlotValue);
255 DCHECK_LT(slot_, kThreadLocalStorageSize); 310 DCHECK_LT(slot_, kThreadLocalStorageSize);
256 return tls_data[slot_]; 311 // A version mismatch means this slot was previously freed.
312 if (tls_data[slot_].version != version_)
313 return nullptr;
314 return tls_data[slot_].data;
257 } 315 }
258 316
259 void ThreadLocalStorage::StaticSlot::Set(void* value) { 317 void ThreadLocalStorage::StaticSlot::Set(void* value) {
260 void** tls_data = static_cast<void**>( 318 TlsVectorEntry* tls_data = static_cast<TlsVectorEntry*>(
261 PlatformThreadLocalStorage::GetTLSValue( 319 PlatformThreadLocalStorage::GetTLSValue(
262 base::subtle::NoBarrier_Load(&g_native_tls_key))); 320 base::subtle::NoBarrier_Load(&g_native_tls_key)));
263 if (!tls_data) 321 if (!tls_data)
264 tls_data = ConstructTlsVector(); 322 tls_data = ConstructTlsVector();
265 DCHECK_NE(slot_, kInvalidSlotValue); 323 DCHECK_NE(slot_, kInvalidSlotValue);
266 DCHECK_LT(slot_, kThreadLocalStorageSize); 324 DCHECK_LT(slot_, kThreadLocalStorageSize);
267 tls_data[slot_] = value; 325 tls_data[slot_].data = value;
326 tls_data[slot_].version = version_;
268 } 327 }
269 328
270 ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) { 329 ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) {
271 tls_slot_.Initialize(destructor); 330 tls_slot_.Initialize(destructor);
272 } 331 }
273 332
274 ThreadLocalStorage::Slot::~Slot() { 333 ThreadLocalStorage::Slot::~Slot() {
275 tls_slot_.Free(); 334 tls_slot_.Free();
276 } 335 }
277 336
278 void* ThreadLocalStorage::Slot::Get() const { 337 void* ThreadLocalStorage::Slot::Get() const {
279 return tls_slot_.Get(); 338 return tls_slot_.Get();
280 } 339 }
281 340
282 void ThreadLocalStorage::Slot::Set(void* value) { 341 void ThreadLocalStorage::Slot::Set(void* value) {
283 tls_slot_.Set(value); 342 tls_slot_.Set(value);
284 } 343 }
285 344
286 } // namespace base 345 } // namespace base
OLDNEW
« no previous file with comments | « base/threading/thread_local_storage.h ('k') | base/threading/thread_local_storage_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698