OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/threading/thread_local_storage.h" | 5 #include "base/threading/thread_local_storage.h" |
6 | 6 |
7 #include "base/atomicops.h" | 7 #include "base/atomicops.h" |
| 8 #include "base/lazy_instance.h" |
8 #include "base/logging.h" | 9 #include "base/logging.h" |
| 10 #include "base/synchronization/lock.h" |
9 #include "build/build_config.h" | 11 #include "build/build_config.h" |
10 | 12 |
11 using base::internal::PlatformThreadLocalStorage; | 13 using base::internal::PlatformThreadLocalStorage; |
12 | 14 |
13 namespace { | 15 namespace { |
14 // In order to make TLS destructors work, we need to keep around a function | 16 // In order to make TLS destructors work, we need to keep around a function |
15 // pointer to the destructor for each slot. We keep this array of pointers in a | 17 // pointer to the destructor for each slot. We keep this array of pointers in a |
16 // global (static) array. | 18 // global (static) array. |
17 // We use the single OS-level TLS slot (giving us one pointer per thread) to | 19 // We use the single OS-level TLS slot (giving us one pointer per thread) to |
18 // hold a pointer to a per-thread array (table) of slots that we allocate to | 20 // hold a pointer to a per-thread array (table) of slots that we allocate to |
19 // Chromium consumers. | 21 // Chromium consumers. |
20 | 22 |
21 // g_native_tls_key is the one native TLS key that we use. It stores our table. | 23 // g_native_tls_key is the one native TLS key that we use. It stores our table. |
22 base::subtle::Atomic32 g_native_tls_key = | 24 base::subtle::Atomic32 g_native_tls_key = |
23 PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES; | 25 PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES; |
24 | 26 |
25 // g_last_used_tls_key is the high-water-mark of allocated thread local storage. | 27 // The maximum number of slots in our thread local storage stack. |
26 // Each allocation is an index into our g_tls_destructors[]. Each such index is | 28 constexpr int kThreadLocalStorageSize = 256; |
27 // assigned to the instance variable slot_ in a ThreadLocalStorage::Slot | 29 constexpr int kInvalidSlotValue = -1; |
28 // instance. We reserve the value slot_ == 0 to indicate that the corresponding | |
29 // instance of ThreadLocalStorage::Slot has been freed (i.e., destructor called, | |
30 // etc.). This reserved use of 0 is then stated as the initial value of | |
31 // g_last_used_tls_key, so that the first issued index will be 1. | |
32 base::subtle::Atomic32 g_last_used_tls_key = 0; | |
33 | 30 |
34 // The maximum number of 'slots' in our thread local storage stack. | 31 enum TlsStatus { |
35 const int kThreadLocalStorageSize = 256; | 32 FREE, |
| 33 IN_USE, |
| 34 }; |
| 35 |
| 36 struct TlsMetadata { |
| 37 TlsStatus status; |
| 38 base::ThreadLocalStorage::TLSDestructorFunc destructor; |
| 39 }; |
| 40 |
| 41 // This LazyInstance isn't needed until after we've constructed the per-thread |
| 42 // TLS vector, so it's safe to use. |
| 43 base::LazyInstance<base::Lock>::Leaky g_tls_metadata_lock; |
| 44 TlsMetadata g_tls_metadata[kThreadLocalStorageSize]; |
| 45 size_t g_last_assigned_slot = 0; |
36 | 46 |
37 // The maximum number of times to try to clear slots by calling destructors. | 47 // The maximum number of times to try to clear slots by calling destructors. |
38 // Use pthread naming convention for clarity. | 48 // Use pthread naming convention for clarity. |
39 const int kMaxDestructorIterations = kThreadLocalStorageSize; | 49 constexpr int kMaxDestructorIterations = kThreadLocalStorageSize; |
40 | |
41 // An array of destructor function pointers for the slots. If a slot has a | |
42 // destructor, it will be stored in its corresponding entry in this array. | |
43 // The elements are volatile to ensure that when the compiler reads the value | |
44 // to potentially call the destructor, it does so once, and that value is tested | |
45 // for null-ness and then used. Yes, that would be a weird de-optimization, | |
46 // but I can imagine some register machines where it was just as easy to | |
47 // re-fetch an array element, and I want to be sure a call to free the key | |
48 // (i.e., null out the destructor entry) that happens on a separate thread can't | |
49 // hurt the racy calls to the destructors on another thread. | |
50 volatile base::ThreadLocalStorage::TLSDestructorFunc | |
51 g_tls_destructors[kThreadLocalStorageSize]; | |
52 | 50 |
53 // This function is called to initialize our entire Chromium TLS system. | 51 // This function is called to initialize our entire Chromium TLS system. |
54 // It may be called very early, and we need to complete almost all of the setup | 52 // It may be called very early, and we need to complete almost all of the setup |
55 // (initialization) before calling *any* memory allocator functions, which may | 53 // (initialization) before calling *any* memory allocator functions, which may |
56 // recursively depend on this initialization. | 54 // recursively depend on this initialization. |
57 // As a result, we use Atomics, and avoid anything (like a singleton) that might | 55 // As a result, we use Atomics, and avoid anything (like a singleton) that might |
58 // require memory allocations. | 56 // require memory allocations. |
59 void** ConstructTlsVector() { | 57 void** ConstructTlsVector() { |
60 PlatformThreadLocalStorage::TLSKey key = | 58 PlatformThreadLocalStorage::TLSKey key = |
61 base::subtle::NoBarrier_Load(&g_native_tls_key); | 59 base::subtle::NoBarrier_Load(&g_native_tls_key); |
62 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) { | 60 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) { |
63 CHECK(PlatformThreadLocalStorage::AllocTLS(&key)); | 61 CHECK(PlatformThreadLocalStorage::AllocTLS(&key)); |
64 | 62 |
65 // TLS_KEY_OUT_OF_INDEXES is used to tell whether the key has been set in | 63 // TLS_KEY_OUT_OF_INDEXES is used to tell whether the key has been set in |
66 // NoBarrier_CompareAndSwap, but POSIX has no invalid key value, so we | 64 // NoBarrier_CompareAndSwap, but POSIX has no invalid key value, so we |
67 // define an almost impossible value to stand in for one. | 65 // define an almost impossible value to stand in for one. |
68 // If we really do get TLS_KEY_OUT_OF_INDEXES as the value of the key, just | 66 // If we really do get TLS_KEY_OUT_OF_INDEXES as the value of the key, just |
69 // alloc another TLS slot. | 67 // alloc another TLS slot. |
70 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) { | 68 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) { |
71 PlatformThreadLocalStorage::TLSKey tmp = key; | 69 PlatformThreadLocalStorage::TLSKey tmp = key; |
72 CHECK(PlatformThreadLocalStorage::AllocTLS(&key) && | 70 CHECK(PlatformThreadLocalStorage::AllocTLS(&key) && |
73 key != PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES); | 71 key != PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES); |
74 PlatformThreadLocalStorage::FreeTLS(tmp); | 72 PlatformThreadLocalStorage::FreeTLS(tmp); |
75 } | 73 } |
76 // Atomically test-and-set the tls_key. If the key is | 74 // Atomically test-and-set the tls_key. If the key is |
77 // TLS_KEY_OUT_OF_INDEXES, go ahead and set it. Otherwise, do nothing, as | 75 // TLS_KEY_OUT_OF_INDEXES, go ahead and set it. Otherwise, do nothing, as |
78 // another thread already did our dirty work. | 76 // another thread already did our dirty work. |
79 if (PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES != | 77 if (PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES != |
80 static_cast<PlatformThreadLocalStorage::TLSKey>( | 78 static_cast<PlatformThreadLocalStorage::TLSKey>( |
81 base::subtle::NoBarrier_CompareAndSwap( | 79 base::subtle::NoBarrier_CompareAndSwap( |
82 &g_native_tls_key, | 80 &g_native_tls_key, |
83 PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES, key))) { | 81 PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES, key))) { |
84 // We've been shortcut. Another thread replaced g_native_tls_key first so | 82 // We've been shortcut. Another thread replaced g_native_tls_key first so |
85 // we need to destroy our index and use the one the other thread got | 83 // we need to destroy our index and use the one the other thread got |
86 // first. | 84 // first. |
87 PlatformThreadLocalStorage::FreeTLS(key); | 85 PlatformThreadLocalStorage::FreeTLS(key); |
88 key = base::subtle::NoBarrier_Load(&g_native_tls_key); | 86 key = base::subtle::NoBarrier_Load(&g_native_tls_key); |
89 } | 87 } |
90 } | 88 } |
91 CHECK(!PlatformThreadLocalStorage::GetTLSValue(key)); | 89 CHECK(!PlatformThreadLocalStorage::GetTLSValue(key)); |
92 | 90 |
93 // Some allocators, such as TCMalloc, make use of thread local storage. | 91 // Some allocators, such as TCMalloc, make use of thread local storage. As a |
94 // As a result, any attempt to call new (or malloc) will lazily cause such a | 92 // result, any attempt to call new (or malloc) will lazily cause such a system |
95 // system to initialize, which will include registering for a TLS key. If we | 93 // to initialize, which will include registering for a TLS key. If we are not |
96 // are not careful here, then that request to create a key will call new back, | 94 // careful here, then that request to create a key will call new back, and |
97 // and we'll have an infinite loop. We avoid that as follows: | 95 // we'll have an infinite loop. We avoid that as follows: Use a stack |
98 // Use a stack allocated vector, so that we don't have dependence on our | 96 // allocated vector, so that we don't have dependence on our allocator until |
99 // allocator until our service is in place. (i.e., don't even call new until | 97 // our service is in place. (i.e., don't even call new until after we're |
100 // after we're set up) | 98 // set up) |
101 void* stack_allocated_tls_data[kThreadLocalStorageSize]; | 99 void* stack_allocated_tls_data[kThreadLocalStorageSize]; |
102 memset(stack_allocated_tls_data, 0, sizeof(stack_allocated_tls_data)); | 100 memset(stack_allocated_tls_data, 0, sizeof(stack_allocated_tls_data)); |
103 // Ensure that any re-entrant calls change the temp version. | 101 // Ensure that any re-entrant calls change the temp version. |
104 PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data); | 102 PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data); |
105 | 103 |
106 // Allocate an array to store our data. | 104 // Allocate an array to store our data. |
107 void** tls_data = new void*[kThreadLocalStorageSize]; | 105 void** tls_data = new void*[kThreadLocalStorageSize]; |
108 memcpy(tls_data, stack_allocated_tls_data, sizeof(stack_allocated_tls_data)); | 106 memcpy(tls_data, stack_allocated_tls_data, sizeof(stack_allocated_tls_data)); |
109 PlatformThreadLocalStorage::SetTLSValue(key, tls_data); | 107 PlatformThreadLocalStorage::SetTLSValue(key, tls_data); |
110 return tls_data; | 108 return tls_data; |
111 } | 109 } |
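
[Reviewer note] The compare-and-swap dance above is the standard
first-thread-wins, allocation-free initialization pattern. A minimal sketch
of the same idea written against raw pthreads and std::atomic; kNoKey,
GetOrCreateKey, and the abort() failure policy are illustrative assumptions,
not part of this CL:

  #include <atomic>
  #include <cstdlib>
  #include <pthread.h>

  namespace {
  const pthread_key_t kNoKey = static_cast<pthread_key_t>(-1);  // assumed sentinel
  std::atomic<pthread_key_t> g_key{kNoKey};
  }  // namespace

  pthread_key_t GetOrCreateKey() {
    pthread_key_t key = g_key.load(std::memory_order_relaxed);
    if (key != kNoKey)
      return key;
    pthread_key_t fresh;
    if (pthread_key_create(&fresh, nullptr) != 0)
      abort();  // key allocation failed; mirrors the CHECK above
    pthread_key_t expected = kNoKey;
    if (!g_key.compare_exchange_strong(expected, fresh,
                                       std::memory_order_relaxed)) {
      pthread_key_delete(fresh);  // lost the race; adopt the winner's key
      return expected;
    }
    return fresh;
  }

The CL additionally re-allocates when the OS hands back the sentinel value
itself; the sketch omits that corner case.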
112 | 110 |
113 void OnThreadExitInternal(void* value) { | 111 void OnThreadExitInternal(void* value) { |
114 DCHECK(value); | 112 DCHECK(value); |
115 void** tls_data = static_cast<void**>(value); | 113 void** tls_data = static_cast<void**>(value); |
116 // Some allocators, such as TCMalloc, use TLS. As a result, when a thread | 114 // Some allocators, such as TCMalloc, use TLS. As a result, when a thread |
117 // terminates, one of the destructor calls we make may be to shut down an | 115 // terminates, one of the destructor calls we make may be to shut down an |
118 // allocator. We have to be careful that after we've shut down all of the | 116 // allocator. We have to be careful that after we've shut down all of the known |
119 // known destructors (perchance including an allocator), we don't call | 117 // destructors (perchance including an allocator), we don't call the |
120 // the allocator and cause it to resurrect itself (with no possible destructor | 118 // allocator and cause it to resurrect itself (with no possible destructor |
121 // call to follow). We handle this problem as follows: | 119 // call to follow). We handle this problem as follows: Switch to using a stack |
122 // Switch to using a stack allocated vector, so that we don't have dependence | 120 // allocated vector, so that we don't have dependence on our allocator after |
123 // on our allocator after we have called all g_tls_destructors. (i.e., don't | 121 // we have called all g_tls_metadata destructors. (i.e., don't even call |
124 // even call delete[] after we're done with destructors.) | 122 // delete[] after we're done with destructors.) |
125 void* stack_allocated_tls_data[kThreadLocalStorageSize]; | 123 void* stack_allocated_tls_data[kThreadLocalStorageSize]; |
126 memcpy(stack_allocated_tls_data, tls_data, sizeof(stack_allocated_tls_data)); | 124 memcpy(stack_allocated_tls_data, tls_data, sizeof(stack_allocated_tls_data)); |
127 // Ensure that any re-entrant calls change the temp version. | 125 // Ensure that any re-entrant calls change the temp version. |
128 PlatformThreadLocalStorage::TLSKey key = | 126 PlatformThreadLocalStorage::TLSKey key = |
129 base::subtle::NoBarrier_Load(&g_native_tls_key); | 127 base::subtle::NoBarrier_Load(&g_native_tls_key); |
130 PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data); | 128 PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data); |
131 delete[] tls_data; // Our last dependence on an allocator. | 129 delete[] tls_data; // Our last dependence on an allocator. |
132 | 130 |
| 131 // Snapshot the TLS metadata so we don't have to lock on every access. |
| 132 TlsMetadata tls_metadata[kThreadLocalStorageSize]; |
| 133 { |
| 134 base::AutoLock auto_lock(g_tls_metadata_lock.Get()); |
| 135 memcpy(tls_metadata, g_tls_metadata, sizeof(g_tls_metadata)); |
| 136 } |
| 137 |
133 int remaining_attempts = kMaxDestructorIterations; | 138 int remaining_attempts = kMaxDestructorIterations; |
134 bool need_to_scan_destructors = true; | 139 bool need_to_scan_destructors = true; |
135 while (need_to_scan_destructors) { | 140 while (need_to_scan_destructors) { |
136 need_to_scan_destructors = false; | 141 need_to_scan_destructors = false; |
137 // Try to destroy the first-created-slot (which is slot 1) in our last | 142 // The loop below simply walks the slots in creation order. The order is |
138 // destructor call. That user was able to function, and define a slot with | 143 // not critical: any destructor may set other slots to non-null values, |
139 // no other services running, so perhaps it is a basic service (like an | 144 // forcing another full scan (see below), so if a basic service (like an |
140 // allocator) and should also be destroyed last. If we get the order wrong, | 145 // allocator) is destroyed before one of its users, we'll just iterate |
141 // then we'll itterate several more times, so it is really not that | 146 // several more times. kMaxDestructorIterations bounds the passes so a |
142 // critical (but it might help). | 147 // misbehaving destructor cannot loop forever. |
143 base::subtle::Atomic32 last_used_tls_key = | 148 for (int slot = 0; slot < kThreadLocalStorageSize; ++slot) { |
144 base::subtle::NoBarrier_Load(&g_last_used_tls_key); | |
145 for (int slot = last_used_tls_key; slot > 0; --slot) { | |
146 void* tls_value = stack_allocated_tls_data[slot]; | 149 void* tls_value = stack_allocated_tls_data[slot]; |
147 if (tls_value == NULL) | 150 if (!tls_value || tls_metadata[slot].status == TlsStatus::FREE) |
148 continue; | 151 continue; |
149 | 152 |
150 base::ThreadLocalStorage::TLSDestructorFunc destructor = | 153 base::ThreadLocalStorage::TLSDestructorFunc destructor = |
151 g_tls_destructors[slot]; | 154 tls_metadata[slot].destructor; |
152 if (destructor == NULL) | 155 if (!destructor) |
153 continue; | 156 continue; |
154 stack_allocated_tls_data[slot] = NULL; // pre-clear the slot. | 157 stack_allocated_tls_data[slot] = nullptr; // pre-clear the slot. |
155 destructor(tls_value); | 158 destructor(tls_value); |
156 // Any destructor might have called a different service, which then set | 159 // Any destructor might have called a different service, which then set a |
157 // a different slot to a non-NULL value. Hence we need to check | 160 // different slot to a non-null value. Hence we need to check the whole |
158 // the whole vector again. This is a pthread standard. | 161 // vector again. This is a pthread standard. |
159 need_to_scan_destructors = true; | 162 need_to_scan_destructors = true; |
160 } | 163 } |
161 if (--remaining_attempts <= 0) { | 164 if (--remaining_attempts <= 0) { |
162 NOTREACHED(); // Destructors might not have been called. | 165 NOTREACHED(); // Destructors might not have been called. |
163 break; | 166 break; |
164 } | 167 } |
165 } | 168 } |
166 | 169 |
167 // Remove our stack allocated vector. | 170 // Remove our stack allocated vector. |
168 PlatformThreadLocalStorage::SetTLSValue(key, NULL); | 171 PlatformThreadLocalStorage::SetTLSValue(key, nullptr); |
169 } | 172 } |
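
[Reviewer note] The rescan loop above mirrors the POSIX contract for
pthread_key_create destructors: if any destructor stores a new non-null
value, the implementation makes another pass, up to
PTHREAD_DESTRUCTOR_ITERATIONS passes. The convergence loop in isolation, as
a sketch (RunDestructors and its parameters are illustrative):

  using Destructor = void (*)(void*);

  void RunDestructors(void** slots, const Destructor* dtors, int n,
                      int max_passes) {
    bool repopulated = true;
    while (repopulated && max_passes-- > 0) {
      repopulated = false;
      for (int i = 0; i < n; ++i) {
        void* value = slots[i];
        if (!value || !dtors[i])
          continue;
        slots[i] = nullptr;  // pre-clear the slot, exactly as above
        dtors[i](value);     // may set other slots to non-null values...
        repopulated = true;  // ...so schedule another full scan
      }
    }
  }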
170 | 173 |
171 } // namespace | 174 } // namespace |
172 | 175 |
173 namespace base { | 176 namespace base { |
174 | 177 |
175 namespace internal { | 178 namespace internal { |
176 | 179 |
177 #if defined(OS_WIN) | 180 #if defined(OS_WIN) |
178 void PlatformThreadLocalStorage::OnThreadExit() { | 181 void PlatformThreadLocalStorage::OnThreadExit() { |
(...skipping 12 matching lines...) |
191 OnThreadExitInternal(value); | 194 OnThreadExitInternal(value); |
192 } | 195 } |
193 #endif // defined(OS_WIN) | 196 #endif // defined(OS_WIN) |
194 | 197 |
195 } // namespace internal | 198 } // namespace internal |
196 | 199 |
197 void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) { | 200 void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) { |
198 PlatformThreadLocalStorage::TLSKey key = | 201 PlatformThreadLocalStorage::TLSKey key = |
199 base::subtle::NoBarrier_Load(&g_native_tls_key); | 202 base::subtle::NoBarrier_Load(&g_native_tls_key); |
200 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES || | 203 if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES || |
201 !PlatformThreadLocalStorage::GetTLSValue(key)) | 204 !PlatformThreadLocalStorage::GetTLSValue(key)) { |
202 ConstructTlsVector(); | 205 ConstructTlsVector(); |
| 206 } |
203 | 207 |
204 // Grab a new slot. | 208 // Grab a new slot. |
205 slot_ = base::subtle::NoBarrier_AtomicIncrement(&g_last_used_tls_key, 1); | 209 slot_ = kInvalidSlotValue; |
206 DCHECK_GT(slot_, 0); | 210 { |
| 211 base::AutoLock auto_lock(g_tls_metadata_lock.Get()); |
| 212 for (int i = 0; i < kThreadLocalStorageSize; ++i) { |
| 213 // Tracking the last assigned slot is an attempt to find the next |
| 214 // available slot within one iteration. Under normal usage, slots remain |
| 215 // in use for the lifetime of the process (before this change, slots were |
| 216 // never reclaimed, yet we did not run out), which makes it highly likely |
| 217 // that the next slot is free. |
| 218 size_t slot_candidate = |
| 219 (g_last_assigned_slot + 1 + i) % kThreadLocalStorageSize; |
| 220 if (g_tls_metadata[slot_candidate].status == TlsStatus::FREE) { |
| 221 g_tls_metadata[slot_candidate].status = TlsStatus::IN_USE; |
| 222 g_tls_metadata[slot_candidate].destructor = destructor; |
| 223 g_last_assigned_slot = slot_candidate; |
| 224 slot_ = slot_candidate; |
| 225 break; |
| 226 } |
| 227 } |
| 228 } |
| 229 CHECK_NE(slot_, kInvalidSlotValue); |
207 CHECK_LT(slot_, kThreadLocalStorageSize); | 230 CHECK_LT(slot_, kThreadLocalStorageSize); |
208 | 231 |
209 // Setup our destructor. | 232 // Mark the slot as initialized. |
210 g_tls_destructors[slot_] = destructor; | |
211 base::subtle::Release_Store(&initialized_, 1); | 233 base::subtle::Release_Store(&initialized_, 1); |
212 } | 234 } |
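
[Reviewer note] Starting the scan at g_last_assigned_slot + 1 makes the
common case (slots are allocated and never freed) hit a free slot on the
first probe, so Initialize() stays O(1) amortized. The same scan as a
stand-alone sketch; FindFreeSlot is a hypothetical helper and the caller is
assumed to hold g_tls_metadata_lock:

  int FindFreeSlot(TlsMetadata* metadata, size_t size, size_t* last_assigned) {
    for (size_t i = 0; i < size; ++i) {
      size_t candidate = (*last_assigned + 1 + i) % size;
      if (metadata[candidate].status == TlsStatus::FREE) {
        metadata[candidate].status = TlsStatus::IN_USE;
        *last_assigned = candidate;
        return static_cast<int>(candidate);
      }
    }
    return kInvalidSlotValue;  // table full; the caller CHECKs against this
  }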
213 | 235 |
214 void ThreadLocalStorage::StaticSlot::Free() { | 236 void ThreadLocalStorage::StaticSlot::Free() { |
215 // At this time, we don't reclaim old indices for TLS slots. | 237 DCHECK_NE(slot_, kInvalidSlotValue); |
216 // So all we need to do is wipe the destructor. | |
217 DCHECK_GT(slot_, 0); | |
218 DCHECK_LT(slot_, kThreadLocalStorageSize); | 238 DCHECK_LT(slot_, kThreadLocalStorageSize); |
219 g_tls_destructors[slot_] = NULL; | 239 { |
220 slot_ = 0; | 240 base::AutoLock auto_lock(g_tls_metadata_lock.Get()); |
| 241 g_tls_metadata[slot_].status = TlsStatus::FREE; |
| 242 g_tls_metadata[slot_].destructor = nullptr; |
| 243 } |
| 244 slot_ = kInvalidSlotValue; |
221 base::subtle::Release_Store(&initialized_, 0); | 245 base::subtle::Release_Store(&initialized_, 0); |
222 } | 246 } |
223 | 247 |
224 void* ThreadLocalStorage::StaticSlot::Get() const { | 248 void* ThreadLocalStorage::StaticSlot::Get() const { |
225 void** tls_data = static_cast<void**>( | 249 void** tls_data = static_cast<void**>( |
226 PlatformThreadLocalStorage::GetTLSValue( | 250 PlatformThreadLocalStorage::GetTLSValue( |
227 base::subtle::NoBarrier_Load(&g_native_tls_key))); | 251 base::subtle::NoBarrier_Load(&g_native_tls_key))); |
228 if (!tls_data) | 252 if (!tls_data) |
229 tls_data = ConstructTlsVector(); | 253 tls_data = ConstructTlsVector(); |
230 DCHECK_GT(slot_, 0); | 254 DCHECK_NE(slot_, kInvalidSlotValue); |
231 DCHECK_LT(slot_, kThreadLocalStorageSize); | 255 DCHECK_LT(slot_, kThreadLocalStorageSize); |
232 return tls_data[slot_]; | 256 return tls_data[slot_]; |
233 } | 257 } |
234 | 258 |
235 void ThreadLocalStorage::StaticSlot::Set(void* value) { | 259 void ThreadLocalStorage::StaticSlot::Set(void* value) { |
236 void** tls_data = static_cast<void**>( | 260 void** tls_data = static_cast<void**>( |
237 PlatformThreadLocalStorage::GetTLSValue( | 261 PlatformThreadLocalStorage::GetTLSValue( |
238 base::subtle::NoBarrier_Load(&g_native_tls_key))); | 262 base::subtle::NoBarrier_Load(&g_native_tls_key))); |
239 if (!tls_data) | 263 if (!tls_data) |
240 tls_data = ConstructTlsVector(); | 264 tls_data = ConstructTlsVector(); |
241 DCHECK_GT(slot_, 0); | 265 DCHECK_NE(slot_, kInvalidSlotValue); |
242 DCHECK_LT(slot_, kThreadLocalStorageSize); | 266 DCHECK_LT(slot_, kThreadLocalStorageSize); |
243 tls_data[slot_] = value; | 267 tls_data[slot_] = value; |
244 } | 268 } |
245 | 269 |
246 ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) { | 270 ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) { |
247 tls_slot_.Initialize(destructor); | 271 tls_slot_.Initialize(destructor); |
248 } | 272 } |
249 | 273 |
250 ThreadLocalStorage::Slot::~Slot() { | 274 ThreadLocalStorage::Slot::~Slot() { |
251 tls_slot_.Free(); | 275 tls_slot_.Free(); |
252 } | 276 } |
253 | 277 |
254 void* ThreadLocalStorage::Slot::Get() const { | 278 void* ThreadLocalStorage::Slot::Get() const { |
255 return tls_slot_.Get(); | 279 return tls_slot_.Get(); |
256 } | 280 } |
257 | 281 |
258 void ThreadLocalStorage::Slot::Set(void* value) { | 282 void ThreadLocalStorage::Slot::Set(void* value) { |
259 tls_slot_.Set(value); | 283 tls_slot_.Set(value); |
260 } | 284 } |
261 | 285 |
262 } // namespace base | 286 } // namespace base |
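
[Reviewer note] For context, consumer code is unaffected by the reclamation
change. Typical usage, as a sketch; FreeBuffer and the 256-byte buffer are
illustrative:

  void FreeBuffer(void* buffer) {
    delete[] static_cast<char*>(buffer);
  }

  base::ThreadLocalStorage::Slot& GetSlot() {
    static base::ThreadLocalStorage::Slot slot(&FreeBuffer);
    return slot;
  }

  char* GetPerThreadBuffer() {
    if (!GetSlot().Get())
      GetSlot().Set(new char[256]);  // FreeBuffer runs at each thread's exit
    return static_cast<char*>(GetSlot().Get());
  }

The destructor registered here is exactly what OnThreadExitInternal() invokes
while draining the per-thread vector.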