OLD | NEW |
1 // Copyright (c) 2008, Google Inc. | 1 // Copyright (c) 2008, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 24 matching lines...) Expand all Loading... |
35 | 35 |
36 #include <config.h> | 36 #include <config.h> |
37 #ifdef HAVE_PTHREAD | 37 #ifdef HAVE_PTHREAD |
38 #include <pthread.h> // for pthread_t, pthread_key_t | 38 #include <pthread.h> // for pthread_t, pthread_key_t |
39 #endif | 39 #endif |
40 #include <stddef.h> // for size_t, NULL | 40 #include <stddef.h> // for size_t, NULL |
41 #ifdef HAVE_STDINT_H | 41 #ifdef HAVE_STDINT_H |
42 #include <stdint.h> // for uint32_t, uint64_t | 42 #include <stdint.h> // for uint32_t, uint64_t |
43 #endif | 43 #endif |
44 #include <sys/types.h> // for ssize_t | 44 #include <sys/types.h> // for ssize_t |
| 45 #include "common.h" |
| 46 #include "linked_list.h" |
| 47 #include "maybe_threads.h" |
| 48 #include "page_heap_allocator.h" |
| 49 #include "sampler.h" |
| 50 #include "static_vars.h" |
| 51 |
45 #include "common.h" // for SizeMap, kMaxSize, etc | 52 #include "common.h" // for SizeMap, kMaxSize, etc |
46 #include "free_list.h" // for FL_Pop, FL_PopRange, etc | |
47 #include "internal_logging.h" // for ASSERT, etc | 53 #include "internal_logging.h" // for ASSERT, etc |
48 #include "maybe_threads.h" | 54 #include "linked_list.h" // for SLL_Pop, SLL_PopRange, etc |
49 #include "page_heap_allocator.h" // for PageHeapAllocator | 55 #include "page_heap_allocator.h" // for PageHeapAllocator |
50 #include "sampler.h" // for Sampler | 56 #include "sampler.h" // for Sampler |
51 #include "static_vars.h" // for Static | 57 #include "static_vars.h" // for Static |
52 | 58 |
53 namespace tcmalloc { | 59 namespace tcmalloc { |
54 | 60 |
55 // Even if we have support for thread-local storage in the compiler | 61 // Even if we have support for thread-local storage in the compiler |
56 // and linker, the OS may not support it. We need to check that at | 62 // and linker, the OS may not support it. We need to check that at |
57 // runtime. Right now, we have to keep a manual set of "bad" OSes. | 63 // runtime. Right now, we have to keep a manual set of "bad" OSes. |
58 #if defined(HAVE_TLS) | 64 #if defined(HAVE_TLS) |
(...skipping 22 matching lines...) Expand all Loading... |
81 | 87 |
82 // Total byte size in cache | 88 // Total byte size in cache |
83 size_t Size() const { return size_; } | 89 size_t Size() const { return size_; } |
84 | 90 |
85 // Allocate an object of the given size and class. The size given | 91 // Allocate an object of the given size and class. The size given |
86 // must be the same as the size of the class in the size map. | 92 // must be the same as the size of the class in the size map. |
87 void* Allocate(size_t size, size_t cl); | 93 void* Allocate(size_t size, size_t cl); |
88 void Deallocate(void* ptr, size_t size_class); | 94 void Deallocate(void* ptr, size_t size_class); |
89 | 95 |
90 void Scavenge(); | 96 void Scavenge(); |
91 void Print(TCMalloc_Printer* out) const; | |
92 | 97 |
93 int GetSamplePeriod(); | 98 int GetSamplePeriod(); |
94 | 99 |
95 // Record allocation of "k" bytes. Return true iff allocation | 100 // Record allocation of "k" bytes. Return true iff allocation |
96 // should be sampled | 101 // should be sampled |
97 bool SampleAllocation(size_t k); | 102 bool SampleAllocation(size_t k); |
98 | 103 |
99 static void InitModule(); | 104 static void InitModule(); |
100 static void InitTSD(); | 105 static void InitTSD(); |
101 static ThreadCache* GetThreadHeap(); | 106 static ThreadCache* GetThreadHeap(); |
102 static ThreadCache* GetCache(); | 107 static ThreadCache* GetCache(); |
103 static ThreadCache* GetCacheIfPresent(); | 108 static ThreadCache* GetCacheIfPresent(); |
104 static ThreadCache* CreateCacheIfNecessary(); | 109 static ThreadCache* CreateCacheIfNecessary(); |
105 static void BecomeIdle(); | 110 static void BecomeIdle(); |
106 | 111 |
107 // Return the number of thread heaps in use. | 112 // Return the number of thread heaps in use. |
108 static inline int HeapsInUse(); | 113 static inline int HeapsInUse(); |
109 | 114 |
110 // Writes to total_bytes the total number of bytes used by all thread heaps. | 115 // Writes to total_bytes the total number of bytes used by all thread heaps. |
111 // class_count must be an array of size kNumClasses. Writes the number of | 116 // class_count must be an array of size kNumClasses. Writes the number of |
112 // items on the corresponding freelist. class_count may be NULL. | 117 // items on the corresponding freelist. class_count may be NULL. |
113 // The storage of both parameters must be zero initialized. | 118 // The storage of both parameters must be zero initialized. |
114 // REQUIRES: Static::pageheap_lock is held. | 119 // REQUIRES: Static::pageheap_lock is held. |
115 static void GetThreadStats(uint64_t* total_bytes, uint64_t* class_count); | 120 static void GetThreadStats(uint64_t* total_bytes, uint64_t* class_count); |
116 | 121 |
117 // Write debugging statistics to 'out'. | |
118 // REQUIRES: Static::pageheap_lock is held. | |
119 static void PrintThreads(TCMalloc_Printer* out); | |
120 | |
121 // Sets the total thread cache size to new_size, recomputing the | 122 // Sets the total thread cache size to new_size, recomputing the |
122 // individual thread cache sizes as necessary. | 123 // individual thread cache sizes as necessary. |
123 // REQUIRES: Static::pageheap lock is held. | 124 // REQUIRES: Static::pageheap lock is held. |
124 static void set_overall_thread_cache_size(size_t new_size); | 125 static void set_overall_thread_cache_size(size_t new_size); |
125 static size_t overall_thread_cache_size() { | 126 static size_t overall_thread_cache_size() { |
126 return overall_thread_cache_size_; | 127 return overall_thread_cache_size_; |
127 } | 128 } |
128 | 129 |
129 private: | 130 private: |
130 class FreeList { | 131 class FreeList { |
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
185 // Is list empty? | 186 // Is list empty? |
186 bool empty() const { | 187 bool empty() const { |
187 return list_ == NULL; | 188 return list_ == NULL; |
188 } | 189 } |
189 | 190 |
190 // Low-water mark management | 191 // Low-water mark management |
191 int lowwatermark() const { return lowater_; } | 192 int lowwatermark() const { return lowater_; } |
192 void clear_lowwatermark() { lowater_ = length_; } | 193 void clear_lowwatermark() { lowater_ = length_; } |
193 | 194 |
194 void Push(void* ptr) { | 195 void Push(void* ptr) { |
195 FL_Push(&list_, ptr); | 196 SLL_Push(&list_, ptr); |
196 length_++; | 197 length_++; |
197 } | 198 } |
198 | 199 |
199 void* Pop() { | 200 void* Pop() { |
200 ASSERT(list_ != NULL); | 201 ASSERT(list_ != NULL); |
201 length_--; | 202 length_--; |
202 if (length_ < lowater_) lowater_ = length_; | 203 if (length_ < lowater_) lowater_ = length_; |
203 return FL_Pop(&list_); | 204 return SLL_Pop(&list_); |
| 205 } |
| 206 |
| 207 void* Next() { |
| 208 return SLL_Next(&list_); |
204 } | 209 } |
205 | 210 |
206 void PushRange(int N, void *start, void *end) { | 211 void PushRange(int N, void *start, void *end) { |
207 FL_PushRange(&list_, start, end); | 212 SLL_PushRange(&list_, start, end); |
208 length_ += N; | 213 length_ += N; |
209 } | 214 } |
210 | 215 |
211 void PopRange(int N, void **start, void **end) { | 216 void PopRange(int N, void **start, void **end) { |
212 FL_PopRange(&list_, N, start, end); | 217 SLL_PopRange(&list_, N, start, end); |
213 ASSERT(length_ >= N); | 218 ASSERT(length_ >= N); |
214 length_ -= N; | 219 length_ -= N; |
215 if (length_ < lowater_) lowater_ = length_; | 220 if (length_ < lowater_) lowater_ = length_; |
216 } | 221 } |
217 }; | 222 }; |
218 | 223 |
219 // Gets and returns an object from the central cache, and, if possible, | 224 // Gets and returns an object from the central cache, and, if possible, |
220 // also adds some objects of that size class to this thread cache. | 225 // also adds some objects of that size class to this thread cache. |
221 void* FetchFromCentralCache(size_t cl, size_t byte_size); | 226 void* FetchFromCentralCache(size_t cl, size_t byte_size); |
222 | 227 |
(...skipping 113 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
336 return FetchFromCentralCache(cl, size); | 341 return FetchFromCentralCache(cl, size); |
337 } | 342 } |
338 size_ -= size; | 343 size_ -= size; |
339 return list->Pop(); | 344 return list->Pop(); |
340 } | 345 } |
341 | 346 |
342 inline void ThreadCache::Deallocate(void* ptr, size_t cl) { | 347 inline void ThreadCache::Deallocate(void* ptr, size_t cl) { |
343 FreeList* list = &list_[cl]; | 348 FreeList* list = &list_[cl]; |
344 size_ += Static::sizemap()->ByteSizeForClass(cl); | 349 size_ += Static::sizemap()->ByteSizeForClass(cl); |
345 ssize_t size_headroom = max_size_ - size_ - 1; | 350 ssize_t size_headroom = max_size_ - size_ - 1; |
| 351 |
| 352 // This catches back-to-back frees of allocs in the same size |
| 353 // class. A more comprehensive (and expensive) test would be to walk |
| 354 // the entire freelist. But this might be enough to find some bugs. |
| 355 ASSERT(ptr != list->Next()); |
| 356 |
346 list->Push(ptr); | 357 list->Push(ptr); |
347 ssize_t list_headroom = | 358 ssize_t list_headroom = |
348 static_cast<ssize_t>(list->max_length()) - list->length(); | 359 static_cast<ssize_t>(list->max_length()) - list->length(); |
349 | 360 |
350 // There are two relatively uncommon things that require further work. | 361 // There are two relatively uncommon things that require further work. |
351 // In the common case we're done, and in that case we need a single branch | 362 // In the common case we're done, and in that case we need a single branch |
352 // because of the bitwise-or trick that follows. | 363 // because of the bitwise-or trick that follows. |
353 if ((list_headroom | size_headroom) < 0) { | 364 if ((list_headroom | size_headroom) < 0) { |
354 if (list_headroom < 0) { | 365 if (list_headroom < 0) { |
355 ListTooLong(list, cl); | 366 ListTooLong(list, cl); |
(...skipping 27 matching lines...) Expand all Loading... |
383 // because we may be in the thread destruction code and may have | 394 // because we may be in the thread destruction code and may have |
384 // already cleaned up the cache for this thread. | 395 // already cleaned up the cache for this thread. |
385 inline ThreadCache* ThreadCache::GetCacheIfPresent() { | 396 inline ThreadCache* ThreadCache::GetCacheIfPresent() { |
386 if (!tsd_inited_) return NULL; | 397 if (!tsd_inited_) return NULL; |
387 return GetThreadHeap(); | 398 return GetThreadHeap(); |
388 } | 399 } |
389 | 400 |
390 } // namespace tcmalloc | 401 } // namespace tcmalloc |
391 | 402 |
392 #endif // TCMALLOC_THREAD_CACHE_H_ | 403 #endif // TCMALLOC_THREAD_CACHE_H_ |
OLD | NEW |