Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(6)

Side by Side Diff: third_party/tcmalloc/chromium/src/thread_cache.h

Issue 9311003: Update the tcmalloc chromium branch to r144 (gperftools 2.0), and merge chromium-specific changes. (Closed) Base URL: http://git.chromium.org/git/chromium.git@trunk
Patch Set: Rebased. Created 8 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2008, Google Inc. 1 // Copyright (c) 2008, Google Inc.
2 // All rights reserved. 2 // All rights reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are 5 // modification, are permitted provided that the following conditions are
6 // met: 6 // met:
7 // 7 //
8 // * Redistributions of source code must retain the above copyright 8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer. 9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above 10 // * Redistributions in binary form must reproduce the above
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after
81 81
82 // Total byte size in cache 82 // Total byte size in cache
83 size_t Size() const { return size_; } 83 size_t Size() const { return size_; }
84 84
85 // Allocate an object of the given size and class. The size given 85 // Allocate an object of the given size and class. The size given
86 // must be the same as the size of the class in the size map. 86 // must be the same as the size of the class in the size map.
87 void* Allocate(size_t size, size_t cl); 87 void* Allocate(size_t size, size_t cl);
88 void Deallocate(void* ptr, size_t size_class); 88 void Deallocate(void* ptr, size_t size_class);
89 89
90 void Scavenge(); 90 void Scavenge();
91 void Print(TCMalloc_Printer* out) const;
92 91
93 int GetSamplePeriod(); 92 int GetSamplePeriod();
94 93
95 // Record allocation of "k" bytes. Return true iff allocation 94 // Record allocation of "k" bytes. Return true iff allocation
96 // should be sampled 95 // should be sampled
97 bool SampleAllocation(size_t k); 96 bool SampleAllocation(size_t k);
98 97
99 // Record additional bytes allocated. 98 // Record additional bytes allocated.
100 void AddToByteAllocatedTotal(size_t k) { total_bytes_allocated_ += k; } 99 void AddToByteAllocatedTotal(size_t k) { total_bytes_allocated_ += k; }
101 100
(...skipping 16 matching lines...) Expand all
118 // Return the number of thread heaps in use. 117 // Return the number of thread heaps in use.
119 static inline int HeapsInUse(); 118 static inline int HeapsInUse();
120 119
121 // Writes to total_bytes the total number of bytes used by all thread heaps. 120 // Writes to total_bytes the total number of bytes used by all thread heaps.
122 // class_count must be an array of size kNumClasses. Writes the number of 121 // class_count must be an array of size kNumClasses. Writes the number of
123 // items on the corresponding freelist. class_count may be NULL. 122 // items on the corresponding freelist. class_count may be NULL.
124 // The storage of both parameters must be zero initialized. 123 // The storage of both parameters must be zero initialized.
125 // REQUIRES: Static::pageheap_lock is held. 124 // REQUIRES: Static::pageheap_lock is held.
126 static void GetThreadStats(uint64_t* total_bytes, uint64_t* class_count); 125 static void GetThreadStats(uint64_t* total_bytes, uint64_t* class_count);
127 126
128 // Write debugging statistics to 'out'.
129 // REQUIRES: Static::pageheap_lock is held.
130 static void PrintThreads(TCMalloc_Printer* out);
131
132 // Sets the total thread cache size to new_size, recomputing the 127 // Sets the total thread cache size to new_size, recomputing the
133 // individual thread cache sizes as necessary. 128 // individual thread cache sizes as necessary.
134 // REQUIRES: Static::pageheap lock is held. 129 // REQUIRES: Static::pageheap lock is held.
135 static void set_overall_thread_cache_size(size_t new_size); 130 static void set_overall_thread_cache_size(size_t new_size);
136 static size_t overall_thread_cache_size() { 131 static size_t overall_thread_cache_size() {
137 return overall_thread_cache_size_; 132 return overall_thread_cache_size_;
138 } 133 }
139 134
140 private: 135 private:
141 class FreeList { 136 class FreeList {
(...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after
207 length_++; 202 length_++;
208 } 203 }
209 204
210 void* Pop() { 205 void* Pop() {
211 ASSERT(list_ != NULL); 206 ASSERT(list_ != NULL);
212 length_--; 207 length_--;
213 if (length_ < lowater_) lowater_ = length_; 208 if (length_ < lowater_) lowater_ = length_;
214 return FL_Pop(&list_); 209 return FL_Pop(&list_);
215 } 210 }
216 211
212 void* Next() {
213 return SLL_Next(&list_);
jar (doing other things) 2012/02/28 22:15:49 Probably add FL_Next() instead of SLL_next.
Dai Mikurube (NOT FULLTIME) 2012/02/28 23:05:43 Done.
214 }
215
217 void PushRange(int N, void *start, void *end) { 216 void PushRange(int N, void *start, void *end) {
218 FL_PushRange(&list_, start, end); 217 FL_PushRange(&list_, start, end);
219 length_ += N; 218 length_ += N;
220 } 219 }
221 220
222 void PopRange(int N, void **start, void **end) { 221 void PopRange(int N, void **start, void **end) {
223 FL_PopRange(&list_, N, start, end); 222 FL_PopRange(&list_, N, start, end);
224 ASSERT(length_ >= N); 223 ASSERT(length_ >= N);
225 length_ -= N; 224 length_ -= N;
226 if (length_ < lowater_) lowater_ = length_; 225 if (length_ < lowater_) lowater_ = length_;
(...skipping 132 matching lines...) Expand 10 before | Expand all | Expand 10 after
359 return FetchFromCentralCache(cl, size); 358 return FetchFromCentralCache(cl, size);
360 } 359 }
361 size_ -= size; 360 size_ -= size;
362 return list->Pop(); 361 return list->Pop();
363 } 362 }
364 363
365 inline void ThreadCache::Deallocate(void* ptr, size_t cl) { 364 inline void ThreadCache::Deallocate(void* ptr, size_t cl) {
366 FreeList* list = &list_[cl]; 365 FreeList* list = &list_[cl];
367 size_ += Static::sizemap()->ByteSizeForClass(cl); 366 size_ += Static::sizemap()->ByteSizeForClass(cl);
368 ssize_t size_headroom = max_size_ - size_ - 1; 367 ssize_t size_headroom = max_size_ - size_ - 1;
368
369 // This catches back-to-back frees of allocs in the same size
370 // class. A more comprehensive (and expensive) test would be to walk
371 // the entire freelist. But this might be enough to find some bugs.
372 ASSERT(ptr != list->Next());
jar (doing other things) 2012/03/01 01:35:05 Since you're having trouble with this assert... I
Dai Mikurube (NOT FULLTIME) 2012/03/01 01:54:05 Thanks, Jim. I agree. It looks that the list SHO
373
369 list->Push(ptr); 374 list->Push(ptr);
370 ssize_t list_headroom = 375 ssize_t list_headroom =
371 static_cast<ssize_t>(list->max_length()) - list->length(); 376 static_cast<ssize_t>(list->max_length()) - list->length();
372 377
373 // There are two relatively uncommon things that require further work. 378 // There are two relatively uncommon things that require further work.
374 // In the common case we're done, and in that case we need a single branch 379 // In the common case we're done, and in that case we need a single branch
375 // because of the bitwise-or trick that follows. 380 // because of the bitwise-or trick that follows.
376 if ((list_headroom | size_headroom) < 0) { 381 if ((list_headroom | size_headroom) < 0) {
377 if (list_headroom < 0) { 382 if (list_headroom < 0) {
378 ListTooLong(list, cl); 383 ListTooLong(list, cl);
(...skipping 27 matching lines...) Expand all
406 // because we may be in the thread destruction code and may have 411 // because we may be in the thread destruction code and may have
407 // already cleaned up the cache for this thread. 412 // already cleaned up the cache for this thread.
408 inline ThreadCache* ThreadCache::GetCacheIfPresent() { 413 inline ThreadCache* ThreadCache::GetCacheIfPresent() {
409 if (!tsd_inited_) return NULL; 414 if (!tsd_inited_) return NULL;
410 return GetThreadHeap(); 415 return GetThreadHeap();
411 } 416 }
412 417
413 } // namespace tcmalloc 418 } // namespace tcmalloc
414 419
415 #endif // TCMALLOC_THREAD_CACHE_H_ 420 #endif // TCMALLOC_THREAD_CACHE_H_
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698