Chromium Code Reviews

Side by Side Diff: third_party/tcmalloc/chromium/src/central_freelist.cc

Issue 7430007: Merge tcmalloc r111 (perftools v. 1.8) with the chromium/ branch. Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: '' Created 9 years, 4 months ago
1 // Copyright (c) 2008, Google Inc. 1 // Copyright (c) 2008, Google Inc.
2 // All rights reserved. 2 // All rights reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are 5 // modification, are permitted provided that the following conditions are
6 // met: 6 // met:
7 // 7 //
8 // * Redistributions of source code must retain the above copyright 8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer. 9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above 10 // * Redistributions in binary form must reproduce the above
(...skipping 13 matching lines...)
24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 29
30 // --- 30 // ---
31 // Author: Sanjay Ghemawat <opensource@google.com> 31 // Author: Sanjay Ghemawat <opensource@google.com>
32 32
33 #include "config.h" 33 #include "config.h"
34 #include <algorithm>
34 #include "central_freelist.h" 35 #include "central_freelist.h"
35
36 #include "internal_logging.h" // for ASSERT, MESSAGE 36 #include "internal_logging.h" // for ASSERT, MESSAGE
37 #include "linked_list.h" // for SLL_Next, SLL_Push, etc 37 #include "linked_list.h" // for SLL_Next, SLL_Push, etc
38 #include "page_heap.h" // for PageHeap 38 #include "page_heap.h" // for PageHeap
39 #include "static_vars.h" // for Static 39 #include "static_vars.h" // for Static
40 40
41 using std::min;
42 using std::max;
43
41 namespace tcmalloc { 44 namespace tcmalloc {
42 45
43 void CentralFreeList::Init(size_t cl) { 46 void CentralFreeList::Init(size_t cl) {
44 size_class_ = cl; 47 size_class_ = cl;
45 tcmalloc::DLL_Init(&empty_); 48 tcmalloc::DLL_Init(&empty_);
46 tcmalloc::DLL_Init(&nonempty_); 49 tcmalloc::DLL_Init(&nonempty_);
50 num_spans_ = 0;
47 counter_ = 0; 51 counter_ = 0;
48 52
53 max_cache_size_ = kMaxNumTransferEntries;
49 #ifdef TCMALLOC_SMALL_BUT_SLOW 54 #ifdef TCMALLOC_SMALL_BUT_SLOW
50 // Disable the transfer cache for the small footprint case. 55 // Disable the transfer cache for the small footprint case.
51 cache_size_ = 0; 56 cache_size_ = 0;
52 #else 57 #else
53 cache_size_ = 16; 58 cache_size_ = 16;
54 #endif 59 #endif
60 if (cl > 0) {
61 // Limit the maximum size of the cache based on the size class. If this
62 // is not done, large size class objects will consume a lot of memory if
63 // they just sit in the transfer cache.
64 int32_t bytes = Static::sizemap()->ByteSizeForClass(cl);
65 int32_t objs_to_move = Static::sizemap()->num_objects_to_move(cl);
66
67 ASSERT(objs_to_move > 0 && bytes > 0);
68 // Limit each size class cache to at most 1MB of objects or one entry,
69 // whichever is greater. Total transfer cache memory used across all
70 // size classes then can't be greater than approximately
71 // 1MB * kMaxNumTransferEntries.
72 // min and max are in parens to avoid macro-expansion on windows.
73 max_cache_size_ = (min)(max_cache_size_,
74 (max)(1, (1024 * 1024) / (bytes * objs_to_move)));
75 cache_size_ = (min)(cache_size_, max_cache_size_);
76 }
55 used_slots_ = 0; 77 used_slots_ = 0;
56 ASSERT(cache_size_ <= kNumTransferEntries); 78 ASSERT(cache_size_ <= max_cache_size_);
57 } 79 }
58 80
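Note: the cap computed in Init() above keeps each size class's transfer cache to roughly 1MB of objects while never dropping below one entry. A minimal standalone sketch of the same arithmetic, using assumed example values in place of the real Static::sizemap() data and the real kMaxNumTransferEntries constant:

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>

  int main() {
    const int32_t kMaxNumTransferEntries = 64;  // assumed cap, not the actual constant
    const int32_t bytes = 4096;                 // stand-in for ByteSizeForClass(cl)
    const int32_t objs_to_move = 32;            // stand-in for num_objects_to_move(cl)
    // Same formula as the patch: at most ~1MB of objects per size class,
    // but always at least one transfer-cache entry.
    const int32_t max_cache_size = (std::min)(kMaxNumTransferEntries,
        (std::max)(1, (1024 * 1024) / (bytes * objs_to_move)));
    std::printf("max_cache_size = %d\n", max_cache_size);  // prints 8 for these inputs
    return 0;
  }

For a 4096-byte class moved in batches of 32, one transfer-cache entry already holds 128KB, so the cap works out to 8 entries (about 1MB).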
59 void CentralFreeList::ReleaseListToSpans(void* start) { 81 void CentralFreeList::ReleaseListToSpans(void* start) {
60 while (start) { 82 while (start) {
61 void *next = SLL_Next(start); 83 void *next = SLL_Next(start);
62 ReleaseToSpans(start); 84 ReleaseToSpans(start);
63 start = next; 85 start = next;
64 } 86 }
65 } 87 }
66 88
(...skipping 36 matching lines...)
103 Static::sizemap()->ByteSizeForClass(span->sizeclass)); 125 Static::sizemap()->ByteSizeForClass(span->sizeclass));
104 } 126 }
105 127
106 counter_++; 128 counter_++;
107 span->refcount--; 129 span->refcount--;
108 if (span->refcount == 0) { 130 if (span->refcount == 0) {
109 Event(span, '#', 0); 131 Event(span, '#', 0);
110 counter_ -= ((span->length<<kPageShift) / 132 counter_ -= ((span->length<<kPageShift) /
111 Static::sizemap()->ByteSizeForClass(span->sizeclass)); 133 Static::sizemap()->ByteSizeForClass(span->sizeclass));
112 tcmalloc::DLL_Remove(span); 134 tcmalloc::DLL_Remove(span);
135 --num_spans_;
113 136
114 // Release central list lock while operating on pageheap 137 // Release central list lock while operating on pageheap
115 lock_.Unlock(); 138 lock_.Unlock();
116 { 139 {
117 SpinLockHolder h(Static::pageheap_lock()); 140 SpinLockHolder h(Static::pageheap_lock());
118 Static::pageheap()->Delete(span); 141 Static::pageheap()->Delete(span);
119 } 142 }
120 lock_.Lock(); 143 lock_.Lock();
121 } else { 144 } else {
122 *(reinterpret_cast<void**>(object)) = span->objects; 145 *(reinterpret_cast<void**>(object)) = span->objects;
(...skipping 14 matching lines...)
137 ASSERT(t >= 0); 160 ASSERT(t >= 0);
138 ASSERT(t < kNumClasses); 161 ASSERT(t < kNumClasses);
139 if (t == locked_size_class) return false; 162 if (t == locked_size_class) return false;
140 return Static::central_cache()[t].ShrinkCache(locked_size_class, force); 163 return Static::central_cache()[t].ShrinkCache(locked_size_class, force);
141 } 164 }
142 165
143 bool CentralFreeList::MakeCacheSpace() { 166 bool CentralFreeList::MakeCacheSpace() {
144 // Is there room in the cache? 167 // Is there room in the cache?
145 if (used_slots_ < cache_size_) return true; 168 if (used_slots_ < cache_size_) return true;
146 // Check if we can expand this cache? 169 // Check if we can expand this cache?
147 if (cache_size_ == kNumTransferEntries) return false; 170 if (cache_size_ == max_cache_size_) return false;
148 // Ok, we'll try to grab an entry from some other size class. 171 // Ok, we'll try to grab an entry from some other size class.
149 if (EvictRandomSizeClass(size_class_, false) || 172 if (EvictRandomSizeClass(size_class_, false) ||
150 EvictRandomSizeClass(size_class_, true)) { 173 EvictRandomSizeClass(size_class_, true)) {
151 // Succeeded in evicting, we're going to make our cache larger. 174 // Succeeded in evicting, we're going to make our cache larger.
152 // However, we may have dropped and re-acquired the lock in 175 // However, we may have dropped and re-acquired the lock in
153 // EvictRandomSizeClass (via ShrinkCache and the LockInverter), so the 176 // EvictRandomSizeClass (via ShrinkCache and the LockInverter), so the
154 // cache_size may have changed. Therefore, check and verify that it is 177 // cache_size may have changed. Therefore, check and verify that it is
155 // still OK to increase the cache_size. 178 // still OK to increase the cache_size.
156 if (cache_size_ < kNumTransferEntries) { 179 if (cache_size_ < max_cache_size_) {
157 cache_size_++; 180 cache_size_++;
158 return true; 181 return true;
159 } 182 }
160 } 183 }
161 return false; 184 return false;
162 } 185 }
163 186
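Note: the re-check of cache_size_ after a successful eviction matters because EvictRandomSizeClass can release and re-acquire lock_ on the way through ShrinkCache, so the earlier bound check may no longer hold. A self-contained sketch of that pattern (hypothetical names, not tcmalloc code), assuming a single mutex guarding the counters:

  #include <mutex>

  class TinyCache {
   public:
    bool MakeSpace() {
      std::unique_lock<std::mutex> l(lock_);
      if (used_ < size_) return true;
      if (size_ == max_size_) return false;
      // Eviction elsewhere would drop and re-take the lock; another thread
      // may have grown size_ while it was released.
      l.unlock();
      // ... eviction work under other locks ...
      l.lock();
      if (size_ < max_size_) {  // re-validate before growing
        ++size_;
        return true;
      }
      return false;
    }

   private:
    std::mutex lock_;
    int used_ = 0;
    int size_ = 0;
    int max_size_ = 16;
  };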
164 187
165 namespace { 188 namespace {
166 class LockInverter { 189 class LockInverter {
(...skipping 36 matching lines...)
203 cache_size_--; 226 cache_size_--;
204 return true; 227 return true;
205 } 228 }
206 229
207 void CentralFreeList::InsertRange(void *start, void *end, int N) { 230 void CentralFreeList::InsertRange(void *start, void *end, int N) {
208 SpinLockHolder h(&lock_); 231 SpinLockHolder h(&lock_);
209 if (N == Static::sizemap()->num_objects_to_move(size_class_) && 232 if (N == Static::sizemap()->num_objects_to_move(size_class_) &&
210 MakeCacheSpace()) { 233 MakeCacheSpace()) {
211 int slot = used_slots_++; 234 int slot = used_slots_++;
212 ASSERT(slot >=0); 235 ASSERT(slot >=0);
213 ASSERT(slot < kNumTransferEntries); 236 ASSERT(slot < max_cache_size_);
214 TCEntry *entry = &tc_slots_[slot]; 237 TCEntry *entry = &tc_slots_[slot];
215 entry->head = start; 238 entry->head = start;
216 entry->tail = end; 239 entry->tail = end;
217 return; 240 return;
218 } 241 }
219 ReleaseListToSpans(start); 242 ReleaseListToSpans(start);
220 } 243 }
221 244
222 int CentralFreeList::RemoveRange(void **start, void **end, int N) { 245 int CentralFreeList::RemoveRange(void **start, void **end, int N) {
223 ASSERT(N > 0); 246 ASSERT(N > 0);
(...skipping 97 matching lines...)
321 ptr += size; 344 ptr += size;
322 num++; 345 num++;
323 } 346 }
324 ASSERT(ptr <= limit); 347 ASSERT(ptr <= limit);
325 *tail = NULL; 348 *tail = NULL;
326 span->refcount = 0; // No sub-object in use yet 349 span->refcount = 0; // No sub-object in use yet
327 350
328 // Add span to list of non-empty spans 351 // Add span to list of non-empty spans
329 lock_.Lock(); 352 lock_.Lock();
330 tcmalloc::DLL_Prepend(&nonempty_, span); 353 tcmalloc::DLL_Prepend(&nonempty_, span);
354 ++num_spans_;
331 counter_ += num; 355 counter_ += num;
332 } 356 }
333 357
334 int CentralFreeList::tc_length() { 358 int CentralFreeList::tc_length() {
335 SpinLockHolder h(&lock_); 359 SpinLockHolder h(&lock_);
336 return used_slots_ * Static::sizemap()->num_objects_to_move(size_class_); 360 return used_slots_ * Static::sizemap()->num_objects_to_move(size_class_);
337 } 361 }
338 362
363 size_t CentralFreeList::OverheadBytes() {
364 SpinLockHolder h(&lock_);
365 if (size_class_ == 0) { // 0 holds the 0-sized allocations
366 return 0;
367 }
368 const size_t pages_per_span = Static::sizemap()->class_to_pages(size_class_);
369 const size_t object_size = Static::sizemap()->class_to_size(size_class_);
370 ASSERT(object_size > 0);
371 const size_t overhead_per_span = (pages_per_span * kPageSize) % object_size;
372 return num_spans_ * overhead_per_span;
373 }
374
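Note: OverheadBytes() charges each span the tail fragment that is too small to hold another object. A worked example with assumed values (not the build's actual page size or size-class table):

  #include <cstddef>
  #include <cstdio>

  int main() {
    const size_t kPageSize = 8192;     // assumed page size
    const size_t pages_per_span = 1;   // stand-in for class_to_pages(size_class_)
    const size_t object_size = 48;     // stand-in for class_to_size(size_class_)
    const size_t num_spans = 10;       // spans currently owned by this free list
    // The leftover bytes at the end of each span that cannot fit an object.
    const size_t overhead_per_span = (pages_per_span * kPageSize) % object_size;
    std::printf("per span: %zu bytes, total: %zu bytes\n",
                overhead_per_span, num_spans * overhead_per_span);  // 32 and 320
    return 0;
  }

Here 8192 bytes hold 170 objects of 48 bytes (8160 bytes), leaving 32 bytes of overhead per span, or 320 bytes across 10 spans.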
339 } // namespace tcmalloc 375 } // namespace tcmalloc