Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(130)

Side by Side Diff: third_party/tcmalloc/chromium/src/central_freelist.cc

Issue 9310021: [NOT TO COMMIT!] Merge Chromium-specific changes in tcmalloc thru. the original gperftools r136. (Closed) Base URL: http://git.chromium.org/git/chromium.git@trunk
Patch Set: Fixed some build blockers. Created 8 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2008, Google Inc. 1 // Copyright (c) 2008, Google Inc.
2 // All rights reserved. 2 // All rights reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are 5 // modification, are permitted provided that the following conditions are
6 // met: 6 // met:
7 // 7 //
8 // * Redistributions of source code must retain the above copyright 8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer. 9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above 10 // * Redistributions in binary form must reproduce the above
(...skipping 15 matching lines...) Expand all
26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 29
30 // --- 30 // ---
31 // Author: Sanjay Ghemawat <opensource@google.com> 31 // Author: Sanjay Ghemawat <opensource@google.com>
32 32
33 #include "config.h" 33 #include "config.h"
34 #include <algorithm> 34 #include <algorithm>
35 #include "central_freelist.h" 35 #include "central_freelist.h"
36 #include "free_list.h" // for FL_Next, FL_Push, etc
36 #include "internal_logging.h" // for ASSERT, MESSAGE 37 #include "internal_logging.h" // for ASSERT, MESSAGE
37 #include "linked_list.h" // for SLL_Next, SLL_Push, etc
38 #include "page_heap.h" // for PageHeap 38 #include "page_heap.h" // for PageHeap
39 #include "static_vars.h" // for Static 39 #include "static_vars.h" // for Static
40 40
41 using std::min; 41 using std::min;
42 using std::max; 42 using std::max;
43 43
44 namespace tcmalloc { 44 namespace tcmalloc {
45 45
46 void CentralFreeList::Init(size_t cl) { 46 void CentralFreeList::Init(size_t cl) {
47 size_class_ = cl; 47 size_class_ = cl;
(...skipping 25 matching lines...) Expand all
73 max_cache_size_ = (min)(max_cache_size_, 73 max_cache_size_ = (min)(max_cache_size_,
74 (max)(1, (1024 * 1024) / (bytes * objs_to_move))); 74 (max)(1, (1024 * 1024) / (bytes * objs_to_move)));
75 cache_size_ = (min)(cache_size_, max_cache_size_); 75 cache_size_ = (min)(cache_size_, max_cache_size_);
76 } 76 }
77 used_slots_ = 0; 77 used_slots_ = 0;
78 ASSERT(cache_size_ <= max_cache_size_); 78 ASSERT(cache_size_ <= max_cache_size_);
79 } 79 }
80 80
81 void CentralFreeList::ReleaseListToSpans(void* start) { 81 void CentralFreeList::ReleaseListToSpans(void* start) {
82 while (start) { 82 while (start) {
83 void *next = SLL_Next(start); 83 void *next = FL_Next(start);
84 ReleaseToSpans(start); 84 ReleaseToSpans(start);
85 start = next; 85 start = next;
86 } 86 }
87 } 87 }
88 88
89 // MapObjectToSpan should logically be part of ReleaseToSpans. But 89 // MapObjectToSpan should logically be part of ReleaseToSpans. But
90 // this triggers an optimization bug in gcc 4.5.0. Moving to a 90 // this triggers an optimization bug in gcc 4.5.0. Moving to a
91 // separate function, and making sure that function isn't inlined, 91 // separate function, and making sure that function isn't inlined,
92 // seems to fix the problem. It also should be fixed for gcc 4.5.1. 92 // seems to fix the problem. It also should be fixed for gcc 4.5.1.
93 static 93 static
(...skipping 15 matching lines...) Expand all
109 if (span->objects == NULL) { 109 if (span->objects == NULL) {
110 tcmalloc::DLL_Remove(span); 110 tcmalloc::DLL_Remove(span);
111 tcmalloc::DLL_Prepend(&nonempty_, span); 111 tcmalloc::DLL_Prepend(&nonempty_, span);
112 Event(span, 'N', 0); 112 Event(span, 'N', 0);
113 } 113 }
114 114
115 // The following check is expensive, so it is disabled by default 115 // The following check is expensive, so it is disabled by default
116 if (false) { 116 if (false) {
117 // Check that object does not occur in list 117 // Check that object does not occur in list
118 int got = 0; 118 int got = 0;
119 for (void* p = span->objects; p != NULL; p = *((void**) p)) { 119 for (void* p = span->objects; p != NULL; p = FL_Next(p)){
120 ASSERT(p != object); 120 ASSERT(p != object);
121 got++; 121 got++;
122 } 122 }
123 ASSERT(got + span->refcount == 123 ASSERT(got + span->refcount ==
124 (span->length<<kPageShift) / 124 (span->length<<kPageShift) /
125 Static::sizemap()->ByteSizeForClass(span->sizeclass)); 125 Static::sizemap()->ByteSizeForClass(span->sizeclass));
126 } 126 }
127 127
128 counter_++; 128 counter_++;
129 span->refcount--; 129 span->refcount--;
130 if (span->refcount == 0) { 130 if (span->refcount == 0) {
131 Event(span, '#', 0); 131 Event(span, '#', 0);
132 counter_ -= ((span->length<<kPageShift) / 132 counter_ -= ((span->length<<kPageShift) /
133 Static::sizemap()->ByteSizeForClass(span->sizeclass)); 133 Static::sizemap()->ByteSizeForClass(span->sizeclass));
134 tcmalloc::DLL_Remove(span); 134 tcmalloc::DLL_Remove(span);
135 --num_spans_; 135 --num_spans_;
136 136
137 // Release central list lock while operating on pageheap 137 // Release central list lock while operating on pageheap
138 lock_.Unlock(); 138 lock_.Unlock();
139 { 139 {
140 SpinLockHolder h(Static::pageheap_lock()); 140 SpinLockHolder h(Static::pageheap_lock());
141 Static::pageheap()->Delete(span); 141 Static::pageheap()->Delete(span);
142 } 142 }
143 lock_.Lock(); 143 lock_.Lock();
144 } else { 144 } else {
145 *(reinterpret_cast<void**>(object)) = span->objects; 145 FL_Push(&(span->objects), object);
146 span->objects = object;
147 } 146 }
148 } 147 }
149 148
150 bool CentralFreeList::EvictRandomSizeClass( 149 bool CentralFreeList::EvictRandomSizeClass(
151 int locked_size_class, bool force) { 150 int locked_size_class, bool force) {
152 static int race_counter = 0; 151 static int race_counter = 0;
153 int t = race_counter++; // Updated without a lock, but who cares. 152 int t = race_counter++; // Updated without a lock, but who cares.
154 if (t >= kNumClasses) { 153 if (t >= kNumClasses) {
155 while (t >= kNumClasses) { 154 while (t >= kNumClasses) {
156 t -= kNumClasses; 155 t -= kNumClasses;
(...skipping 98 matching lines...) Expand 10 before | Expand all | Expand 10 after
255 lock_.Unlock(); 254 lock_.Unlock();
256 return N; 255 return N;
257 } 256 }
258 257
259 int result = 0; 258 int result = 0;
260 void* head = NULL; 259 void* head = NULL;
261 void* tail = NULL; 260 void* tail = NULL;
262 // TODO: Prefetch multiple TCEntries? 261 // TODO: Prefetch multiple TCEntries?
263 tail = FetchFromSpansSafe(); 262 tail = FetchFromSpansSafe();
264 if (tail != NULL) { 263 if (tail != NULL) {
265 SLL_SetNext(tail, NULL); 264 FL_Push(&head, tail);
266 head = tail;
267 result = 1; 265 result = 1;
268 while (result < N) { 266 while (result < N) {
269 void *t = FetchFromSpans(); 267 void *t = FetchFromSpans();
270 if (!t) break; 268 if (!t) break;
271 SLL_Push(&head, t); 269 FL_Push(&head, t);
272 result++; 270 result++;
273 } 271 }
274 } 272 }
275 lock_.Unlock(); 273 lock_.Unlock();
276 *start = head; 274 *start = head;
277 *end = tail; 275 *end = tail;
278 return result; 276 return result;
279 } 277 }
280 278
281 279
282 void* CentralFreeList::FetchFromSpansSafe() { 280 void* CentralFreeList::FetchFromSpansSafe() {
283 void *t = FetchFromSpans(); 281 void *t = FetchFromSpans();
284 if (!t) { 282 if (!t) {
285 Populate(); 283 Populate();
286 t = FetchFromSpans(); 284 t = FetchFromSpans();
287 } 285 }
288 return t; 286 return t;
289 } 287 }
290 288
291 void* CentralFreeList::FetchFromSpans() { 289 void* CentralFreeList::FetchFromSpans() {
292 if (tcmalloc::DLL_IsEmpty(&nonempty_)) return NULL; 290 if (tcmalloc::DLL_IsEmpty(&nonempty_)) return NULL;
293 Span* span = nonempty_.next; 291 Span* span = nonempty_.next;
294 292
295 ASSERT(span->objects != NULL); 293 ASSERT(span->objects != NULL);
296 span->refcount++; 294 span->refcount++;
297 void* result = span->objects; 295 void *result = FL_Pop(&(span->objects));
298 span->objects = *(reinterpret_cast<void**>(result));
299 if (span->objects == NULL) { 296 if (span->objects == NULL) {
300 // Move to empty list 297 // Move to empty list
301 tcmalloc::DLL_Remove(span); 298 tcmalloc::DLL_Remove(span);
302 tcmalloc::DLL_Prepend(&empty_, span); 299 tcmalloc::DLL_Prepend(&empty_, span);
303 Event(span, 'E', 0); 300 Event(span, 'E', 0);
304 } 301 }
305 counter_--; 302 counter_--;
306 return result; 303 return result;
307 } 304 }
308 305
(...skipping 18 matching lines...) Expand all
327 ASSERT(span->length == npages); 324 ASSERT(span->length == npages);
328 // Cache sizeclass info eagerly. Locking is not necessary. 325 // Cache sizeclass info eagerly. Locking is not necessary.
329 // (Instead of being eager, we could just replace any stale info 326 // (Instead of being eager, we could just replace any stale info
330 // about this span, but that seems to be no better in practice.) 327 // about this span, but that seems to be no better in practice.)
331 for (int i = 0; i < npages; i++) { 328 for (int i = 0; i < npages; i++) {
332 Static::pageheap()->CacheSizeClass(span->start + i, size_class_); 329 Static::pageheap()->CacheSizeClass(span->start + i, size_class_);
333 } 330 }
334 331
335 // Split the block into pieces and add to the free-list 332 // Split the block into pieces and add to the free-list
336 // TODO: coloring of objects to avoid cache conflicts? 333 // TODO: coloring of objects to avoid cache conflicts?
337 void** tail = &span->objects; 334 void* list = NULL;
338 char* ptr = reinterpret_cast<char*>(span->start << kPageShift); 335 char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
339 char* limit = ptr + (npages << kPageShift); 336 char* limit = ptr + (npages << kPageShift);
340 const size_t size = Static::sizemap()->ByteSizeForClass(size_class_); 337 const size_t size = Static::sizemap()->ByteSizeForClass(size_class_);
341 int num = 0; 338 int num = 0;
342 while (ptr + size <= limit) { 339 while (ptr + size <= limit) {
343 *tail = ptr; 340 FL_Push(&list, ptr);
344 tail = reinterpret_cast<void**>(ptr);
345 ptr += size; 341 ptr += size;
346 num++; 342 num++;
347 } 343 }
348 ASSERT(ptr <= limit); 344 ASSERT(ptr <= limit);
349 *tail = NULL; 345 span->objects = list;
350 span->refcount = 0; // No sub-object in use yet 346 span->refcount = 0; // No sub-object in use yet
351 347
352 // Add span to list of non-empty spans 348 // Add span to list of non-empty spans
353 lock_.Lock(); 349 lock_.Lock();
354 tcmalloc::DLL_Prepend(&nonempty_, span); 350 tcmalloc::DLL_Prepend(&nonempty_, span);
355 ++num_spans_; 351 ++num_spans_;
356 counter_ += num; 352 counter_ += num;
357 } 353 }
358 354
359 int CentralFreeList::tc_length() { 355 int CentralFreeList::tc_length() {
360 SpinLockHolder h(&lock_); 356 SpinLockHolder h(&lock_);
361 return used_slots_ * Static::sizemap()->num_objects_to_move(size_class_); 357 return used_slots_ * Static::sizemap()->num_objects_to_move(size_class_);
362 } 358 }
363 359
364 size_t CentralFreeList::OverheadBytes() { 360 size_t CentralFreeList::OverheadBytes() {
365 SpinLockHolder h(&lock_); 361 SpinLockHolder h(&lock_);
366 if (size_class_ == 0) { // 0 holds the 0-sized allocations 362 if (size_class_ == 0) { // 0 holds the 0-sized allocations
367 return 0; 363 return 0;
368 } 364 }
369 const size_t pages_per_span = Static::sizemap()->class_to_pages(size_class_); 365 const size_t pages_per_span = Static::sizemap()->class_to_pages(size_class_);
370 const size_t object_size = Static::sizemap()->class_to_size(size_class_); 366 const size_t object_size = Static::sizemap()->class_to_size(size_class_);
371 ASSERT(object_size > 0); 367 ASSERT(object_size > 0);
372 const size_t overhead_per_span = (pages_per_span * kPageSize) % object_size; 368 const size_t overhead_per_span = (pages_per_span * kPageSize) % object_size;
373 return num_spans_ * overhead_per_span; 369 return num_spans_ * overhead_per_span;
374 } 370 }
375 371
376 } // namespace tcmalloc 372 } // namespace tcmalloc
OLDNEW
« no previous file with comments | « third_party/tcmalloc/chromium/src/base/vdso_support.cc ('k') | third_party/tcmalloc/chromium/src/common.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698