Chromium Code Reviews

Side by Side Diff: third_party/tcmalloc/chromium/src/page_heap.cc

Issue 9320005: [NOT TO COMMIT!] Replace third_party/tcmalloc/chromium with tcmalloc r136 (the latest). (Closed) Base URL: http://git.chromium.org/git/chromium.git@trunk
Patch Set: Created 8 years, 10 months ago
OLD | NEW
1 // Copyright (c) 2008, Google Inc. 1 // Copyright (c) 2008, Google Inc.
2 // All rights reserved. 2 // All rights reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are 5 // modification, are permitted provided that the following conditions are
6 // met: 6 // met:
7 // 7 //
8 // * Redistributions of source code must retain the above copyright 8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer. 9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above 10 // * Redistributions in binary form must reproduce the above
(...skipping 82 matching lines...)
93 Span* PageHeap::New(Length n) { 93 Span* PageHeap::New(Length n) {
94 ASSERT(Check()); 94 ASSERT(Check());
95 ASSERT(n > 0); 95 ASSERT(n > 0);
96 96
97 Span* result = SearchFreeAndLargeLists(n); 97 Span* result = SearchFreeAndLargeLists(n);
98 if (result != NULL) 98 if (result != NULL)
99 return result; 99 return result;
100 100
101 // Grow the heap and try again. 101 // Grow the heap and try again.
102 if (!GrowHeap(n)) { 102 if (!GrowHeap(n)) {
103 ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
104 ASSERT(Check()); 103 ASSERT(Check());
105 return NULL; 104 return NULL;
106 } 105 }
107 return SearchFreeAndLargeLists(n); 106 return SearchFreeAndLargeLists(n);
108 } 107 }
109 108
110 Span* PageHeap::AllocLarge(Length n) { 109 Span* PageHeap::AllocLarge(Length n) {
111 // Find the best span (closest to n in size). 110 // Find the best span (closest to n in size).
112 // The following loop implements address-ordered best-fit. 111 // The following loop implements address-ordered best-fit.
113 Span *best = NULL; 112 Span *best = NULL;
(...skipping 40 matching lines...)
154 Span* leftover = NewSpan(span->start + n, extra); 153 Span* leftover = NewSpan(span->start + n, extra);
155 ASSERT(leftover->location == Span::IN_USE); 154 ASSERT(leftover->location == Span::IN_USE);
156 Event(leftover, 'U', extra); 155 Event(leftover, 'U', extra);
157 RecordSpan(leftover); 156 RecordSpan(leftover);
158 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span 157 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
159 span->length = n; 158 span->length = n;
160 159
161 return leftover; 160 return leftover;
162 } 161 }
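
The best-fit loop itself falls inside the elided hunk above; per the comment, it implements address-ordered best-fit over the large-span lists. A minimal self-contained sketch of that policy, using stand-in types (an illustration under stated assumptions, not the elided code verbatim):

#include <cstddef>
#include <cstdint>

// Stand-ins for the real tcmalloc types (hypothetical, simplified).
struct Span {
  uintptr_t start;  // first page id of the span
  size_t length;    // span length in pages
  Span* next;       // circular doubly-linked list link
};

// Scans a circular list with sentinel head |head| for the smallest span
// of at least |n| pages, breaking length ties by lower start address.
Span* BestFit(Span* head, size_t n) {
  Span* best = NULL;
  for (Span* s = head->next; s != head; s = s->next) {
    if (s->length < n) continue;           // too small for this request
    if (best == NULL ||
        s->length < best->length ||        // tighter fit wins
        (s->length == best->length &&
         s->start < best->start)) {        // tie: lower address wins
      best = s;
    }
  }
  return best;
}

The real AllocLarge() runs this scan over both large_.normal and large_.returned and passes the winner to Carve().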
163 162
164 void PageHeap::CommitSpan(Span* span) {
165 TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift),
166 static_cast<size_t>(span->length << kPageShift));
167 stats_.committed_bytes += span->length << kPageShift;
168 }
169
170 void PageHeap::DecommitSpan(Span* span) {
171 TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
172 static_cast<size_t>(span->length << kPageShift));
173 stats_.committed_bytes -= span->length << kPageShift;
174 }
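
CommitSpan() and DecommitSpan() are where the Chromium fork moves bytes between the committed and decommitted states, feeding the invariant asserted throughout this file: stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes. A worked example of the page-to-byte arithmetic, assuming kPageShift == 13 (8 KiB pages -- the actual value is build-dependent):

// Hypothetical numbers; kPageShift == 13 assumed for illustration.
// Decommitting a 4-page span:
//   span->length << kPageShift == 4 << 13 == 32768 bytes
//   committed_bytes -= 32768, and the same 32768 bytes are accounted
//   as unmapped by the freelist bookkeeping, so
//   unmapped_bytes + committed_bytes == system_bytes still holds.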
175
176 Span* PageHeap::Carve(Span* span, Length n) { 163 Span* PageHeap::Carve(Span* span, Length n) {
177 ASSERT(n > 0); 164 ASSERT(n > 0);
178 ASSERT(span->location != Span::IN_USE); 165 ASSERT(span->location != Span::IN_USE);
179 const int old_location = span->location; 166 const int old_location = span->location;
180 RemoveFromFreeList(span); 167 RemoveFromFreeList(span);
181 span->location = Span::IN_USE; 168 span->location = Span::IN_USE;
182 Event(span, 'A', n); 169 Event(span, 'A', n);
183 170
184 const int extra = span->length - n; 171 const int extra = span->length - n;
185 ASSERT(extra >= 0); 172 ASSERT(extra >= 0);
186 if (extra > 0) { 173 if (extra > 0) {
187 Span* leftover = NewSpan(span->start + n, extra); 174 Span* leftover = NewSpan(span->start + n, extra);
188 leftover->location = old_location; 175 leftover->location = old_location;
189 Event(leftover, 'S', extra); 176 Event(leftover, 'S', extra);
190 RecordSpan(leftover); 177 RecordSpan(leftover);
191
192 // The previous span of |leftover| was just split -- no need to
193 // coalesce them. The next span of |leftover| was not previously coalesced
194 // with |span|, i.e. it is NULL or has a location other than |old_location|.
195 const PageID p = leftover->start;
196 const Length len = leftover->length;
197 Span* next = GetDescriptor(p+len);
198 ASSERT(next == NULL ||
199 next->location == Span::IN_USE ||
200 next->location != leftover->location);
201
202 PrependToFreeList(leftover); // Skip coalescing - no candidates possible 178 PrependToFreeList(leftover); // Skip coalescing - no candidates possible
203 span->length = n; 179 span->length = n;
204 pagemap_.set(span->start + n - 1, span); 180 pagemap_.set(span->start + n - 1, span);
205 } 181 }
206 ASSERT(Check()); 182 ASSERT(Check());
207 if (old_location == Span::ON_RETURNED_FREELIST) {
208 // We need to recommit this address space.
209 CommitSpan(span);
210 }
211 ASSERT(span->location == Span::IN_USE);
212 ASSERT(span->length == n);
213 ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
214 return span; 183 return span;
215 } 184 }
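
Carve() recommits only the pages the caller actually takes. A worked example with hypothetical numbers:

// Hypothetical: Carve(span, 3) with span = [start=100, length=10],
// pulled off the RETURNED (decommitted) freelist.
//   leftover = NewSpan(103, 7)  -> inherits old_location and is
//                                  prepended with coalescing skipped
//   span->length = 3; pagemap_.set(102, span)  -> boundary repointed
//   CommitSpan(span)  -> recommits pages 100..102 only; the 7-page
//                        leftover stays decommitted.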
216 185
217 void PageHeap::Delete(Span* span) { 186 void PageHeap::Delete(Span* span) {
218 ASSERT(Check()); 187 ASSERT(Check());
219 ASSERT(span->location == Span::IN_USE); 188 ASSERT(span->location == Span::IN_USE);
220 ASSERT(span->length > 0); 189 ASSERT(span->length > 0);
221 ASSERT(GetDescriptor(span->start) == span); 190 ASSERT(GetDescriptor(span->start) == span);
222 ASSERT(GetDescriptor(span->start + span->length - 1) == span); 191 ASSERT(GetDescriptor(span->start + span->length - 1) == span);
223 const Length n = span->length; 192 const Length n = span->length;
224 span->sizeclass = 0; 193 span->sizeclass = 0;
225 span->sample = 0; 194 span->sample = 0;
226 span->location = Span::ON_NORMAL_FREELIST; 195 span->location = Span::ON_NORMAL_FREELIST;
227 Event(span, 'D', span->length); 196 Event(span, 'D', span->length);
228 MergeIntoFreeList(span); // Coalesces if possible 197 MergeIntoFreeList(span); // Coalesces if possible
229 IncrementalScavenge(n); 198 IncrementalScavenge(n);
230 ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
231 ASSERT(Check()); 199 ASSERT(Check());
232 } 200 }
233 201
234 void PageHeap::MergeIntoFreeList(Span* span) { 202 void PageHeap::MergeIntoFreeList(Span* span) {
235 ASSERT(span->location != Span::IN_USE); 203 ASSERT(span->location != Span::IN_USE);
236 204
237 // Coalesce -- we guarantee that "p" != 0, so no bounds checking 205 // Coalesce -- we guarantee that "p" != 0, so no bounds checking
238 // necessary. We do not bother resetting the stale pagemap 206 // necessary. We do not bother resetting the stale pagemap
239 // entries for the pieces we are merging together because we only 207 // entries for the pieces we are merging together because we only
240 // care about the pagemap entries for the boundaries. 208 // care about the pagemap entries for the boundaries.
241 // 209 //
242 // Note that the adjacent spans we merge into "span" may come out of a 210 // Note that only similar spans are merged together. For example,
243 // "normal" (committed) list, and cleanly merge with our IN_USE span, which 211 // we do not coalesce "returned" spans with "normal" spans.
244 // is implicitly committed. If the adjacent spans are on the "returned"
245 // (decommitted) list, then we must get both spans into the same state before
246 // or after we coalesce them. The current code always decommits. This is
247 // achieved by blindly decommitting the entire coalesced region, which may
248 // include any combination of committed and decommitted spans, at the end of
249 // the method.
250
251 // TODO(jar): "Always decommit" causes some extra calls to commit when we are
252 // called in GrowHeap() during an allocation :-/. We need to eval the cost of
253 // that oscillation, and possibly do something to reduce it.
254
255 // TODO(jar): We need a better strategy for deciding to commit, or decommit,
256 // based on memory usage and free heap sizes.
257
258 const PageID p = span->start; 212 const PageID p = span->start;
259 const Length n = span->length; 213 const Length n = span->length;
260 Span* prev = GetDescriptor(p-1); 214 Span* prev = GetDescriptor(p-1);
261 if (prev != NULL && prev->location != Span::IN_USE) { 215 if (prev != NULL && prev->location == span->location) {
262 // Merge preceding span into this span 216 // Merge preceding span into this span
263 ASSERT(prev->start + prev->length == p); 217 ASSERT(prev->start + prev->length == p);
264 const Length len = prev->length; 218 const Length len = prev->length;
265 if (prev->location == Span::ON_RETURNED_FREELIST) {
266 // We're about to put the merged span into the returned freelist and call
267 // DecommitSpan() on it, which will mark the entire span including this
268 // one as released and decrease stats_.committed_bytes by the size of the
269 // merged span. To make the math work out we temporarily increase the
270 // stats_.committed_bytes amount.
271 stats_.committed_bytes += prev->length << kPageShift;
272 }
273 RemoveFromFreeList(prev); 219 RemoveFromFreeList(prev);
274 DeleteSpan(prev); 220 DeleteSpan(prev);
275 span->start -= len; 221 span->start -= len;
276 span->length += len; 222 span->length += len;
277 pagemap_.set(span->start, span); 223 pagemap_.set(span->start, span);
278 Event(span, 'L', len); 224 Event(span, 'L', len);
279 } 225 }
280 Span* next = GetDescriptor(p+n); 226 Span* next = GetDescriptor(p+n);
281 if (next != NULL && next->location != Span::IN_USE) { 227 if (next != NULL && next->location == span->location) {
282 // Merge next span into this span 228 // Merge next span into this span
283 ASSERT(next->start == p+n); 229 ASSERT(next->start == p+n);
284 const Length len = next->length; 230 const Length len = next->length;
285 if (next->location == Span::ON_RETURNED_FREELIST) {
286 // See the comment below 'if (prev->location ...' for explanation.
287 stats_.committed_bytes += next->length << kPageShift;
288 }
289 RemoveFromFreeList(next); 231 RemoveFromFreeList(next);
290 DeleteSpan(next); 232 DeleteSpan(next);
291 span->length += len; 233 span->length += len;
292 pagemap_.set(span->start + span->length - 1, span); 234 pagemap_.set(span->start + span->length - 1, span);
293 Event(span, 'R', len); 235 Event(span, 'R', len);
294 } 236 }
295 237
296 Event(span, 'D', span->length);
297 span->location = Span::ON_RETURNED_FREELIST;
298 DecommitSpan(span);
299 PrependToFreeList(span); 238 PrependToFreeList(span);
300 } 239 }
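
The temporary committed_bytes bump in the left-hand version exists only so the unconditional DecommitSpan() at the end subtracts a consistent amount. A worked example with hypothetical sizes, writing P for 1 << kPageShift:

// Hypothetical: span is 4 committed pages; prev is 2 pages already on
// the RETURNED (decommitted) list.
//   bump:   committed_bytes += 2*P  (pretend prev were committed)
//   merge:  span->length becomes 6
//   final:  DecommitSpan(span) does committed_bytes -= 6*P
//   net:    committed_bytes falls by exactly 4*P -- the pages that were
//           really committed and are now being released.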
301 240
302 void PageHeap::PrependToFreeList(Span* span) { 241 void PageHeap::PrependToFreeList(Span* span) {
303 ASSERT(span->location != Span::IN_USE); 242 ASSERT(span->location != Span::IN_USE);
304 SpanList* list = (span->length < kMaxPages) ? &free_[span->length] : &large_; 243 SpanList* list = (span->length < kMaxPages) ? &free_[span->length] : &large_;
305 if (span->location == Span::ON_NORMAL_FREELIST) { 244 if (span->location == Span::ON_NORMAL_FREELIST) {
306 stats_.free_bytes += (span->length << kPageShift); 245 stats_.free_bytes += (span->length << kPageShift);
307 DLL_Prepend(&list->normal, span); 246 DLL_Prepend(&list->normal, span);
308 } else { 247 } else {
(...skipping 87 matching lines...)
396 ASSERT(span->location == Span::IN_USE); 335 ASSERT(span->location == Span::IN_USE);
397 ASSERT(GetDescriptor(span->start) == span); 336 ASSERT(GetDescriptor(span->start) == span);
398 ASSERT(GetDescriptor(span->start+span->length-1) == span); 337 ASSERT(GetDescriptor(span->start+span->length-1) == span);
399 Event(span, 'C', sc); 338 Event(span, 'C', sc);
400 span->sizeclass = sc; 339 span->sizeclass = sc;
401 for (Length i = 1; i < span->length-1; i++) { 340 for (Length i = 1; i < span->length-1; i++) {
402 pagemap_.set(span->start+i, span); 341 pagemap_.set(span->start+i, span);
403 } 342 }
404 } 343 }
405 344
406 static double MiB(uint64_t bytes) { 345 void PageHeap::GetSmallSpanStats(SmallSpanStats* result) {
407 return bytes / 1048576.0;
408 }
409
410 static double PagesToMiB(uint64_t pages) {
411 return (pages << kPageShift) / 1048576.0;
412 }
413
414 void PageHeap::GetClassSizes(int64 class_sizes_normal[kMaxPages],
415 int64 class_sizes_returned[kMaxPages],
416 int64* normal_pages_in_spans,
417 int64* returned_pages_in_spans) {
418
419 for (int s = 0; s < kMaxPages; s++) { 346 for (int s = 0; s < kMaxPages; s++) {
420 if (class_sizes_normal != NULL) { 347 result->normal_length[s] = DLL_Length(&free_[s].normal);
421 class_sizes_normal[s] = DLL_Length(&free_[s].normal); 348 result->returned_length[s] = DLL_Length(&free_[s].returned);
422 }
423 if (class_sizes_returned != NULL) {
424 class_sizes_returned[s] = DLL_Length(&free_[s].returned);
425 }
426 }
427
428 if (normal_pages_in_spans != NULL) {
429 *normal_pages_in_spans = 0;
430 for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
431 *normal_pages_in_spans += s->length;
432 }
433 }
434
435 if (returned_pages_in_spans != NULL) {
436 *returned_pages_in_spans = 0;
437 for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
438 *returned_pages_in_spans += s->length;
439 }
440 } 349 }
441 } 350 }
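
Upstream thus replaces the fork's four out-parameters with a small stats struct. A hedged usage sketch of the new API (|heap| is a hypothetical PageHeap*; the field names are those used in the code above):

// Illustrative caller -- not part of the patch.
SmallSpanStats small;
heap->GetSmallSpanStats(&small);
int64_t small_free_pages = 0;
for (int s = 0; s < kMaxPages; s++) {
  // s-page spans: normal_length[s] committed, returned_length[s] unmapped.
  small_free_pages += s * (small.normal_length[s] + small.returned_length[s]);
}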
442 351
443 void PageHeap::Dump(TCMalloc_Printer* out) { 352 void PageHeap::GetLargeSpanStats(LargeSpanStats* result) {
444 int nonempty_sizes = 0; 353 result->spans = 0;
445 for (int s = 0; s < kMaxPages; s++) { 354 result->normal_pages = 0;
446 if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) { 355 result->returned_pages = 0;
447 nonempty_sizes++; 356 for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
448 } 357 result->normal_pages += s->length;
358 result->spans++;
449 } 359 }
450 out->printf("------------------------------------------------\n"); 360 for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
451 out->printf("PageHeap: %d sizes; %6.1f MiB free; %6.1f MiB unmapped\n", 361 result->returned_pages += s->length;
452 nonempty_sizes, MiB(stats_.free_bytes), 362 result->spans++;
453 MiB(stats_.unmapped_bytes));
454 out->printf("------------------------------------------------\n");
455 uint64_t total_normal = 0;
456 uint64_t total_returned = 0;
457 for (int s = 0; s < kMaxPages; s++) {
458 const int n_length = DLL_Length(&free_[s].normal);
459 const int r_length = DLL_Length(&free_[s].returned);
460 if (n_length + r_length > 0) {
461 uint64_t n_pages = s * n_length;
462 uint64_t r_pages = s * r_length;
463 total_normal += n_pages;
464 total_returned += r_pages;
465 out->printf("%6u pages * %6u spans ~ %6.1f MiB; %6.1f MiB cum"
466 "; unmapped: %6.1f MiB; %6.1f MiB cum\n",
467 s,
468 (n_length + r_length),
469 PagesToMiB(n_pages + r_pages),
470 PagesToMiB(total_normal + total_returned),
471 PagesToMiB(r_pages),
472 PagesToMiB(total_returned));
473 }
474 } 363 }
475
476 uint64_t n_pages = 0;
477 uint64_t r_pages = 0;
478 int n_spans = 0;
479 int r_spans = 0;
480 out->printf("Normal large spans:\n");
481 for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
482 out->printf(" [ %6" PRIuPTR " pages ] %6.1f MiB\n",
483 s->length, PagesToMiB(s->length));
484 n_pages += s->length;
485 n_spans++;
486 }
487 out->printf("Unmapped large spans:\n");
488 for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
489 out->printf(" [ %6" PRIuPTR " pages ] %6.1f MiB\n",
490 s->length, PagesToMiB(s->length));
491 r_pages += s->length;
492 r_spans++;
493 }
494 total_normal += n_pages;
495 total_returned += r_pages;
496 out->printf(">255 large * %6u spans ~ %6.1f MiB; %6.1f MiB cum"
497 "; unmapped: %6.1f MiB; %6.1f MiB cum\n",
498 (n_spans + r_spans),
499 PagesToMiB(n_pages + r_pages),
500 PagesToMiB(total_normal + total_returned),
501 PagesToMiB(r_pages),
502 PagesToMiB(total_returned));
503 } 364 }
504 365
505 bool PageHeap::GetNextRange(PageID start, base::MallocRange* r) { 366 bool PageHeap::GetNextRange(PageID start, base::MallocRange* r) {
506 Span* span = reinterpret_cast<Span*>(pagemap_.Next(start)); 367 Span* span = reinterpret_cast<Span*>(pagemap_.Next(start));
507 if (span == NULL) { 368 if (span == NULL) {
508 return false; 369 return false;
509 } 370 }
510 r->address = span->start << kPageShift; 371 r->address = span->start << kPageShift;
511 r->length = span->length << kPageShift; 372 r->length = span->length << kPageShift;
512 r->fraction = 0; 373 r->fraction = 0;
(...skipping 40 matching lines...)
553 ask = n; 414 ask = n;
554 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); 415 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
555 } 416 }
556 if (ptr == NULL) return false; 417 if (ptr == NULL) return false;
557 } 418 }
558 ask = actual_size >> kPageShift; 419 ask = actual_size >> kPageShift;
559 RecordGrowth(ask << kPageShift); 420 RecordGrowth(ask << kPageShift);
560 421
561 uint64_t old_system_bytes = stats_.system_bytes; 422 uint64_t old_system_bytes = stats_.system_bytes;
562 stats_.system_bytes += (ask << kPageShift); 423 stats_.system_bytes += (ask << kPageShift);
563 stats_.committed_bytes += (ask << kPageShift);
564 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; 424 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
565 ASSERT(p > 0); 425 ASSERT(p > 0);
566 426
567 // If we already have a lot of pages allocated, just preallocate a bunch of 427 // If we already have a lot of pages allocated, just preallocate a bunch of
568 // memory for the page map. This prevents fragmentation by pagemap metadata 428 // memory for the page map. This prevents fragmentation by pagemap metadata
569 // when a program keeps allocating and freeing large blocks. 429 // when a program keeps allocating and freeing large blocks.
570 430
571 if (old_system_bytes < kPageMapBigAllocationThreshold 431 if (old_system_bytes < kPageMapBigAllocationThreshold
572 && stats_.system_bytes >= kPageMapBigAllocationThreshold) { 432 && stats_.system_bytes >= kPageMapBigAllocationThreshold) {
573 pagemap_.PreallocateMoreMemory(); 433 pagemap_.PreallocateMoreMemory();
574 } 434 }
575 435
576 // Make sure pagemap_ has entries for all of the new pages. 436 // Make sure pagemap_ has entries for all of the new pages.
577 // Plus ensure one before and one after so coalescing code 437 // Plus ensure one before and one after so coalescing code
578 // does not need bounds-checking. 438 // does not need bounds-checking.
579 if (pagemap_.Ensure(p-1, ask+2)) { 439 if (pagemap_.Ensure(p-1, ask+2)) {
580 // Pretend the new area is allocated and then Delete() it to cause 440 // Pretend the new area is allocated and then Delete() it to cause
581 // any necessary coalescing to occur. 441 // any necessary coalescing to occur.
582 Span* span = NewSpan(p, ask); 442 Span* span = NewSpan(p, ask);
583 RecordSpan(span); 443 RecordSpan(span);
584 Delete(span); 444 Delete(span);
585 ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
586 ASSERT(Check()); 445 ASSERT(Check());
587 return true; 446 return true;
588 } else { 447 } else {
589 // We could not allocate memory within "pagemap_" 448 // We could not allocate memory within "pagemap_"
590 // TODO: Once we can return memory to the system, return the new span 449 // TODO: Once we can return memory to the system, return the new span
591 return false; 450 return false;
592 } 451 }
593 } 452 }
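
The Ensure(p-1, ask+2) call above is what lets MergeIntoFreeList() probe its neighbors without bounds checks: the two sentinel pages around the new range read back as NULL. A short sketch of the idea (simplified; GetDescriptor() is the accessor used by the coalescing code):

// After pagemap_.Ensure(p-1, ask+2), lookups at both boundary pages are
// valid (possibly NULL), so coalescing can probe blindly:
//   Span* prev = GetDescriptor(p - 1);    // never out of bounds
//   Span* next = GetDescriptor(p + ask);  // never out of bounds
// A NULL result simply fails the merge tests -- no range checks needed.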
594 453
595 bool PageHeap::Check() { 454 bool PageHeap::Check() {
(...skipping 19 matching lines...)
615 CHECK_CONDITION(s->location == freelist); // NORMAL or RETURNED 474 CHECK_CONDITION(s->location == freelist); // NORMAL or RETURNED
616 CHECK_CONDITION(s->length >= min_pages); 475 CHECK_CONDITION(s->length >= min_pages);
617 CHECK_CONDITION(s->length <= max_pages); 476 CHECK_CONDITION(s->length <= max_pages);
618 CHECK_CONDITION(GetDescriptor(s->start) == s); 477 CHECK_CONDITION(GetDescriptor(s->start) == s);
619 CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s); 478 CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
620 } 479 }
621 return true; 480 return true;
622 } 481 }
623 482
624 } // namespace tcmalloc 483 } // namespace tcmalloc