Chromium Code Reviews

Side by Side Diff: third_party/tcmalloc/chromium/src/page_heap.cc

Issue 9310021: [NOT TO COMMIT!] Merge Chromium-specific changes in tcmalloc thru. the original gperftools r136. (Closed) Base URL: http://git.chromium.org/git/chromium.git@trunk
Patch Set: Fixed some build inhibitors. Created 8 years, 10 months ago
1 // Copyright (c) 2008, Google Inc. 1 // Copyright (c) 2008, Google Inc.
2 // All rights reserved. 2 // All rights reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are 5 // modification, are permitted provided that the following conditions are
6 // met: 6 // met:
7 // 7 //
8 // * Redistributions of source code must retain the above copyright 8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer. 9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above 10 // * Redistributions in binary form must reproduce the above
(...skipping 82 matching lines...)
93 Span* PageHeap::New(Length n) { 93 Span* PageHeap::New(Length n) {
94 ASSERT(Check()); 94 ASSERT(Check());
95 ASSERT(n > 0); 95 ASSERT(n > 0);
96 96
97 Span* result = SearchFreeAndLargeLists(n); 97 Span* result = SearchFreeAndLargeLists(n);
98 if (result != NULL) 98 if (result != NULL)
99 return result; 99 return result;
100 100
101 // Grow the heap and try again. 101 // Grow the heap and try again.
102 if (!GrowHeap(n)) { 102 if (!GrowHeap(n)) {
103 ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
103 ASSERT(Check()); 104 ASSERT(Check());
104 return NULL; 105 return NULL;
105 } 106 }
106 return SearchFreeAndLargeLists(n); 107 return SearchFreeAndLargeLists(n);
107 } 108 }
108 109
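The new ASSERT on the GrowHeap() failure path above, and the matching ones added later in Carve(), Delete(), and GrowHeap(), all check the single accounting identity this patch introduces: every byte of address space obtained from the system is counted as either committed or unmapped. The standalone sketch below models only that bookkeeping; the struct and helper are hypothetical stand-ins for PageHeap::stats_, and where the real patch splits the updates across CommitSpan(), DecommitSpan(), and the freelist helpers, the sketch folds each transition into a single step.

// Minimal model (not tcmalloc code) of the invariant asserted by the patch:
//   unmapped_bytes + committed_bytes == system_bytes
#include <cassert>
#include <cstdint>

struct Stats {               // hypothetical stand-in for PageHeap::stats_
  uint64_t system_bytes;     // address space obtained from the OS
  uint64_t committed_bytes;  // portion currently backed by physical memory
  uint64_t unmapped_bytes;   // portion released (decommitted) back to the OS
};

static void CheckStats(const Stats& s) {
  assert(s.unmapped_bytes + s.committed_bytes == s.system_bytes);
}

int main() {
  Stats s = {0, 0, 0};

  // GrowHeap(): new memory arrives committed, so both counters move together.
  s.system_bytes += 1 << 20;
  s.committed_bytes += 1 << 20;
  CheckStats(s);

  // Freeing with "always decommit": committed bytes become unmapped bytes.
  s.committed_bytes -= 64 << 10;
  s.unmapped_bytes += 64 << 10;
  CheckStats(s);

  // Carving from the returned freelist recommits: unmapped becomes committed.
  s.unmapped_bytes -= 64 << 10;
  s.committed_bytes += 64 << 10;
  CheckStats(s);
  return 0;
}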
109 Span* PageHeap::AllocLarge(Length n) { 110 Span* PageHeap::AllocLarge(Length n) {
110 // Find the best span (closest to n in size). 111 // Find the best span (closest to n in size).
111 // The following loop implements address-ordered best-fit. 112 // The following loop implements address-ordered best-fit.
112 Span *best = NULL; 113 Span *best = NULL;
(...skipping 40 matching lines...)
153 Span* leftover = NewSpan(span->start + n, extra); 154 Span* leftover = NewSpan(span->start + n, extra);
154 ASSERT(leftover->location == Span::IN_USE); 155 ASSERT(leftover->location == Span::IN_USE);
155 Event(leftover, 'U', extra); 156 Event(leftover, 'U', extra);
156 RecordSpan(leftover); 157 RecordSpan(leftover);
157 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span 158 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
158 span->length = n; 159 span->length = n;
159 160
160 return leftover; 161 return leftover;
161 } 162 }
162 163
164 void PageHeap::CommitSpan(Span* span) {
165 TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift),
166 static_cast<size_t>(span->length << kPageShift));
167 stats_.committed_bytes += span->length << kPageShift;
168 }
169
170 void PageHeap::DecommitSpan(Span* span) {
171 TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
172 static_cast<size_t>(span->length << kPageShift));
173 stats_.committed_bytes -= span->length << kPageShift;
174 }
175
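CommitSpan() and DecommitSpan() above turn a span's page range into a (pointer, byte count) pair for TCMalloc_SystemCommit / TCMalloc_SystemRelease and keep stats_.committed_bytes in step with those calls. A minimal standalone sketch of that address arithmetic follows; kPageShift = 13 (8 KiB logical pages) and the concrete span values are assumptions for illustration, not taken from this patch.

#include <cstddef>
#include <cstdint>
#include <cstdio>

typedef uintptr_t PageID;             // page index, as in tcmalloc
typedef uintptr_t Length;             // length in pages
static const size_t kPageShift = 13;  // assumed: 8 KiB logical pages

int main() {
  PageID start = 1024;  // hypothetical span starting at page 1024
  Length len = 4;       // spanning 4 pages

  void* addr = reinterpret_cast<void*>(start << kPageShift);
  size_t bytes = static_cast<size_t>(len << kPageShift);

  // CommitSpan() would hand (addr, bytes) to TCMalloc_SystemCommit and add
  // `bytes` to stats_.committed_bytes; DecommitSpan() calls
  // TCMalloc_SystemRelease and subtracts the same amount.
  std::printf("span covers %zu bytes starting at %p\n", bytes, addr);
  return 0;
}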
163 Span* PageHeap::Carve(Span* span, Length n) { 176 Span* PageHeap::Carve(Span* span, Length n) {
164 ASSERT(n > 0); 177 ASSERT(n > 0);
165 ASSERT(span->location != Span::IN_USE); 178 ASSERT(span->location != Span::IN_USE);
166 const int old_location = span->location; 179 const int old_location = span->location;
167 RemoveFromFreeList(span); 180 RemoveFromFreeList(span);
168 span->location = Span::IN_USE; 181 span->location = Span::IN_USE;
169 Event(span, 'A', n); 182 Event(span, 'A', n);
170 183
171 const int extra = span->length - n; 184 const int extra = span->length - n;
172 ASSERT(extra >= 0); 185 ASSERT(extra >= 0);
173 if (extra > 0) { 186 if (extra > 0) {
174 Span* leftover = NewSpan(span->start + n, extra); 187 Span* leftover = NewSpan(span->start + n, extra);
175 leftover->location = old_location; 188 leftover->location = old_location;
176 Event(leftover, 'S', extra); 189 Event(leftover, 'S', extra);
177 RecordSpan(leftover); 190 RecordSpan(leftover);
191
192 // The previous span of |leftover| was just split -- no need to
193 // coalesce them. The next span of |leftover| was not previously coalesced
194 // with |span|, i.e. it is NULL or has a location other than |old_location|.
195 const PageID p = leftover->start;
196 const Length len = leftover->length;
197 Span* next = GetDescriptor(p+len);
198 ASSERT (next == NULL ||
199 next->location == Span::IN_USE ||
200 next->location != leftover->location);
201
178 PrependToFreeList(leftover); // Skip coalescing - no candidates possible 202 PrependToFreeList(leftover); // Skip coalescing - no candidates possible
179 span->length = n; 203 span->length = n;
180 pagemap_.set(span->start + n - 1, span); 204 pagemap_.set(span->start + n - 1, span);
181 } 205 }
182 ASSERT(Check()); 206 ASSERT(Check());
207 if (old_location == Span::ON_RETURNED_FREELIST) {
208 // We need to recommit this address space.
209 CommitSpan(span);
210 }
211 ASSERT(span->location == Span::IN_USE);
212 ASSERT(span->length == n);
213 ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
183 return span; 214 return span;
184 } 215 }
185 216
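The notable new behaviour in Carve() is the recommit at the end: when the span came off the returned (decommitted) freelist, only the n pages being handed out are committed again, while the leftover tail keeps the old location and stays decommitted on its own list. A simplified standalone sketch of that decision follows; MiniSpan, the enum, and the helper signature are stand-ins, not tcmalloc's real Span or Carve().

#include <cassert>
#include <cstdint>

enum Location { IN_USE, ON_NORMAL_FREELIST, ON_RETURNED_FREELIST };

struct MiniSpan {    // hypothetical stand-in for tcmalloc's Span
  uintptr_t start;   // first page
  uintptr_t length;  // number of pages
  Location location;
};

// Marks `span` in use, trims it to n pages, and returns the leftover tail
// (which may be empty). Sets *needs_commit when the pages handed out must be
// recommitted, mirroring the check added at the end of the real Carve().
MiniSpan Carve(MiniSpan* span, uintptr_t n, bool* needs_commit) {
  assert(span->location != IN_USE && n > 0 && n <= span->length);
  const Location old_location = span->location;
  span->location = IN_USE;

  MiniSpan leftover = { span->start + n, span->length - n, old_location };
  span->length = n;

  // Only a span taken off the "returned" (decommitted) list needs a commit;
  // a span from the normal list is already backed by memory.
  *needs_commit = (old_location == ON_RETURNED_FREELIST);
  return leftover;
}

int main() {
  MiniSpan s = { 100, 16, ON_RETURNED_FREELIST };
  bool needs_commit = false;
  MiniSpan rest = Carve(&s, 4, &needs_commit);
  assert(needs_commit);                          // the 4 pages get recommitted
  assert(rest.length == 12);                     // the tail stays decommitted
  assert(rest.location == ON_RETURNED_FREELIST);
  return 0;
}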
186 void PageHeap::Delete(Span* span) { 217 void PageHeap::Delete(Span* span) {
187 ASSERT(Check()); 218 ASSERT(Check());
188 ASSERT(span->location == Span::IN_USE); 219 ASSERT(span->location == Span::IN_USE);
189 ASSERT(span->length > 0); 220 ASSERT(span->length > 0);
190 ASSERT(GetDescriptor(span->start) == span); 221 ASSERT(GetDescriptor(span->start) == span);
191 ASSERT(GetDescriptor(span->start + span->length - 1) == span); 222 ASSERT(GetDescriptor(span->start + span->length - 1) == span);
192 const Length n = span->length; 223 const Length n = span->length;
193 span->sizeclass = 0; 224 span->sizeclass = 0;
194 span->sample = 0; 225 span->sample = 0;
195 span->location = Span::ON_NORMAL_FREELIST; 226 span->location = Span::ON_NORMAL_FREELIST;
196 Event(span, 'D', span->length); 227 Event(span, 'D', span->length);
197 MergeIntoFreeList(span); // Coalesces if possible 228 MergeIntoFreeList(span); // Coalesces if possible
198 IncrementalScavenge(n); 229 IncrementalScavenge(n);
230 ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
199 ASSERT(Check()); 231 ASSERT(Check());
200 } 232 }
201 233
202 void PageHeap::MergeIntoFreeList(Span* span) { 234 void PageHeap::MergeIntoFreeList(Span* span) {
203 ASSERT(span->location != Span::IN_USE); 235 ASSERT(span->location != Span::IN_USE);
204 236
205 // Coalesce -- we guarantee that "p" != 0, so no bounds checking 237 // Coalesce -- we guarantee that "p" != 0, so no bounds checking
206 // necessary. We do not bother resetting the stale pagemap 238 // necessary. We do not bother resetting the stale pagemap
207 // entries for the pieces we are merging together because we only 239 // entries for the pieces we are merging together because we only
208 // care about the pagemap entries for the boundaries. 240 // care about the pagemap entries for the boundaries.
209 // 241 //
210 // Note that only similar spans are merged together. For example, 242 // Note that the adjacent spans we merge into "span" may come out of a
211 // we do not coalesce "returned" spans with "normal" spans. 243 // "normal" (committed) list, and cleanly merge with our IN_USE span, which
244 // is implicitly committed. If the adjacent spans are on the "returned"
245 // (decommitted) list, then we must get both spans into the same state before
246 // or after we coalesce them. The current code always decommits. This is
247 // achieved by blindly decommitting the entire coalesced region, which may
248 // include any combination of committed and decommitted spans, at the end of
249 // the method.
250
251 // TODO(jar): "Always decommit" causes some extra calls to commit when we are
252 // called in GrowHeap() during an allocation :-/. We need to eval the cost of
253 // that oscillation, and possibly do something to reduce it.
254
255 // TODO(jar): We need a better strategy for deciding to commit, or decommit,
256 // based on memory usage and free heap sizes.
257
212 const PageID p = span->start; 258 const PageID p = span->start;
213 const Length n = span->length; 259 const Length n = span->length;
214 Span* prev = GetDescriptor(p-1); 260 Span* prev = GetDescriptor(p-1);
215 if (prev != NULL && prev->location == span->location) { 261 if (prev != NULL && prev->location != Span::IN_USE) {
216 // Merge preceding span into this span 262 // Merge preceding span into this span
217 ASSERT(prev->start + prev->length == p); 263 ASSERT(prev->start + prev->length == p);
218 const Length len = prev->length; 264 const Length len = prev->length;
265 if (prev->location == Span::ON_RETURNED_FREELIST) {
266 // We're about to put the merged span into the returned freelist and call
267 // DecommitSpan() on it, which will mark the entire span including this
268 // one as released and decrease stats_.committed_bytes by the size of the
269 // merged span. To make the math work out we temporarily increase the
270 // stats_.committed_bytes amount.
271 stats_.committed_bytes += prev->length << kPageShift;
272 }
219 RemoveFromFreeList(prev); 273 RemoveFromFreeList(prev);
220 DeleteSpan(prev); 274 DeleteSpan(prev);
221 span->start -= len; 275 span->start -= len;
222 span->length += len; 276 span->length += len;
223 pagemap_.set(span->start, span); 277 pagemap_.set(span->start, span);
224 Event(span, 'L', len); 278 Event(span, 'L', len);
225 } 279 }
226 Span* next = GetDescriptor(p+n); 280 Span* next = GetDescriptor(p+n);
227 if (next != NULL && next->location == span->location) { 281 if (next != NULL && next->location != Span::IN_USE) {
228 // Merge next span into this span 282 // Merge next span into this span
229 ASSERT(next->start == p+n); 283 ASSERT(next->start == p+n);
230 const Length len = next->length; 284 const Length len = next->length;
285 if (next->location == Span::ON_RETURNED_FREELIST) {
286 // See the comment above, under 'if (prev->location ...)', for an explanation.
287 stats_.committed_bytes += next->length << kPageShift;
288 }
231 RemoveFromFreeList(next); 289 RemoveFromFreeList(next);
232 DeleteSpan(next); 290 DeleteSpan(next);
233 span->length += len; 291 span->length += len;
234 pagemap_.set(span->start + span->length - 1, span); 292 pagemap_.set(span->start + span->length - 1, span);
235 Event(span, 'R', len); 293 Event(span, 'R', len);
236 } 294 }
237 295
296 Event(span, 'D', span->length);
297 span->location = Span::ON_RETURNED_FREELIST;
298 DecommitSpan(span);
238 PrependToFreeList(span); 299 PrependToFreeList(span);
239 } 300 }
240 301
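The subtlest bookkeeping in MergeIntoFreeList() is the temporary stats_.committed_bytes bump for a neighbour that is already decommitted: the DecommitSpan() call at the end subtracts the size of the whole coalesced region, so any part that was not actually committed has to be added back first for the totals to balance. The standalone sketch below walks through that arithmetic with concrete page counts; it folds the unmapped_bytes updates (handled by the freelist helpers in the real code) into inline arithmetic, so treat it as a model of the net effect rather than of the code's exact division of labour.

#include <cassert>
#include <cstddef>
#include <cstdint>

static const size_t kPageShift = 13;  // assumed: 8 KiB logical pages

struct MiniStats {  // hypothetical stand-in for PageHeap::stats_
  uint64_t system_bytes;
  uint64_t committed_bytes;
  uint64_t unmapped_bytes;
};

int main() {
  MiniStats st;
  // Three adjacent spans: prev (2 pages, on the returned/decommitted list),
  // span (4 pages, just freed and still committed), next (1 page, on the
  // normal/committed list). Seven pages of system memory in total.
  st.system_bytes    = 7ull << kPageShift;
  st.committed_bytes = 5ull << kPageShift;  // span + next
  st.unmapped_bytes  = 2ull << kPageShift;  // prev

  // Merging prev: it is already decommitted, so its bytes are temporarily
  // counted as committed (and no longer as unmapped) so that decommitting
  // the whole coalesced region below subtracts the right amount.
  st.committed_bytes += 2ull << kPageShift;
  st.unmapped_bytes  -= 2ull << kPageShift;

  // DecommitSpan() on the fully coalesced 7-page span.
  st.committed_bytes -= 7ull << kPageShift;
  st.unmapped_bytes  += 7ull << kPageShift;

  // The invariant asserted throughout the patch still holds.
  assert(st.unmapped_bytes + st.committed_bytes == st.system_bytes);
  assert(st.committed_bytes == 0);
  return 0;
}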
241 void PageHeap::PrependToFreeList(Span* span) { 302 void PageHeap::PrependToFreeList(Span* span) {
242 ASSERT(span->location != Span::IN_USE); 303 ASSERT(span->location != Span::IN_USE);
243 SpanList* list = (span->length < kMaxPages) ? &free_[span->length] : &large_; 304 SpanList* list = (span->length < kMaxPages) ? &free_[span->length] : &large_;
244 if (span->location == Span::ON_NORMAL_FREELIST) { 305 if (span->location == Span::ON_NORMAL_FREELIST) {
245 stats_.free_bytes += (span->length << kPageShift); 306 stats_.free_bytes += (span->length << kPageShift);
246 DLL_Prepend(&list->normal, span); 307 DLL_Prepend(&list->normal, span);
247 } else { 308 } else {
(...skipping 166 matching lines...)
414 ask = n; 475 ask = n;
415 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); 476 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
416 } 477 }
417 if (ptr == NULL) return false; 478 if (ptr == NULL) return false;
418 } 479 }
419 ask = actual_size >> kPageShift; 480 ask = actual_size >> kPageShift;
420 RecordGrowth(ask << kPageShift); 481 RecordGrowth(ask << kPageShift);
421 482
422 uint64_t old_system_bytes = stats_.system_bytes; 483 uint64_t old_system_bytes = stats_.system_bytes;
423 stats_.system_bytes += (ask << kPageShift); 484 stats_.system_bytes += (ask << kPageShift);
485 stats_.committed_bytes += (ask << kPageShift);
424 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; 486 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
425 ASSERT(p > 0); 487 ASSERT(p > 0);
426 488
427 // If we have already a lot of pages allocated, just pre allocate a bunch of 489 // If we have already a lot of pages allocated, just pre allocate a bunch of
428 // memory for the page map. This prevents fragmentation by pagemap metadata 490 // memory for the page map. This prevents fragmentation by pagemap metadata
429 // when a program keeps allocating and freeing large blocks. 491 // when a program keeps allocating and freeing large blocks.
430 492
431 if (old_system_bytes < kPageMapBigAllocationThreshold 493 if (old_system_bytes < kPageMapBigAllocationThreshold
432 && stats_.system_bytes >= kPageMapBigAllocationThreshold) { 494 && stats_.system_bytes >= kPageMapBigAllocationThreshold) {
433 pagemap_.PreallocateMoreMemory(); 495 pagemap_.PreallocateMoreMemory();
434 } 496 }
435 497
436 // Make sure pagemap_ has entries for all of the new pages. 498 // Make sure pagemap_ has entries for all of the new pages.
437 // Plus ensure one before and one after so coalescing code 499 // Plus ensure one before and one after so coalescing code
438 // does not need bounds-checking. 500 // does not need bounds-checking.
439 if (pagemap_.Ensure(p-1, ask+2)) { 501 if (pagemap_.Ensure(p-1, ask+2)) {
440 // Pretend the new area is allocated and then Delete() it to cause 502 // Pretend the new area is allocated and then Delete() it to cause
441 // any necessary coalescing to occur. 503 // any necessary coalescing to occur.
442 Span* span = NewSpan(p, ask); 504 Span* span = NewSpan(p, ask);
443 RecordSpan(span); 505 RecordSpan(span);
444 Delete(span); 506 Delete(span);
507 ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
445 ASSERT(Check()); 508 ASSERT(Check());
446 return true; 509 return true;
447 } else { 510 } else {
448 // We could not allocate memory within "pagemap_" 511 // We could not allocate memory within "pagemap_"
449 // TODO: Once we can return memory to the system, return the new span 512 // TODO: Once we can return memory to the system, return the new span
450 return false; 513 return false;
451 } 514 }
452 } 515 }
453 516
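GrowHeap()'s side of the accounting is simpler: TCMalloc_SystemAlloc may round the request up, so the page count is recomputed from actual_size, and the whole new region is counted as committed the moment it arrives. It is then immediately Delete()d, which under the always-decommit policy releases it again; that is the commit/decommit oscillation the TODO in MergeIntoFreeList() refers to. A minimal sketch of the arithmetic, again assuming 8 KiB pages:

#include <cassert>
#include <cstddef>
#include <cstdint>

static const size_t kPageShift = 13;  // assumed: 8 KiB logical pages

int main() {
  uint64_t system_bytes = 0, committed_bytes = 0, unmapped_bytes = 0;

  uint64_t ask = 3;  // pages requested from TCMalloc_SystemAlloc
  // Hypothetical outcome: the system allocator rounded the request up to
  // 4 pages and reported that via actual_size.
  size_t actual_size = size_t(4) << kPageShift;

  ask = actual_size >> kPageShift;       // recompute the page count: now 4
  system_bytes += ask << kPageShift;     // new address space...
  committed_bytes += ask << kPageShift;  // ...arrives committed

  assert(ask == 4);
  assert(unmapped_bytes + committed_bytes == system_bytes);
  return 0;
}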
454 bool PageHeap::Check() { 517 bool PageHeap::Check() {
(...skipping 19 matching lines...)
474 CHECK_CONDITION(s->location == freelist); // NORMAL or RETURNED 537 CHECK_CONDITION(s->location == freelist); // NORMAL or RETURNED
475 CHECK_CONDITION(s->length >= min_pages); 538 CHECK_CONDITION(s->length >= min_pages);
476 CHECK_CONDITION(s->length <= max_pages); 539 CHECK_CONDITION(s->length <= max_pages);
477 CHECK_CONDITION(GetDescriptor(s->start) == s); 540 CHECK_CONDITION(GetDescriptor(s->start) == s);
478 CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s); 541 CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
479 } 542 }
480 return true; 543 return true;
481 } 544 }
482 545
483 } // namespace tcmalloc 546 } // namespace tcmalloc