Chromium Code Reviews

Side by Side Diff: third_party/tcmalloc/chromium/src/page_heap.cc

Issue 576001: Merged third_party/tcmalloc/vendor/src(google-perftools r87) into... (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Removed the unnecessary printf and ASSERT(0) (created 10 years, 9 months ago)
1 // Copyright (c) 2008, Google Inc. 1 // Copyright (c) 2008, Google Inc.
2 // All rights reserved. 2 // All rights reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are 5 // modification, are permitted provided that the following conditions are
6 // met: 6 // met:
7 // 7 //
8 // * Redistributions of source code must retain the above copyright 8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer. 9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above 10 // * Redistributions in binary form must reproduce the above
(...skipping 31 matching lines...)
42 "Zero means we never release memory back to the system. " 42 "Zero means we never release memory back to the system. "
43 "Increase this flag to return memory faster; decrease it " 43 "Increase this flag to return memory faster; decrease it "
44 "to return memory slower. Reasonable rates are in the " 44 "to return memory slower. Reasonable rates are in the "
45 "range [0,10]"); 45 "range [0,10]");
46 46
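For reference, a minimal standalone sketch (not part of this patch) of how FLAGS_tcmalloc_release_rate maps onto the scavenge delay computed in PageHeap::IncrementalScavenge() below; the delay constants mirror the Chrome-adjusted values quoted in the old code further down, and the helper name is illustrative only.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Chrome-adjusted delays: with 4K pages, 1 << 14 pages is 64MB and
// 1 << 12 pages is 16MB of deallocation.
static const int64_t kMaxReleaseDelay = 1 << 14;
static const int64_t kDefaultReleaseDelay = 1 << 12;

// Pages of deallocation to wait before scavenging again, given the flag
// value and how many pages the last scavenge released.
static int64_t ScavengeDelayPages(double release_rate, int64_t released_pages) {
  if (release_rate <= 1e-6 || released_pages == 0) {
    return kDefaultReleaseDelay;  // Releasing disabled, or nothing released.
  }
  // release_rate == 1 means wait ~1000 pages per released page.
  double wait = (1000.0 / release_rate) * static_cast<double>(released_pages);
  return static_cast<int64_t>(
      std::min(wait, static_cast<double>(kMaxReleaseDelay)));
}

int main() {
  std::printf("%lld\n", (long long)ScavengeDelayPages(1.0, 1));    // 1000
  std::printf("%lld\n", (long long)ScavengeDelayPages(10.0, 64));  // 6400
  return 0;
}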
47 namespace tcmalloc { 47 namespace tcmalloc {
48 48
49 PageHeap::PageHeap() 49 PageHeap::PageHeap()
50 : pagemap_(MetaDataAlloc), 50 : pagemap_(MetaDataAlloc),
51 pagemap_cache_(0), 51 pagemap_cache_(0),
52 free_pages_(0),
53 system_bytes_(0),
54 committed_bytes_(0),
55 scavenge_counter_(0), 52 scavenge_counter_(0),
56 // Start scavenging at kMaxPages list 53 // Start scavenging at kMaxPages list
57 scavenge_index_(kMaxPages-1) { 54 release_index_(kMaxPages) {
58 COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits); 55 COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
59 DLL_Init(&large_.normal); 56 DLL_Init(&large_.normal);
60 DLL_Init(&large_.returned); 57 DLL_Init(&large_.returned);
61 for (int i = 0; i < kMaxPages; i++) { 58 for (int i = 0; i < kMaxPages; i++) {
62 DLL_Init(&free_[i].normal); 59 DLL_Init(&free_[i].normal);
63 DLL_Init(&free_[i].returned); 60 DLL_Init(&free_[i].returned);
64 } 61 }
65 } 62 }
66 63
67 Span* PageHeap::New(Length n) { 64 Span* PageHeap::New(Length n) {
(...skipping 15 matching lines...)
83 return Carve(ll->next, n); 80 return Carve(ll->next, n);
84 } 81 }
85 // Still no luck, so keep looking in larger classes. 82 // Still no luck, so keep looking in larger classes.
86 } 83 }
87 84
88 Span* result = AllocLarge(n); 85 Span* result = AllocLarge(n);
89 if (result != NULL) return result; 86 if (result != NULL) return result;
90 87
91 // Grow the heap and try again 88 // Grow the heap and try again
92 if (!GrowHeap(n)) { 89 if (!GrowHeap(n)) {
90 ASSERT(stats_.unmapped_bytes+ stats_.committed_bytes==stats_.system_bytes);
93 ASSERT(Check()); 91 ASSERT(Check());
94 return NULL; 92 return NULL;
95 } 93 }
96 94
97 return AllocLarge(n); 95 return AllocLarge(n);
98 } 96 }
99 97
100 Span* PageHeap::AllocLarge(Length n) { 98 Span* PageHeap::AllocLarge(Length n) {
101 // find the best span (closest to n in size). 99 // find the best span (closest to n in size).
102 // The following loop implements address-ordered best-fit. 100 // The following loop implements address-ordered best-fit.
(...skipping 44 matching lines...)
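The best-fit loop referenced in the comment above is elided from this view. As a rough illustration only (a toy span type, not tcmalloc's Span), address-ordered best-fit picks the smallest span that can hold the request and breaks length ties by the lowest start address:

#include <cstddef>

struct ToySpan { long start; long length; ToySpan* next; };

// Return the smallest span of at least n pages; prefer the lowest start
// address when lengths tie (address-ordered best-fit).
ToySpan* BestFit(ToySpan* head, long n) {
  ToySpan* best = NULL;
  for (ToySpan* s = head; s != NULL; s = s->next) {
    if (s->length < n) continue;  // Too small for this request.
    if (best == NULL || s->length < best->length ||
        (s->length == best->length && s->start < best->start)) {
      best = s;
    }
  }
  return best;
}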
147 RecordSpan(leftover); 145 RecordSpan(leftover);
148 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span 146 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
149 span->length = n; 147 span->length = n;
150 148
151 return leftover; 149 return leftover;
152 } 150 }
153 151
154 void PageHeap::CommitSpan(Span* span) { 152 void PageHeap::CommitSpan(Span* span) {
155 TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift), 153 TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift),
156 static_cast<size_t>(span->length << kPageShift)); 154 static_cast<size_t>(span->length << kPageShift));
157 committed_bytes_ += span->length << kPageShift; 155 stats_.committed_bytes += span->length << kPageShift;
158 } 156 }
159 157
160 void PageHeap::DecommitSpan(Span* span) { 158 void PageHeap::DecommitSpan(Span* span) {
161 TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift), 159 TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
162 static_cast<size_t>(span->length << kPageShift)); 160 static_cast<size_t>(span->length << kPageShift));
163 committed_bytes_ -= span->length << kPageShift; 161 stats_.committed_bytes -= span->length << kPageShift;
164 } 162 }
165 163
166 Span* PageHeap::Carve(Span* span, Length n) { 164 Span* PageHeap::Carve(Span* span, Length n) {
167 ASSERT(n > 0); 165 ASSERT(n > 0);
168 ASSERT(span->location != Span::IN_USE); 166 ASSERT(span->location != Span::IN_USE);
169 const int old_location = span->location; 167 const int old_location = span->location;
170 DLL_Remove(span); 168 RemoveFromFreeList(span);
171 span->location = Span::IN_USE; 169 span->location = Span::IN_USE;
172 Event(span, 'A', n); 170 Event(span, 'A', n);
173 171
174 const int extra = span->length - n; 172 const int extra = span->length - n;
175 ASSERT(extra >= 0); 173 ASSERT(extra >= 0);
176 if (extra > 0) { 174 if (extra > 0) {
177 Span* leftover = NewSpan(span->start + n, extra); 175 Span* leftover = NewSpan(span->start + n, extra);
178 leftover->location = old_location; 176 leftover->location = old_location;
179 Event(leftover, 'S', extra); 177 Event(leftover, 'S', extra);
180 RecordSpan(leftover); 178 RecordSpan(leftover);
181 179
182 // Place leftover span on appropriate free list 180 // The previous span of |leftover| was just split -- no need to
183 SpanList* listpair = (extra < kMaxPages) ? &free_[extra] : &large_; 181 // coalesce them. The next span of |leftover| was not previously coalesced
184 Span* dst = (leftover->location == Span::ON_RETURNED_FREELIST 182 // with |span|, i.e. is NULL or has a location other than |old_location|.
185 ? &listpair->returned : &listpair->normal); 183 const PageID p = leftover->start;
186 DLL_Prepend(dst, leftover); 184 const Length len = leftover->length;
185 Span* next = GetDescriptor(p+len);
186 ASSERT (next == NULL ||
187 next->location == Span::IN_USE ||
188 next->location != leftover->location);
187 189
190 PrependToFreeList(leftover); // Skip coalescing - no candidates possible
188 span->length = n; 191 span->length = n;
189 pagemap_.set(span->start + n - 1, span); 192 pagemap_.set(span->start + n - 1, span);
190 } 193 }
191 ASSERT(Check()); 194 ASSERT(Check());
192 free_pages_ -= n;
193 if (old_location == Span::ON_RETURNED_FREELIST) { 195 if (old_location == Span::ON_RETURNED_FREELIST) {
194 // We need to recommit this address space. 196 // We need to recommit this address space.
195 CommitSpan(span); 197 CommitSpan(span);
196 } 198 }
197 ASSERT(span->location == Span::IN_USE); 199 ASSERT(span->location == Span::IN_USE);
198 ASSERT(span->length == n); 200 ASSERT(span->length == n);
201 ASSERT(stats_.unmapped_bytes+ stats_.committed_bytes==stats_.system_bytes);
199 return span; 202 return span;
200 } 203 }
201 204
202 void PageHeap::Delete(Span* span) { 205 void PageHeap::Delete(Span* span) {
203 ASSERT(Check()); 206 ASSERT(Check());
204 ASSERT(span->location == Span::IN_USE); 207 ASSERT(span->location == Span::IN_USE);
205 ASSERT(span->length > 0); 208 ASSERT(span->length > 0);
206 ASSERT(GetDescriptor(span->start) == span); 209 ASSERT(GetDescriptor(span->start) == span);
207 ASSERT(GetDescriptor(span->start + span->length - 1) == span); 210 ASSERT(GetDescriptor(span->start + span->length - 1) == span);
211 const Length n = span->length;
208 span->sizeclass = 0; 212 span->sizeclass = 0;
209 span->sample = 0; 213 span->sample = 0;
214 span->location = Span::ON_NORMAL_FREELIST;
215 Event(span, 'D', span->length);
216 MergeIntoFreeList(span); // Coalesces if possible
217 IncrementalScavenge(n);
218 ASSERT(stats_.unmapped_bytes+ stats_.committed_bytes==stats_.system_bytes);
219 ASSERT(Check());
220 }
221
222 void PageHeap::MergeIntoFreeList(Span* span) {
223 ASSERT(span->location != Span::IN_USE);
210 224
211 // Coalesce -- we guarantee that "p" != 0, so no bounds checking 225 // Coalesce -- we guarantee that "p" != 0, so no bounds checking
212 // necessary. We do not bother resetting the stale pagemap 226 // necessary. We do not bother resetting the stale pagemap
213 // entries for the pieces we are merging together because we only 227 // entries for the pieces we are merging together because we only
214 // care about the pagemap entries for the boundaries. 228 // care about the pagemap entries for the boundaries.
215 // 229 //
216 // Note that the adjacent spans we merge into "span" may come out of a 230 // Note that the adjacent spans we merge into "span" may come out of a
217 // "normal" (committed) list, and cleanly merge with our IN_USE span, which 231 // "normal" (committed) list, and cleanly merge with our IN_USE span, which
218 // is implicitly committed. If the adjacent spans are on the "returned" 232 // is implicitly committed. If the adjacent spans are on the "returned"
219 // (decommitted) list, then we must get both spans into the same state before 233 // (decommitted) list, then we must get both spans into the same state before
(...skipping 12 matching lines...)
232 const PageID p = span->start; 246 const PageID p = span->start;
233 const Length n = span->length; 247 const Length n = span->length;
234 Span* prev = GetDescriptor(p-1); 248 Span* prev = GetDescriptor(p-1);
235 if (prev != NULL && prev->location != Span::IN_USE) { 249 if (prev != NULL && prev->location != Span::IN_USE) {
236 // Merge preceding span into this span 250 // Merge preceding span into this span
237 ASSERT(prev->start + prev->length == p); 251 ASSERT(prev->start + prev->length == p);
238 const Length len = prev->length; 252 const Length len = prev->length;
239 if (prev->location == Span::ON_RETURNED_FREELIST) { 253 if (prev->location == Span::ON_RETURNED_FREELIST) {
240 // We're about to put the merged span into the returned freelist and call 254 // We're about to put the merged span into the returned freelist and call
241 // DecommitSpan() on it, which will mark the entire span including this 255 // DecommitSpan() on it, which will mark the entire span including this
242 // one as released and decrease committed_bytes_ by the size of the 256 // one as released and decrease stats_.committed_bytes by the size of the
243 // merged span. To make the math work out we temporarily increase the 257 // merged span. To make the math work out we temporarily increase the
244 // committed_bytes_ amount. 258 // stats_.committed_bytes amount.
245 committed_bytes_ += prev->length << kPageShift; 259 stats_.committed_bytes += prev->length << kPageShift;
246 } 260 }
247 DLL_Remove(prev); 261 RemoveFromFreeList(prev);
248 DeleteSpan(prev); 262 DeleteSpan(prev);
249 span->start -= len; 263 span->start -= len;
250 span->length += len; 264 span->length += len;
251 pagemap_.set(span->start, span); 265 pagemap_.set(span->start, span);
252 Event(span, 'L', len); 266 Event(span, 'L', len);
253 } 267 }
254 Span* next = GetDescriptor(p+n); 268 Span* next = GetDescriptor(p+n);
255 if (next != NULL && next->location != Span::IN_USE) { 269 if (next != NULL && next->location != Span::IN_USE) {
256 // Merge next span into this span 270 // Merge next span into this span
257 ASSERT(next->start == p+n); 271 ASSERT(next->start == p+n);
258 const Length len = next->length; 272 const Length len = next->length;
259 if (next->location == Span::ON_RETURNED_FREELIST) { 273 if (next->location == Span::ON_RETURNED_FREELIST) {
260 // See the comment below 'if (prev->location ...' for explanation. 274 // See the comment below 'if (prev->location ...' for explanation.
261 committed_bytes_ += next->length << kPageShift; 275 stats_.committed_bytes += next->length << kPageShift;
262 } 276 }
263 DLL_Remove(next); 277 RemoveFromFreeList(next);
264 DeleteSpan(next); 278 DeleteSpan(next);
265 span->length += len; 279 span->length += len;
266 pagemap_.set(span->start + span->length - 1, span); 280 pagemap_.set(span->start + span->length - 1, span);
267 Event(span, 'R', len); 281 Event(span, 'R', len);
268 } 282 }
269 283
270 Event(span, 'D', span->length); 284 Event(span, 'D', span->length);
271 span->location = Span::ON_RETURNED_FREELIST; 285 span->location = Span::ON_RETURNED_FREELIST;
272 DecommitSpan(span); 286 DecommitSpan(span);
273 if (span->length < kMaxPages) { 287 PrependToFreeList(span);
274 DLL_Prepend(&free_[span->length].returned, span); 288 }
289
290 void PageHeap::PrependToFreeList(Span* span) {
291 ASSERT(span->location != Span::IN_USE);
292 SpanList* list = (span->length < kMaxPages) ? &free_[span->length] : &large_;
293 if (span->location == Span::ON_NORMAL_FREELIST) {
294 stats_.free_bytes += (span->length << kPageShift);
295 DLL_Prepend(&list->normal, span);
275 } else { 296 } else {
276 DLL_Prepend(&large_.returned, span); 297 stats_.unmapped_bytes += (span->length << kPageShift);
298 DLL_Prepend(&list->returned, span);
277 } 299 }
278 free_pages_ += n; 300 }
279 301
280 IncrementalScavenge(n); 302 void PageHeap::RemoveFromFreeList(Span* span) {
281 ASSERT(Check()); 303 ASSERT(span->location != Span::IN_USE);
304 if (span->location == Span::ON_NORMAL_FREELIST) {
305 stats_.free_bytes -= (span->length << kPageShift);
306 } else {
307 stats_.unmapped_bytes -= (span->length << kPageShift);
308 }
309 DLL_Remove(span);
282 } 310 }
283 311
284 void PageHeap::IncrementalScavenge(Length n) { 312 void PageHeap::IncrementalScavenge(Length n) {
285 // Fast path; not yet time to release memory 313 // Fast path; not yet time to release memory
286 scavenge_counter_ -= n; 314 scavenge_counter_ -= n;
287 if (scavenge_counter_ >= 0) return; // Not yet time to scavenge 315 if (scavenge_counter_ >= 0) return; // Not yet time to scavenge
288 316
289 // Never delay scavenging for more than the following number of
290 // deallocated pages. With 4K pages, this comes to 4GB of
291 // deallocation.
292 // Chrome: Changed to 64MB
293 static const int kMaxReleaseDelay = 1 << 14;
294
295 // If there is nothing to release, wait for so many pages before
296 // scavenging again. With 4K pages, this comes to 1GB of memory.
297 // Chrome: Changed to 16MB
298 static const int kDefaultReleaseDelay = 1 << 12;
299
300 const double rate = FLAGS_tcmalloc_release_rate; 317 const double rate = FLAGS_tcmalloc_release_rate;
301 if (rate <= 1e-6) { 318 if (rate <= 1e-6) {
302 // Tiny release rate means that releasing is disabled. 319 // Tiny release rate means that releasing is disabled.
303 scavenge_counter_ = kDefaultReleaseDelay; 320 scavenge_counter_ = kDefaultReleaseDelay;
304 return; 321 return;
305 } 322 }
306 323
307 // Find index of free list to scavenge 324 Length released_pages = ReleaseAtLeastNPages(1);
308 int index = scavenge_index_ + 1;
309 for (int i = 0; i < kMaxPages+1; i++) {
310 if (index > kMaxPages) index = 0;
311 SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
312 if (!DLL_IsEmpty(&slist->normal)) {
313 // Release the last span on the normal portion of this list
314 Span* s = slist->normal.prev;
315 ASSERT(s->location == Span::ON_NORMAL_FREELIST);
316 DLL_Remove(s);
317 DecommitSpan(s);
318 s->location = Span::ON_RETURNED_FREELIST;
319 DLL_Prepend(&slist->returned, s);
320 325
321 // Compute how long to wait until we return memory. 326 if (released_pages == 0) {
322 // FLAGS_tcmalloc_release_rate==1 means wait for 1000 pages 327 // Nothing to scavenge, delay for a while.
323 // after releasing one page. 328 scavenge_counter_ = kDefaultReleaseDelay;
324 const double mult = 1000.0 / rate; 329 } else {
325 double wait = mult * static_cast<double>(s->length); 330 // Compute how long to wait until we return memory.
326 if (wait > kMaxReleaseDelay) { 331 // FLAGS_tcmalloc_release_rate==1 means wait for 1000 pages
327 // Avoid overflow and bound to reasonable range 332 // after releasing one page.
328 wait = kMaxReleaseDelay; 333 const double mult = 1000.0 / rate;
334 double wait = mult * static_cast<double>(released_pages);
335 if (wait > kMaxReleaseDelay) {
336 // Avoid overflow and bound to reasonable range.
337 wait = kMaxReleaseDelay;
338 }
339 scavenge_counter_ = static_cast<int64_t>(wait);
340 }
341 }
342
343 Length PageHeap::ReleaseLastNormalSpan(SpanList* slist) {
344 Span* s = slist->normal.prev;
345 ASSERT(s->location == Span::ON_NORMAL_FREELIST);
346 RemoveFromFreeList(s);
347 const Length n = s->length;
348 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
349 static_cast<size_t>(s->length << kPageShift));
350 s->location = Span::ON_RETURNED_FREELIST;
351 MergeIntoFreeList(s); // Coalesces if possible.
352 return n;
353 }
354
355 Length PageHeap::ReleaseAtLeastNPages(Length num_pages) {
356 Length released_pages = 0;
357 Length prev_released_pages = -1;
358
359 // Round robin through the lists of free spans, releasing the last
360 // span in each list. Stop after releasing at least num_pages.
361 while (released_pages < num_pages) {
362 if (released_pages == prev_released_pages) {
363 // Last iteration of while loop made no progress.
364 break;
365 }
366 prev_released_pages = released_pages;
367
368 for (int i = 0; i < kMaxPages+1 && released_pages < num_pages;
369 i++, release_index_++) {
370 if (release_index_ > kMaxPages) release_index_ = 0;
371 SpanList* slist = (release_index_ == kMaxPages) ?
372 &large_ : &free_[release_index_];
373 if (!DLL_IsEmpty(&slist->normal)) {
374 Length released_len = ReleaseLastNormalSpan(slist);
375 released_pages += released_len;
329 } 376 }
330 scavenge_counter_ = static_cast<int64_t>(wait);
331
332 scavenge_index_ = index; // Scavenge at index+1 next time
333 // Note: we stop scavenging after finding one.
334 return;
335 } 377 }
336 index++;
337 } 378 }
338 379 return released_pages;
339 // Nothing to scavenge, delay for a while
340 scavenge_counter_ = kDefaultReleaseDelay;
341 } 380 }
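For illustration, a self-contained sketch of the same round-robin strategy over toy free lists (vectors of span lengths standing in for the real SpanList structures); like ReleaseAtLeastNPages() above, it stops once the target is met or a full pass over every list makes no progress:

#include <cstdio>
#include <vector>

typedef long Length;

// Release spans round-robin across the lists until at least num_pages have
// been released, or a full pass over every list releases nothing.
Length ReleaseAtLeastNPagesToy(std::vector<std::vector<Length> >& free_lists,
                               size_t& release_index, Length num_pages) {
  Length released = 0;
  Length prev_released = -1;
  while (released < num_pages) {
    if (released == prev_released) break;  // No progress in the last pass.
    prev_released = released;
    for (size_t i = 0; i < free_lists.size() && released < num_pages;
         i++, release_index++) {
      if (release_index >= free_lists.size()) release_index = 0;
      std::vector<Length>& list = free_lists[release_index];
      if (!list.empty()) {
        released += list.back();  // "Release" the last span on this list.
        list.pop_back();
      }
    }
  }
  return released;
}

int main() {
  // Three toy free lists holding spans of 1, 2 and 8 pages.
  std::vector<std::vector<Length> > lists;
  lists.push_back(std::vector<Length>(2, 1));  // {1, 1}
  lists.push_back(std::vector<Length>(1, 2));  // {2}
  lists.push_back(std::vector<Length>(2, 8));  // {8, 8}
  size_t index = 0;
  std::printf("released %ld pages\n",
              ReleaseAtLeastNPagesToy(lists, index, 10));
  return 0;
}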
342 381
343 void PageHeap::RegisterSizeClass(Span* span, size_t sc) { 382 void PageHeap::RegisterSizeClass(Span* span, size_t sc) {
344 // Associate span object with all interior pages as well 383 // Associate span object with all interior pages as well
345 ASSERT(span->location == Span::IN_USE); 384 ASSERT(span->location == Span::IN_USE);
346 ASSERT(GetDescriptor(span->start) == span); 385 ASSERT(GetDescriptor(span->start) == span);
347 ASSERT(GetDescriptor(span->start+span->length-1) == span); 386 ASSERT(GetDescriptor(span->start+span->length-1) == span);
348 Event(span, 'C', sc); 387 Event(span, 'C', sc);
349 span->sizeclass = sc; 388 span->sizeclass = sc;
350 for (Length i = 1; i < span->length-1; i++) { 389 for (Length i = 1; i < span->length-1; i++) {
351 pagemap_.set(span->start+i, span); 390 pagemap_.set(span->start+i, span);
352 } 391 }
353 } 392 }
354 393
394 static double MB(uint64_t bytes) {
395 return bytes / 1048576.0;
396 }
397
355 static double PagesToMB(uint64_t pages) { 398 static double PagesToMB(uint64_t pages) {
356 return (pages << kPageShift) / 1048576.0; 399 return (pages << kPageShift) / 1048576.0;
357 } 400 }
358 401
359 void PageHeap::Dump(TCMalloc_Printer* out) { 402 void PageHeap::Dump(TCMalloc_Printer* out) {
360 int nonempty_sizes = 0; 403 int nonempty_sizes = 0;
361 for (int s = 0; s < kMaxPages; s++) { 404 for (int s = 0; s < kMaxPages; s++) {
362 if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) { 405 if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
363 nonempty_sizes++; 406 nonempty_sizes++;
364 } 407 }
365 } 408 }
366 out->printf("------------------------------------------------\n"); 409 out->printf("------------------------------------------------\n");
367 out->printf("PageHeap: %d sizes; %6.1f MB free\n", 410 out->printf("PageHeap: %d sizes; %6.1f MB free; %6.1f MB unmapped\n",
368 nonempty_sizes, PagesToMB(free_pages_)); 411 nonempty_sizes, MB(stats_.free_bytes), MB(stats_.unmapped_bytes));
369 out->printf("------------------------------------------------\n"); 412 out->printf("------------------------------------------------\n");
370 uint64_t total_normal = 0; 413 uint64_t total_normal = 0;
371 uint64_t total_returned = 0; 414 uint64_t total_returned = 0;
372 for (int s = 0; s < kMaxPages; s++) { 415 for (int s = 0; s < kMaxPages; s++) {
373 const int n_length = DLL_Length(&free_[s].normal); 416 const int n_length = DLL_Length(&free_[s].normal);
374 const int r_length = DLL_Length(&free_[s].returned); 417 const int r_length = DLL_Length(&free_[s].returned);
375 if (n_length + r_length > 0) { 418 if (n_length + r_length > 0) {
376 uint64_t n_pages = s * n_length; 419 uint64_t n_pages = s * n_length;
377 uint64_t r_pages = s * r_length; 420 uint64_t r_pages = s * r_length;
378 total_normal += n_pages; 421 total_normal += n_pages;
(...skipping 31 matching lines...)
410 total_returned += r_pages; 453 total_returned += r_pages;
411 out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum" 454 out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum"
412 "; unmapped: %6.1f MB; %6.1f MB cum\n", 455 "; unmapped: %6.1f MB; %6.1f MB cum\n",
413 (n_spans + r_spans), 456 (n_spans + r_spans),
414 PagesToMB(n_pages + r_pages), 457 PagesToMB(n_pages + r_pages),
415 PagesToMB(total_normal + total_returned), 458 PagesToMB(total_normal + total_returned),
416 PagesToMB(r_pages), 459 PagesToMB(r_pages),
417 PagesToMB(total_returned)); 460 PagesToMB(total_returned));
418 } 461 }
419 462
463 bool PageHeap::GetNextRange(PageID start, base::MallocRange* r) {
464 Span* span = reinterpret_cast<Span*>(pagemap_.Next(start));
465 if (span == NULL) {
466 return false;
467 }
468 r->address = span->start << kPageShift;
469 r->length = span->length << kPageShift;
470 r->fraction = 0;
471 switch (span->location) {
472 case Span::IN_USE:
473 r->type = base::MallocRange::INUSE;
474 r->fraction = 1;
475 if (span->sizeclass > 0) {
476 // Only some of the objects in this span may be in use.
477 const size_t osize = Static::sizemap()->class_to_size(span->sizeclass);
478 r->fraction = (1.0 * osize * span->refcount) / r->length;
479 }
480 break;
481 case Span::ON_NORMAL_FREELIST:
482 r->type = base::MallocRange::FREE;
483 break;
484 case Span::ON_RETURNED_FREELIST:
485 r->type = base::MallocRange::UNMAPPED;
486 break;
487 default:
488 r->type = base::MallocRange::UNKNOWN;
489 break;
490 }
491 return true;
492 }
493
420 static void RecordGrowth(size_t growth) { 494 static void RecordGrowth(size_t growth) {
421 StackTrace* t = Static::stacktrace_allocator()->New(); 495 StackTrace* t = Static::stacktrace_allocator()->New();
422 t->depth = GetStackTrace(t->stack, kMaxStackDepth-1, 3); 496 t->depth = GetStackTrace(t->stack, kMaxStackDepth-1, 3);
423 t->size = growth; 497 t->size = growth;
424 t->stack[kMaxStackDepth-1] = reinterpret_cast<void*>(Static::growth_stacks()); 498 t->stack[kMaxStackDepth-1] = reinterpret_cast<void*>(Static::growth_stacks());
425 Static::set_growth_stacks(t); 499 Static::set_growth_stacks(t);
426 } 500 }
427 501
428 bool PageHeap::GrowHeap(Length n) { 502 bool PageHeap::GrowHeap(Length n) {
429 ASSERT(kMaxPages >= kMinSystemAlloc); 503 ASSERT(kMaxPages >= kMinSystemAlloc);
430 if (n > kMaxValidPages) return false; 504 if (n > kMaxValidPages) return false;
431 Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc); 505 Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
432 size_t actual_size; 506 size_t actual_size;
433 void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); 507 void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
434 if (ptr == NULL) { 508 if (ptr == NULL) {
435 if (n < ask) { 509 if (n < ask) {
436 // Try growing just "n" pages 510 // Try growing just "n" pages
437 ask = n; 511 ask = n;
438 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); 512 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
439 } 513 }
440 if (ptr == NULL) return false; 514 if (ptr == NULL) return false;
441 } 515 }
442 ask = actual_size >> kPageShift; 516 ask = actual_size >> kPageShift;
443 RecordGrowth(ask << kPageShift); 517 RecordGrowth(ask << kPageShift);
444 518
445 uint64_t old_system_bytes = system_bytes_; 519 uint64_t old_system_bytes = stats_.system_bytes;
446 system_bytes_ += (ask << kPageShift); 520 stats_.system_bytes += (ask << kPageShift);
447 committed_bytes_ += (ask << kPageShift); 521 stats_.committed_bytes += (ask << kPageShift);
448 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; 522 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
449 ASSERT(p > 0); 523 ASSERT(p > 0);
450 524
451 // If we already have a lot of pages allocated, just preallocate a bunch of 525 // If we already have a lot of pages allocated, just preallocate a bunch of
452 // memory for the page map. This prevents fragmentation by pagemap metadata 526 // memory for the page map. This prevents fragmentation by pagemap metadata
453 // when a program keeps allocating and freeing large blocks. 527 // when a program keeps allocating and freeing large blocks.
454 528
455 if (old_system_bytes < kPageMapBigAllocationThreshold 529 if (old_system_bytes < kPageMapBigAllocationThreshold
456 && system_bytes_ >= kPageMapBigAllocationThreshold) { 530 && stats_.system_bytes >= kPageMapBigAllocationThreshold) {
457 pagemap_.PreallocateMoreMemory(); 531 pagemap_.PreallocateMoreMemory();
458 } 532 }
459 533
460 // Make sure pagemap_ has entries for all of the new pages. 534 // Make sure pagemap_ has entries for all of the new pages.
461 // Plus ensure one before and one after so coalescing code 535 // Plus ensure one before and one after so coalescing code
462 // does not need bounds-checking. 536 // does not need bounds-checking.
463 if (pagemap_.Ensure(p-1, ask+2)) { 537 if (pagemap_.Ensure(p-1, ask+2)) {
464 // Pretend the new area is allocated and then Delete() it to 538 // Pretend the new area is allocated and then Delete() it to cause
465 // cause any necessary coalescing to occur. 539 // any necessary coalescing to occur.
466 //
467 // We do not adjust free_pages_ here since Delete() will do it for us.
468 Span* span = NewSpan(p, ask); 540 Span* span = NewSpan(p, ask);
469 RecordSpan(span); 541 RecordSpan(span);
470 Delete(span); 542 Delete(span);
543 ASSERT(stats_.unmapped_bytes+ stats_.committed_bytes==stats_.system_bytes);
471 ASSERT(Check()); 544 ASSERT(Check());
472 return true; 545 return true;
473 } else { 546 } else {
474 // We could not allocate memory within "pagemap_" 547 // We could not allocate memory within "pagemap_"
475 // TODO: Once we can return memory to the system, return the new span 548 // TODO: Once we can return memory to the system, return the new span
476 return false; 549 return false;
477 } 550 }
478 } 551 }
479 552
480 bool PageHeap::Check() { 553 bool PageHeap::Check() {
(...skipping 18 matching lines...)
499 for (Span* s = list->next; s != list; s = s->next) { 572 for (Span* s = list->next; s != list; s = s->next) {
500 CHECK_CONDITION(s->location == freelist); // NORMAL or RETURNED 573 CHECK_CONDITION(s->location == freelist); // NORMAL or RETURNED
501 CHECK_CONDITION(s->length >= min_pages); 574 CHECK_CONDITION(s->length >= min_pages);
502 CHECK_CONDITION(s->length <= max_pages); 575 CHECK_CONDITION(s->length <= max_pages);
503 CHECK_CONDITION(GetDescriptor(s->start) == s); 576 CHECK_CONDITION(GetDescriptor(s->start) == s);
504 CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s); 577 CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
505 } 578 }
506 return true; 579 return true;
507 } 580 }
508 581
509 void PageHeap::ReleaseFreeList(Span* list, Span* returned) {
510 // Walk backwards through list so that when we push these
511 // spans on the "returned" list, we preserve the order.
512 while (!DLL_IsEmpty(list)) {
513 Span* s = list->prev;
514 DLL_Remove(s);
515 DLL_Prepend(returned, s);
516 ASSERT(s->location == Span::ON_NORMAL_FREELIST);
517 s->location = Span::ON_RETURNED_FREELIST;
518 DecommitSpan(s);
519 }
520 }
521
522 void PageHeap::ReleaseFreePages() {
523 for (Length s = 0; s < kMaxPages; s++) {
524 ReleaseFreeList(&free_[s].normal, &free_[s].returned);
525 }
526 ReleaseFreeList(&large_.normal, &large_.returned);
527 ASSERT(Check());
528 }
529
530 } // namespace tcmalloc 582 } // namespace tcmalloc