Chromium Code Reviews

Side by Side Diff: third_party/tcmalloc/page_heap.cc

Issue 165275: Major changes to the Chrome allocator.... (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: '' Created 11 years, 4 months ago
1 // Copyright (c) 2008, Google Inc.
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above
11 // copyright notice, this list of conditions and the following disclaimer
12 // in the documentation and/or other materials provided with the
13 // distribution.
14 // * Neither the name of Google Inc. nor the names of its
15 // contributors may be used to endorse or promote products derived from
16 // this software without specific prior written permission.
17 //
18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30 // ---
31 // Author: Sanjay Ghemawat <opensource@google.com>
32
33 #include <config.h>
34 #include "page_heap.h"
35
36 #include "static_vars.h"
37 #include "system-alloc.h"
38
39 DEFINE_double(tcmalloc_release_rate,
40 EnvToDouble("TCMALLOC_RELEASE_RATE", 1.0),
41 "Rate at which we release unused memory to the system. "
42 "Zero means we never release memory back to the system. "
43 "Increase this flag to return memory faster; decrease it "
44 "to return memory slower. Reasonable rates are in the "
45 "range [0,10]");
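// How the release rate is applied (see IncrementalScavenge() below): after
// releasing a span of S pages, the heap waits for roughly (1000 / rate) * S
// further deallocated pages before releasing again, capped at
// kMaxReleaseDelay.  E.g. with the default rate of 1.0, releasing a 4-page
// span sets the counter to 4000 pages, i.e. about 16MB of deallocation with
// 4K pages, before the next release.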
46
47 namespace tcmalloc {
48
49 PageHeap::PageHeap()
50 : pagemap_(MetaDataAlloc),
51 pagemap_cache_(0),
52 free_pages_(0),
53 system_bytes_(0),
54 scavenge_counter_(0),
55 // Start scavenging at kMaxPages list
56 scavenge_index_(kMaxPages-1) {
57 COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
58 DLL_Init(&large_.normal);
59 DLL_Init(&large_.returned);
60 for (int i = 0; i < kMaxPages; i++) {
61 DLL_Init(&free_[i].normal);
62 DLL_Init(&free_[i].returned);
63 }
64 }
65
66 Span* PageHeap::New(Length n) {
67 ASSERT(Check());
68 ASSERT(n > 0);
69
70 // Find first size >= n that has a non-empty list
71 for (Length s = n; s < kMaxPages; s++) {
72 Span* ll = &free_[s].normal;
73 // If we're lucky, ll is non-empty, meaning it has a suitable span.
74 if (!DLL_IsEmpty(ll)) {
75 ASSERT(ll->next->location == Span::ON_NORMAL_FREELIST);
76 return Carve(ll->next, n);
77 }
78 // Alternatively, maybe there's a usable returned span.
79 ll = &free_[s].returned;
80 if (!DLL_IsEmpty(ll)) {
81 ASSERT(ll->next->location == Span::ON_RETURNED_FREELIST);
82 return Carve(ll->next, n);
83 }
84 // Still no luck, so keep looking in larger classes.
85 }
86
87 Span* result = AllocLarge(n);
88 if (result != NULL) return result;
89
90 // Grow the heap and try again
91 if (!GrowHeap(n)) {
92 ASSERT(Check());
93 return NULL;
94 }
95
96 return AllocLarge(n);
97 }
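// New() falls back in three steps: a first-fit scan of the fixed-size free
// lists for classes >= n (normal list preferred over returned), then
// address-ordered best-fit over the large lists via AllocLarge(), and only
// if both fail, GrowHeap() followed by one more AllocLarge() attempt.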
98
99 Span* PageHeap::AllocLarge(Length n) {
100   // Find the best span (closest to n in size).
101   // The following loops implement address-ordered best-fit.
102 Span *best = NULL;
103
104 // Search through normal list
105 for (Span* span = large_.normal.next;
106 span != &large_.normal;
107 span = span->next) {
108 if (span->length >= n) {
109 if ((best == NULL)
110 || (span->length < best->length)
111 || ((span->length == best->length) && (span->start < best->start))) {
112 best = span;
113 ASSERT(best->location == Span::ON_NORMAL_FREELIST);
114 }
115 }
116 }
117
118 // Search through released list in case it has a better fit
119 for (Span* span = large_.returned.next;
120 span != &large_.returned;
121 span = span->next) {
122 if (span->length >= n) {
123 if ((best == NULL)
124 || (span->length < best->length)
125 || ((span->length == best->length) && (span->start < best->start))) {
126 best = span;
127 ASSERT(best->location == Span::ON_RETURNED_FREELIST);
128 }
129 }
130 }
131
132 return best == NULL ? NULL : Carve(best, n);
133 }
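// Best-fit example for AllocLarge(): with large free spans of 300, 260 and
// 260 pages, a request for 257 pages picks a 260-page span (the smallest
// that fits), breaking the tie by lower start address; Carve() then splits
// the unused 3-page tail back onto a free list.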
134
135 Span* PageHeap::Split(Span* span, Length n) {
136 ASSERT(0 < n);
137 ASSERT(n < span->length);
138 ASSERT(span->location == Span::IN_USE);
139 ASSERT(span->sizeclass == 0);
140 Event(span, 'T', n);
141
142 const int extra = span->length - n;
143 Span* leftover = NewSpan(span->start + n, extra);
144 ASSERT(leftover->location == Span::IN_USE);
145 Event(leftover, 'U', extra);
146 RecordSpan(leftover);
147 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
148 span->length = n;
149
150 return leftover;
151 }
152
153 Span* PageHeap::Carve(Span* span, Length n) {
154 ASSERT(n > 0);
155 ASSERT(span->location != Span::IN_USE);
156 const int old_location = span->location;
157 DLL_Remove(span);
158 span->location = Span::IN_USE;
159 Event(span, 'A', n);
160
161 const int extra = span->length - n;
162 ASSERT(extra >= 0);
163 if (extra > 0) {
164 Span* leftover = NewSpan(span->start + n, extra);
165 leftover->location = old_location;
166 Event(leftover, 'S', extra);
167 RecordSpan(leftover);
168
169 // Place leftover span on appropriate free list
170 SpanList* listpair = (extra < kMaxPages) ? &free_[extra] : &large_;
171 Span* dst = (leftover->location == Span::ON_RETURNED_FREELIST
172 ? &listpair->returned : &listpair->normal);
173 DLL_Prepend(dst, leftover);
174
175 span->length = n;
176 pagemap_.set(span->start + n - 1, span);
177 }
178 ASSERT(Check());
179 free_pages_ -= n;
180 if (old_location == Span::ON_RETURNED_FREELIST) {
181 // We need to recommit this address space.
182 TCMalloc_SystemCommit(
183 reinterpret_cast<void*>(span->start << kPageShift),
184 static_cast<size_t>(span->length << kPageShift));
185 }
186 return span;
187 }
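// Note: a span taken from a "returned" list had its pages handed back to the
// system via TCMalloc_SystemRelease (see Delete() and IncrementalScavenge()),
// so the TCMalloc_SystemCommit call above recommits that address range before
// the span is reused.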
188
189 void PageHeap::Delete(Span* span) {
190 ASSERT(Check());
191 ASSERT(span->location == Span::IN_USE);
192 ASSERT(span->length > 0);
193 ASSERT(GetDescriptor(span->start) == span);
194 ASSERT(GetDescriptor(span->start + span->length - 1) == span);
195 span->sizeclass = 0;
196 span->sample = 0;
197
198 // Coalesce -- we guarantee that "p" != 0, so no bounds checking
199 // necessary. We do not bother resetting the stale pagemap
200 // entries for the pieces we are merging together because we only
201 // care about the pagemap entries for the boundaries.
202 //
203 // Note that the spans we merge into "span" may come out of
204 // a "normal" list. For simplicity, we move these into the
205 // "returned" list of the appropriate size class. We do this
206   // so that we can maximize large, contiguous blocks of freed
207 // space.
208 const PageID p = span->start;
209 const Length n = span->length;
210 Span* prev = GetDescriptor(p-1);
211 if (prev != NULL && prev->location != Span::IN_USE) {
212 // Merge preceding span into this span
213 ASSERT(prev->start + prev->length == p);
214 const Length len = prev->length;
215 DLL_Remove(prev);
216 DeleteSpan(prev);
217 span->start -= len;
218 span->length += len;
219 pagemap_.set(span->start, span);
220 Event(span, 'L', len);
221 }
222 Span* next = GetDescriptor(p+n);
223 if (next != NULL && next->location != Span::IN_USE) {
224 // Merge next span into this span
225 ASSERT(next->start == p+n);
226 const Length len = next->length;
227 DLL_Remove(next);
228 DeleteSpan(next);
229 span->length += len;
230 pagemap_.set(span->start + span->length - 1, span);
231 Event(span, 'R', len);
232 }
233
234 Event(span, 'D', span->length);
235 span->location = Span::ON_RETURNED_FREELIST;
236 TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
237 static_cast<size_t>(span->length << kPageShift));
238 if (span->length < kMaxPages)
239 DLL_Prepend(&free_[span->length].returned, span);
240 else
241 DLL_Prepend(&large_.returned, span);
242 free_pages_ += n;
243
244 IncrementalScavenge(n);
245 ASSERT(Check());
246 }
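// Coalescing example for Delete(): freeing a 2-page span whose 3-page
// predecessor and 4-page successor are both free yields a single 9-page
// span, which is released to the system and prepended to the corresponding
// "returned" list.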
247
248 void PageHeap::IncrementalScavenge(Length n) {
249 // Fast path; not yet time to release memory
250 scavenge_counter_ -= n;
251 if (scavenge_counter_ >= 0) return; // Not yet time to scavenge
252
253   // Never delay scavenging for more than the following number of
254   // deallocated pages.  Upstream tcmalloc allows up to 4GB of
255   // deallocation (with 4K pages) before forcing a release.
256   // Chrome: lowered to 1 << 14 pages, i.e. 64MB with 4K pages.
257 static const int kMaxReleaseDelay = 1 << 14;
258
259   // If there is nothing to release, wait for so many pages before
260   // scavenging again.  Upstream tcmalloc waits the equivalent of 1GB
261   // (with 4K pages).  Chrome: lowered to 1 << 12 pages, i.e. 16MB.
262 static const int kDefaultReleaseDelay = 1 << 12;
263
264 const double rate = FLAGS_tcmalloc_release_rate;
265 if (rate <= 1e-6) {
266 // Tiny release rate means that releasing is disabled.
267 scavenge_counter_ = kDefaultReleaseDelay;
268 return;
269 }
270
271 // Find index of free list to scavenge
272 int index = scavenge_index_ + 1;
273 for (int i = 0; i < kMaxPages+1; i++) {
274 if (index > kMaxPages) index = 0;
275 SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
276 if (!DLL_IsEmpty(&slist->normal)) {
277 // Release the last span on the normal portion of this list
278 Span* s = slist->normal.prev;
279 ASSERT(s->location == Span::ON_NORMAL_FREELIST);
280 DLL_Remove(s);
281 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
282 static_cast<size_t>(s->length << kPageShift));
283 s->location = Span::ON_RETURNED_FREELIST;
284 DLL_Prepend(&slist->returned, s);
285
286 // Compute how long to wait until we return memory.
287 // FLAGS_tcmalloc_release_rate==1 means wait for 1000 pages
288 // after releasing one page.
289 const double mult = 1000.0 / rate;
290 double wait = mult * static_cast<double>(s->length);
291 if (wait > kMaxReleaseDelay) {
292 // Avoid overflow and bound to reasonable range
293 wait = kMaxReleaseDelay;
294 }
295 scavenge_counter_ = static_cast<int64_t>(wait);
296
297 scavenge_index_ = index; // Scavenge at index+1 next time
298 // Note: we stop scavenging after finding one.
299 return;
300 }
301 index++;
302 }
303
304 // Nothing to scavenge, delay for a while
305 scavenge_counter_ = kDefaultReleaseDelay;
306 }
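// The scan in IncrementalScavenge() is round-robin: scavenge_index_ records
// where the last release happened, the next call resumes at index+1, and the
// walk cycles through free_[0..kMaxPages-1] and then the large list
// (index == kMaxPages).  At most one span is released per call.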
307
308 void PageHeap::RegisterSizeClass(Span* span, size_t sc) {
309 // Associate span object with all interior pages as well
310 ASSERT(span->location == Span::IN_USE);
311 ASSERT(GetDescriptor(span->start) == span);
312 ASSERT(GetDescriptor(span->start+span->length-1) == span);
313 Event(span, 'C', sc);
314 span->sizeclass = sc;
315 for (Length i = 1; i < span->length-1; i++) {
316 pagemap_.set(span->start+i, span);
317 }
318 }
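// The loop in RegisterSizeClass() starts at 1 and stops before length-1
// because the span's first and last pages already map to it (the ASSERTs
// above check this); only the interior pages still need pagemap entries.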
319
320 static double PagesToMB(uint64_t pages) {
321 return (pages << kPageShift) / 1048576.0;
322 }
323
324 void PageHeap::Dump(TCMalloc_Printer* out) {
325 int nonempty_sizes = 0;
326 for (int s = 0; s < kMaxPages; s++) {
327 if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
328 nonempty_sizes++;
329 }
330 }
331 out->printf("------------------------------------------------\n");
332 out->printf("PageHeap: %d sizes; %6.1f MB free\n",
333 nonempty_sizes, PagesToMB(free_pages_));
334 out->printf("------------------------------------------------\n");
335 uint64_t total_normal = 0;
336 uint64_t total_returned = 0;
337 for (int s = 0; s < kMaxPages; s++) {
338 const int n_length = DLL_Length(&free_[s].normal);
339 const int r_length = DLL_Length(&free_[s].returned);
340 if (n_length + r_length > 0) {
341 uint64_t n_pages = s * n_length;
342 uint64_t r_pages = s * r_length;
343 total_normal += n_pages;
344 total_returned += r_pages;
345 out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
346 "; unmapped: %6.1f MB; %6.1f MB cum\n",
347 s,
348 (n_length + r_length),
349 PagesToMB(n_pages + r_pages),
350 PagesToMB(total_normal + total_returned),
351 PagesToMB(r_pages),
352 PagesToMB(total_returned));
353 }
354 }
355
356 uint64_t n_pages = 0;
357 uint64_t r_pages = 0;
358 int n_spans = 0;
359 int r_spans = 0;
360 out->printf("Normal large spans:\n");
361 for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
362 out->printf(" [ %6" PRIuPTR " pages ] %6.1f MB\n",
363 s->length, PagesToMB(s->length));
364 n_pages += s->length;
365 n_spans++;
366 }
367 out->printf("Unmapped large spans:\n");
368 for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
369 out->printf(" [ %6" PRIuPTR " pages ] %6.1f MB\n",
370 s->length, PagesToMB(s->length));
371 r_pages += s->length;
372 r_spans++;
373 }
374 total_normal += n_pages;
375 total_returned += r_pages;
376 out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum"
377 "; unmapped: %6.1f MB; %6.1f MB cum\n",
378 (n_spans + r_spans),
379 PagesToMB(n_pages + r_pages),
380 PagesToMB(total_normal + total_returned),
381 PagesToMB(r_pages),
382 PagesToMB(total_returned));
383 }
384
385 static void RecordGrowth(size_t growth) {
386 StackTrace* t = Static::stacktrace_allocator()->New();
387 t->depth = GetStackTrace(t->stack, kMaxStackDepth-1, 3);
388 t->size = growth;
389 t->stack[kMaxStackDepth-1] = reinterpret_cast<void*>(Static::growth_stacks());
390 Static::set_growth_stacks(t);
391 }
392
393 bool PageHeap::GrowHeap(Length n) {
394 ASSERT(kMaxPages >= kMinSystemAlloc);
395 if (n > kMaxValidPages) return false;
396 Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
397 size_t actual_size;
398 void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
399 if (ptr == NULL) {
400 if (n < ask) {
401 // Try growing just "n" pages
402 ask = n;
403 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
404 }
405 if (ptr == NULL) return false;
406 }
407 ask = actual_size >> kPageShift;
408 RecordGrowth(ask << kPageShift);
409
410 uint64_t old_system_bytes = system_bytes_;
411 system_bytes_ += (ask << kPageShift);
412 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
413 ASSERT(p > 0);
414
415   // If we already have a lot of pages allocated, preallocate a chunk of
416   // memory for the page map.  This prevents fragmentation by pagemap
417   // metadata when a program keeps allocating and freeing large blocks.
418
419 if (old_system_bytes < kPageMapBigAllocationThreshold
420 && system_bytes_ >= kPageMapBigAllocationThreshold) {
421 pagemap_.PreallocateMoreMemory();
422 }
423
424 // Make sure pagemap_ has entries for all of the new pages.
425 // Plus ensure one before and one after so coalescing code
426 // does not need bounds-checking.
427 if (pagemap_.Ensure(p-1, ask+2)) {
428 // Pretend the new area is allocated and then Delete() it to
429 // cause any necessary coalescing to occur.
430 //
431 // We do not adjust free_pages_ here since Delete() will do it for us.
432 Span* span = NewSpan(p, ask);
433 RecordSpan(span);
434 Delete(span);
435 ASSERT(Check());
436 return true;
437 } else {
438 // We could not allocate memory within "pagemap_"
439 // TODO: Once we can return memory to the system, return the new span
440 return false;
441 }
442 }
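// In GrowHeap(), Ensure(p-1, ask+2) reserves pagemap entries for one page
// before and one page after the new range, so the GetDescriptor(p-1) and
// GetDescriptor(p+n) lookups done while coalescing in Delete() never touch
// an unmapped pagemap slot.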
443
444 bool PageHeap::Check() {
445 ASSERT(free_[0].normal.next == &free_[0].normal);
446 ASSERT(free_[0].returned.next == &free_[0].returned);
447 return true;
448 }
449
450 bool PageHeap::CheckExpensive() {
451 bool result = Check();
452 CheckList(&large_.normal, kMaxPages, 1000000000, Span::ON_NORMAL_FREELIST);
453   CheckList(&large_.returned, kMaxPages, 1000000000, Span::ON_RETURNED_FREELIST);
454 for (Length s = 1; s < kMaxPages; s++) {
455 CheckList(&free_[s].normal, s, s, Span::ON_NORMAL_FREELIST);
456 CheckList(&free_[s].returned, s, s, Span::ON_RETURNED_FREELIST);
457 }
458 return result;
459 }
460
461 bool PageHeap::CheckList(Span* list, Length min_pages, Length max_pages,
462 int freelist) {
463 for (Span* s = list->next; s != list; s = s->next) {
464 CHECK_CONDITION(s->location == freelist); // NORMAL or RETURNED
465 CHECK_CONDITION(s->length >= min_pages);
466 CHECK_CONDITION(s->length <= max_pages);
467 CHECK_CONDITION(GetDescriptor(s->start) == s);
468 CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
469 }
470 return true;
471 }
472
473 static void ReleaseFreeList(Span* list, Span* returned) {
474 // Walk backwards through list so that when we push these
475 // spans on the "returned" list, we preserve the order.
476 while (!DLL_IsEmpty(list)) {
477 Span* s = list->prev;
478 DLL_Remove(s);
479 DLL_Prepend(returned, s);
480 ASSERT(s->location == Span::ON_NORMAL_FREELIST);
481 s->location = Span::ON_RETURNED_FREELIST;
482 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
483 static_cast<size_t>(s->length << kPageShift));
484 }
485 }
486
487 void PageHeap::ReleaseFreePages() {
488 for (Length s = 0; s < kMaxPages; s++) {
489 ReleaseFreeList(&free_[s].normal, &free_[s].returned);
490 }
491 ReleaseFreeList(&large_.normal, &large_.returned);
492 ASSERT(Check());
493 }
494
495 } // namespace tcmalloc