OLD | NEW |
1 // Copyright (c) 2008, Google Inc. | 1 // Copyright (c) 2008, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 82 matching lines...) |
93 Span* PageHeap::New(Length n) { | 93 Span* PageHeap::New(Length n) { |
94 ASSERT(Check()); | 94 ASSERT(Check()); |
95 ASSERT(n > 0); | 95 ASSERT(n > 0); |
96 | 96 |
97 Span* result = SearchFreeAndLargeLists(n); | 97 Span* result = SearchFreeAndLargeLists(n); |
98 if (result != NULL) | 98 if (result != NULL) |
99 return result; | 99 return result; |
100 | 100 |
101 // Grow the heap and try again. | 101 // Grow the heap and try again. |
102 if (!GrowHeap(n)) { | 102 if (!GrowHeap(n)) { |
 | 103 ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes); |
103 ASSERT(Check()); | 104 ASSERT(Check()); |
104 return NULL; | 105 return NULL; |
105 } | 106 } |
106 return SearchFreeAndLargeLists(n); | 107 return SearchFreeAndLargeLists(n); |
107 } | 108 } |
108 | 109 |
109 Span* PageHeap::AllocLarge(Length n) { | 110 Span* PageHeap::AllocLarge(Length n) { |
110 // find the best span (closest to n in size). | 111 // find the best span (closest to n in size). |
111 // The following loop implements address-ordered best-fit. | 112 // The following loop implements address-ordered best-fit. |
112 Span *best = NULL; | 113 Span *best = NULL; |
(...skipping 40 matching lines...) |
153 Span* leftover = NewSpan(span->start + n, extra); | 154 Span* leftover = NewSpan(span->start + n, extra); |
154 ASSERT(leftover->location == Span::IN_USE); | 155 ASSERT(leftover->location == Span::IN_USE); |
155 Event(leftover, 'U', extra); | 156 Event(leftover, 'U', extra); |
156 RecordSpan(leftover); | 157 RecordSpan(leftover); |
157 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span | 158 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span |
158 span->length = n; | 159 span->length = n; |
159 | 160 |
160 return leftover; | 161 return leftover; |
161 } | 162 } |
162 | 163 |
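 | // Commits the span's address range (backs it with physical memory again) and |
 | // adds its size to stats_.committed_bytes. |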
| 164 void PageHeap::CommitSpan(Span* span) { |
| 165 TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift), |
| 166 static_cast<size_t>(span->length << kPageShift)); |
| 167 stats_.committed_bytes += span->length << kPageShift; |
| 168 } |
| 169 |
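 | // Decommits the span's address range (releases it back to the system) and |
 | // subtracts its size from stats_.committed_bytes. |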
| 170 void PageHeap::DecommitSpan(Span* span) { |
| 171 TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift), |
| 172 static_cast<size_t>(span->length << kPageShift)); |
| 173 stats_.committed_bytes -= span->length << kPageShift; |
| 174 } |
| 175 |
163 Span* PageHeap::Carve(Span* span, Length n) { | 176 Span* PageHeap::Carve(Span* span, Length n) { |
164 ASSERT(n > 0); | 177 ASSERT(n > 0); |
165 ASSERT(span->location != Span::IN_USE); | 178 ASSERT(span->location != Span::IN_USE); |
166 const int old_location = span->location; | 179 const int old_location = span->location; |
167 RemoveFromFreeList(span); | 180 RemoveFromFreeList(span); |
168 span->location = Span::IN_USE; | 181 span->location = Span::IN_USE; |
169 Event(span, 'A', n); | 182 Event(span, 'A', n); |
170 | 183 |
171 const int extra = span->length - n; | 184 const int extra = span->length - n; |
172 ASSERT(extra >= 0); | 185 ASSERT(extra >= 0); |
173 if (extra > 0) { | 186 if (extra > 0) { |
174 Span* leftover = NewSpan(span->start + n, extra); | 187 Span* leftover = NewSpan(span->start + n, extra); |
175 leftover->location = old_location; | 188 leftover->location = old_location; |
176 Event(leftover, 'S', extra); | 189 Event(leftover, 'S', extra); |
177 RecordSpan(leftover); | 190 RecordSpan(leftover); |
| 191 |
 | 192 // The previous span of |leftover| was just split -- no need to |
 | 193 // coalesce them. The next span of |leftover| was not previously coalesced |
 | 194 // with |span|, i.e. it is NULL or has a location other than |old_location|. |
| 195 const PageID p = leftover->start; |
| 196 const Length len = leftover->length; |
| 197 Span* next = GetDescriptor(p+len); |
 | 198 ASSERT(next == NULL || |
| 199 next->location == Span::IN_USE || |
| 200 next->location != leftover->location); |
| 201 |
178 PrependToFreeList(leftover); // Skip coalescing - no candidates possible | 202 PrependToFreeList(leftover); // Skip coalescing - no candidates possible |
179 span->length = n; | 203 span->length = n; |
180 pagemap_.set(span->start + n - 1, span); | 204 pagemap_.set(span->start + n - 1, span); |
181 } | 205 } |
182 ASSERT(Check()); | 206 ASSERT(Check()); |
| 207 if (old_location == Span::ON_RETURNED_FREELIST) { |
| 208 // We need to recommit this address space. |
| 209 CommitSpan(span); |
| 210 } |
| 211 ASSERT(span->location == Span::IN_USE); |
| 212 ASSERT(span->length == n); |
 | 213 ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes); |
183 return span; | 214 return span; |
184 } | 215 } |
185 | 216 |
186 void PageHeap::Delete(Span* span) { | 217 void PageHeap::Delete(Span* span) { |
187 ASSERT(Check()); | 218 ASSERT(Check()); |
188 ASSERT(span->location == Span::IN_USE); | 219 ASSERT(span->location == Span::IN_USE); |
189 ASSERT(span->length > 0); | 220 ASSERT(span->length > 0); |
190 ASSERT(GetDescriptor(span->start) == span); | 221 ASSERT(GetDescriptor(span->start) == span); |
191 ASSERT(GetDescriptor(span->start + span->length - 1) == span); | 222 ASSERT(GetDescriptor(span->start + span->length - 1) == span); |
192 const Length n = span->length; | 223 const Length n = span->length; |
193 span->sizeclass = 0; | 224 span->sizeclass = 0; |
194 span->sample = 0; | 225 span->sample = 0; |
195 span->location = Span::ON_NORMAL_FREELIST; | 226 span->location = Span::ON_NORMAL_FREELIST; |
196 Event(span, 'D', span->length); | 227 Event(span, 'D', span->length); |
197 MergeIntoFreeList(span); // Coalesces if possible | 228 MergeIntoFreeList(span); // Coalesces if possible |
198 IncrementalScavenge(n); | 229 IncrementalScavenge(n); |
 | 230 ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes); |
199 ASSERT(Check()); | 231 ASSERT(Check()); |
200 } | 232 } |
201 | 233 |
202 void PageHeap::MergeIntoFreeList(Span* span) { | 234 void PageHeap::MergeIntoFreeList(Span* span) { |
203 ASSERT(span->location != Span::IN_USE); | 235 ASSERT(span->location != Span::IN_USE); |
204 | 236 |
205 // Coalesce -- we guarantee that "p" != 0, so no bounds checking | 237 // Coalesce -- we guarantee that "p" != 0, so no bounds checking |
206 // necessary. We do not bother resetting the stale pagemap | 238 // necessary. We do not bother resetting the stale pagemap |
207 // entries for the pieces we are merging together because we only | 239 // entries for the pieces we are merging together because we only |
208 // care about the pagemap entries for the boundaries. | 240 // care about the pagemap entries for the boundaries. |
209 // | 241 // |
210 // Note that only similar spans are merged together. For example, | 242 // Note that the adjacent spans we merge into "span" may come out of a |
211 // we do not coalesce "returned" spans with "normal" spans. | 243 // "normal" (committed) list, and cleanly merge with our IN_USE span, which |
 | 244 // is implicitly committed. If the adjacent spans are on the "returned" |
| 245 // (decommitted) list, then we must get both spans into the same state before |
 | 246 // or after we coalesce them. The current code always decommits. This is |
| 247 // achieved by blindly decommitting the entire coalesced region, which may |
| 248 // include any combination of committed and decommitted spans, at the end of |
| 249 // the method. |
| 250 |
| 251 // TODO(jar): "Always decommit" causes some extra calls to commit when we are |
| 252 // called in GrowHeap() during an allocation :-/. We need to eval the cost of |
| 253 // that oscillation, and possibly do something to reduce it. |
| 254 |
| 255 // TODO(jar): We need a better strategy for deciding to commit, or decommit, |
| 256 // based on memory usage and free heap sizes. |
| 257 |
212 const PageID p = span->start; | 258 const PageID p = span->start; |
213 const Length n = span->length; | 259 const Length n = span->length; |
214 Span* prev = GetDescriptor(p-1); | 260 Span* prev = GetDescriptor(p-1); |
215 if (prev != NULL && prev->location == span->location) { | 261 if (prev != NULL && prev->location != Span::IN_USE) { |
216 // Merge preceding span into this span | 262 // Merge preceding span into this span |
217 ASSERT(prev->start + prev->length == p); | 263 ASSERT(prev->start + prev->length == p); |
218 const Length len = prev->length; | 264 const Length len = prev->length; |
| 265 if (prev->location == Span::ON_RETURNED_FREELIST) { |
 | 266 // We're about to put the merged span into the returned freelist and call |
| 267 // DecommitSpan() on it, which will mark the entire span including this |
| 268 // one as released and decrease stats_.committed_bytes by the size of the |
| 269 // merged span. To make the math work out we temporarily increase the |
| 270 // stats_.committed_bytes amount. |
| 271 stats_.committed_bytes += prev->length << kPageShift; |
| 272 } |
219 RemoveFromFreeList(prev); | 273 RemoveFromFreeList(prev); |
220 DeleteSpan(prev); | 274 DeleteSpan(prev); |
221 span->start -= len; | 275 span->start -= len; |
222 span->length += len; | 276 span->length += len; |
223 pagemap_.set(span->start, span); | 277 pagemap_.set(span->start, span); |
224 Event(span, 'L', len); | 278 Event(span, 'L', len); |
225 } | 279 } |
226 Span* next = GetDescriptor(p+n); | 280 Span* next = GetDescriptor(p+n); |
227 if (next != NULL && next->location == span->location) { | 281 if (next != NULL && next->location != Span::IN_USE) { |
228 // Merge next span into this span | 282 // Merge next span into this span |
229 ASSERT(next->start == p+n); | 283 ASSERT(next->start == p+n); |
230 const Length len = next->length; | 284 const Length len = next->length; |
| 285 if (next->location == Span::ON_RETURNED_FREELIST) { |
 | 286 // See the comment under 'if (prev->location ...)' above for an explanation. |
| 287 stats_.committed_bytes += next->length << kPageShift; |
| 288 } |
231 RemoveFromFreeList(next); | 289 RemoveFromFreeList(next); |
232 DeleteSpan(next); | 290 DeleteSpan(next); |
233 span->length += len; | 291 span->length += len; |
234 pagemap_.set(span->start + span->length - 1, span); | 292 pagemap_.set(span->start + span->length - 1, span); |
235 Event(span, 'R', len); | 293 Event(span, 'R', len); |
236 } | 294 } |
237 | 295 |
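 | // Per the "always decommit" policy described above, place the coalesced span |
 | // on the returned freelist and decommit its entire range. |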
| 296 Event(span, 'D', span->length); |
| 297 span->location = Span::ON_RETURNED_FREELIST; |
| 298 DecommitSpan(span); |
238 PrependToFreeList(span); | 299 PrependToFreeList(span); |
239 } | 300 } |
240 | 301 |
241 void PageHeap::PrependToFreeList(Span* span) { | 302 void PageHeap::PrependToFreeList(Span* span) { |
242 ASSERT(span->location != Span::IN_USE); | 303 ASSERT(span->location != Span::IN_USE); |
243 SpanList* list = (span->length < kMaxPages) ? &free_[span->length] : &large_; | 304 SpanList* list = (span->length < kMaxPages) ? &free_[span->length] : &large_; |
244 if (span->location == Span::ON_NORMAL_FREELIST) { | 305 if (span->location == Span::ON_NORMAL_FREELIST) { |
245 stats_.free_bytes += (span->length << kPageShift); | 306 stats_.free_bytes += (span->length << kPageShift); |
246 DLL_Prepend(&list->normal, span); | 307 DLL_Prepend(&list->normal, span); |
247 } else { | 308 } else { |
(...skipping 244 matching lines...) |
492 ask = n; | 553 ask = n; |
493 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); | 554 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); |
494 } | 555 } |
495 if (ptr == NULL) return false; | 556 if (ptr == NULL) return false; |
496 } | 557 } |
497 ask = actual_size >> kPageShift; | 558 ask = actual_size >> kPageShift; |
498 RecordGrowth(ask << kPageShift); | 559 RecordGrowth(ask << kPageShift); |
499 | 560 |
500 uint64_t old_system_bytes = stats_.system_bytes; | 561 uint64_t old_system_bytes = stats_.system_bytes; |
501 stats_.system_bytes += (ask << kPageShift); | 562 stats_.system_bytes += (ask << kPageShift); |
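 | // Freshly allocated system memory is counted as committed. |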
| 563 stats_.committed_bytes += (ask << kPageShift); |
502 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; | 564 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; |
503 ASSERT(p > 0); | 565 ASSERT(p > 0); |
504 | 566 |
505 // If we already have a lot of pages allocated, just preallocate a bunch of | 567 // If we already have a lot of pages allocated, just preallocate a bunch of |
506 // memory for the page map. This prevents fragmentation by pagemap metadata | 568 // memory for the page map. This prevents fragmentation by pagemap metadata |
507 // when a program keeps allocating and freeing large blocks. | 569 // when a program keeps allocating and freeing large blocks. |
508 | 570 |
509 if (old_system_bytes < kPageMapBigAllocationThreshold | 571 if (old_system_bytes < kPageMapBigAllocationThreshold |
510 && stats_.system_bytes >= kPageMapBigAllocationThreshold) { | 572 && stats_.system_bytes >= kPageMapBigAllocationThreshold) { |
511 pagemap_.PreallocateMoreMemory(); | 573 pagemap_.PreallocateMoreMemory(); |
512 } | 574 } |
513 | 575 |
514 // Make sure pagemap_ has entries for all of the new pages. | 576 // Make sure pagemap_ has entries for all of the new pages. |
515 // Plus ensure one before and one after so coalescing code | 577 // Plus ensure one before and one after so coalescing code |
516 // does not need bounds-checking. | 578 // does not need bounds-checking. |
517 if (pagemap_.Ensure(p-1, ask+2)) { | 579 if (pagemap_.Ensure(p-1, ask+2)) { |
518 // Pretend the new area is allocated and then Delete() it to cause | 580 // Pretend the new area is allocated and then Delete() it to cause |
519 // any necessary coalescing to occur. | 581 // any necessary coalescing to occur. |
520 Span* span = NewSpan(p, ask); | 582 Span* span = NewSpan(p, ask); |
521 RecordSpan(span); | 583 RecordSpan(span); |
522 Delete(span); | 584 Delete(span); |
 | 585 ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes); |
523 ASSERT(Check()); | 586 ASSERT(Check()); |
524 return true; | 587 return true; |
525 } else { | 588 } else { |
526 // We could not allocate memory within "pagemap_" | 589 // We could not allocate memory within "pagemap_" |
527 // TODO: Once we can return memory to the system, return the new span | 590 // TODO: Once we can return memory to the system, return the new span |
528 return false; | 591 return false; |
529 } | 592 } |
530 } | 593 } |
531 | 594 |
532 bool PageHeap::Check() { | 595 bool PageHeap::Check() { |
(...skipping 19 matching lines...) |
552 CHECK_CONDITION(s->location == freelist); // NORMAL or RETURNED | 615 CHECK_CONDITION(s->location == freelist); // NORMAL or RETURNED |
553 CHECK_CONDITION(s->length >= min_pages); | 616 CHECK_CONDITION(s->length >= min_pages); |
554 CHECK_CONDITION(s->length <= max_pages); | 617 CHECK_CONDITION(s->length <= max_pages); |
555 CHECK_CONDITION(GetDescriptor(s->start) == s); | 618 CHECK_CONDITION(GetDescriptor(s->start) == s); |
556 CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s); | 619 CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s); |
557 } | 620 } |
558 return true; | 621 return true; |
559 } | 622 } |
560 | 623 |
561 } // namespace tcmalloc | 624 } // namespace tcmalloc |