| OLD | NEW |
| 1 // Copyright (c) 2008, Google Inc. | 1 // Copyright (c) 2008, Google Inc. |
| 2 // All rights reserved. | 2 // All rights reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
| (...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 144 ASSERT(leftover->location == Span::IN_USE); | 144 ASSERT(leftover->location == Span::IN_USE); |
| 145 Event(leftover, 'U', extra); | 145 Event(leftover, 'U', extra); |
| 146 RecordSpan(leftover); | 146 RecordSpan(leftover); |
| 147 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span | 147 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span |
| 148 span->length = n; | 148 span->length = n; |
| 149 | 149 |
| 150 return leftover; | 150 return leftover; |
| 151 } | 151 } |
| 152 | 152 |
| 153 void PageHeap::CommitSpan(Span* span) { | 153 void PageHeap::CommitSpan(Span* span) { |
| 154 TCMalloc_SystemCommit( | 154 TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift), |
| 155 reinterpret_cast<void*>(span->start << kPageShift), | 155 static_cast<size_t>(span->length << kPageShift)); |
| 156 static_cast<size_t>(span->length << kPageShift) | 156 } |
| 157 ); | 157 |
| 158 void PageHeap::DecommitSpan(Span* span) { |
| 159 TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift), |
| 160 static_cast<size_t>(span->length << kPageShift)); |
| 158 } | 161 } |
| 159 | 162 |
| 160 Span* PageHeap::Carve(Span* span, Length n) { | 163 Span* PageHeap::Carve(Span* span, Length n) { |
| 161 ASSERT(n > 0); | 164 ASSERT(n > 0); |
| 162 ASSERT(span->location != Span::IN_USE); | 165 ASSERT(span->location != Span::IN_USE); |
| 163 const int old_location = span->location; | 166 const int old_location = span->location; |
| 164 DLL_Remove(span); | 167 DLL_Remove(span); |
| 165 span->location = Span::IN_USE; | 168 span->location = Span::IN_USE; |
| 166 Event(span, 'A', n); | 169 Event(span, 'A', n); |
| 167 | 170 |
| 168 const int extra = span->length - n; | 171 const int extra = span->length - n; |
| 169 ASSERT(extra >= 0); | 172 ASSERT(extra >= 0); |
| 170 if (extra > 0) { | 173 if (extra > 0) { |
| 171 Span* leftover = NewSpan(span->start + n, extra); | 174 Span* leftover = NewSpan(span->start + n, extra); |
| 172 leftover->location = old_location; | 175 leftover->location = old_location; |
| 173 Event(leftover, 'S', extra); | 176 Event(leftover, 'S', extra); |
| 174 RecordSpan(leftover); | 177 RecordSpan(leftover); |
| 175 | 178 |
| 176 // Place leftover span on appropriate free list | 179 // Place leftover span on appropriate free list |
| 177 SpanList* listpair = (extra < kMaxPages) ? &free_[extra] : &large_; | 180 SpanList* listpair = (extra < kMaxPages) ? &free_[extra] : &large_; |
| 178 Span* dst = (leftover->location == Span::ON_RETURNED_FREELIST | 181 Span* dst = (leftover->location == Span::ON_RETURNED_FREELIST |
| 179 ? &listpair->returned : &listpair->normal); | 182 ? &listpair->returned : &listpair->normal); |
| 180 DLL_Prepend(dst, leftover); | 183 DLL_Prepend(dst, leftover); |
| 181 | 184 |
| 182 span->length = n; | 185 span->length = n; |
| 183 pagemap_.set(span->start + n - 1, span); | 186 pagemap_.set(span->start + n - 1, span); |
| 184 } | 187 } |
| 188 ASSERT(Check()); |
| 189 free_pages_ -= n; |
| 185 if (old_location == Span::ON_RETURNED_FREELIST) { | 190 if (old_location == Span::ON_RETURNED_FREELIST) { |
| 186 // We need to recommit this address space. | 191 // We need to recommit this address space. |
| 187 CommitSpan(span); | 192 CommitSpan(span); |
| 188 } | 193 } |
| 189 ASSERT(span->location == Span::IN_USE); | 194 ASSERT(span->location == Span::IN_USE); |
| 190 ASSERT(span->length == n); | 195 ASSERT(span->length == n); |
| 191 ASSERT(Check()); | |
| 192 free_pages_ -= n; | |
| 193 return span; | 196 return span; |
| 194 } | 197 } |
| 195 | 198 |
| 196 void PageHeap::Delete(Span* span) { | 199 void PageHeap::Delete(Span* span) { |
| 197 ASSERT(Check()); | 200 ASSERT(Check()); |
| 198 ASSERT(span->location == Span::IN_USE); | 201 ASSERT(span->location == Span::IN_USE); |
| 199 ASSERT(span->length > 0); | 202 ASSERT(span->length > 0); |
| 200 ASSERT(GetDescriptor(span->start) == span); | 203 ASSERT(GetDescriptor(span->start) == span); |
| 201 ASSERT(GetDescriptor(span->start + span->length - 1) == span); | 204 ASSERT(GetDescriptor(span->start + span->length - 1) == span); |
| 202 span->sizeclass = 0; | 205 span->sizeclass = 0; |
| 203 span->sample = 0; | 206 span->sample = 0; |
| 204 | 207 |
| 205 // Coalesce -- we guarantee that "p" != 0, so no bounds checking | 208 // Coalesce -- we guarantee that "p" != 0, so no bounds checking |
| 206 // necessary. We do not bother resetting the stale pagemap | 209 // necessary. We do not bother resetting the stale pagemap |
| 207 // entries for the pieces we are merging together because we only | 210 // entries for the pieces we are merging together because we only |
| 208 // care about the pagemap entries for the boundaries. | 211 // care about the pagemap entries for the boundaries. |
| 209 // | 212 // |
| 210 // Note that the spans we merge into "span" may come out of | 213 // Note that the adjacent spans we merge into "span" may come out of a |
| 211 // a "returned" list. We move those into "normal" list | 214 // "normal" (committed) list, and cleanly merge with our IN_USE span, which |
| 212 // as for 'immediate' operations we favour committing over | 215 // is implicitly committed. If the adjacent spans are on the "returned" |
| 213 // decommitting (decommitting is performed offline). | 216 // (decommitted) list, then we must get both spans into the same state before |
| 217 // or after we coalesce them. The current code always decommits. This is |
| 218 // achieved by blindly decommitting the entire coalesced region, which may |
| 219 // include any combination of committed and decommitted spans, at the end of |
| 220 // the method. |
| 221 |
| 222 // TODO(jar): "Always decommit" causes some extra calls to commit when we are |
| 223 // called in GrowHeap() during an allocation :-/. We need to eval the cost of |
| 224 // that oscillation, and possibly do something to reduce it. |
| 225 |
| 226 // TODO(jar): We need a better strategy for deciding to commit, or decommit, |
| 227 // based on memory usage and free heap sizes. |
| 228 |
| 214 const PageID p = span->start; | 229 const PageID p = span->start; |
| 215 const Length n = span->length; | 230 const Length n = span->length; |
| 216 Span* prev = GetDescriptor(p-1); | 231 Span* prev = GetDescriptor(p-1); |
| 217 if (prev != NULL && prev->location != Span::IN_USE) { | 232 if (prev != NULL && prev->location != Span::IN_USE) { |
| 218 // Merge preceding span into this span | 233 // Merge preceding span into this span |
| 219 ASSERT(prev->start + prev->length == p); | 234 ASSERT(prev->start + prev->length == p); |
| 220 const Length len = prev->length; | 235 const Length len = prev->length; |
| 221 if (prev->location == Span::ON_RETURNED_FREELIST) { | |
| 222 CommitSpan(prev); | |
| 223 } | |
| 224 DLL_Remove(prev); | 236 DLL_Remove(prev); |
| 225 DeleteSpan(prev); | 237 DeleteSpan(prev); |
| 226 span->start -= len; | 238 span->start -= len; |
| 227 span->length += len; | 239 span->length += len; |
| 228 pagemap_.set(span->start, span); | 240 pagemap_.set(span->start, span); |
| 229 Event(span, 'L', len); | 241 Event(span, 'L', len); |
| 230 } | 242 } |
| 231 Span* next = GetDescriptor(p+n); | 243 Span* next = GetDescriptor(p+n); |
| 232 if (next != NULL && next->location != Span::IN_USE) { | 244 if (next != NULL && next->location != Span::IN_USE) { |
| 233 // Merge next span into this span | 245 // Merge next span into this span |
| 234 ASSERT(next->start == p+n); | 246 ASSERT(next->start == p+n); |
| 235 const Length len = next->length; | 247 const Length len = next->length; |
| 236 if (next->location == Span::ON_RETURNED_FREELIST) { | |
| 237 CommitSpan(next); | |
| 238 } | |
| 239 DLL_Remove(next); | 248 DLL_Remove(next); |
| 240 DeleteSpan(next); | 249 DeleteSpan(next); |
| 241 span->length += len; | 250 span->length += len; |
| 242 pagemap_.set(span->start + span->length - 1, span); | 251 pagemap_.set(span->start + span->length - 1, span); |
| 243 Event(span, 'R', len); | 252 Event(span, 'R', len); |
| 244 } | 253 } |
| 245 | 254 |
| 246 Event(span, 'D', span->length); | 255 Event(span, 'D', span->length); |
| 247 span->location = Span::ON_NORMAL_FREELIST; | 256 span->location = Span::ON_RETURNED_FREELIST; |
| 257 DecommitSpan(span); |
| 248 if (span->length < kMaxPages) { | 258 if (span->length < kMaxPages) { |
| 249 DLL_Prepend(&free_[span->length].normal, span); | 259 DLL_Prepend(&free_[span->length].returned, span); |
| 250 } else { | 260 } else { |
| 251 DLL_Prepend(&large_.normal, span); | 261 DLL_Prepend(&large_.returned, span); |
| 252 } | 262 } |
| 253 free_pages_ += n; | 263 free_pages_ += n; |
| 254 | 264 |
| 255 IncrementalScavenge(n); | 265 IncrementalScavenge(n); |
| 256 ASSERT(Check()); | 266 ASSERT(Check()); |
| 257 } | 267 } |
| 258 | 268 |
| 259 void PageHeap::IncrementalScavenge(Length n) { | 269 void PageHeap::IncrementalScavenge(Length n) { |
| 260 // Fast path; not yet time to release memory | 270 // Fast path; not yet time to release memory |
| 261 scavenge_counter_ -= n; | 271 scavenge_counter_ -= n; |
| (...skipping 235 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 497 | 507 |
| 498 void PageHeap::ReleaseFreePages() { | 508 void PageHeap::ReleaseFreePages() { |
| 499 for (Length s = 0; s < kMaxPages; s++) { | 509 for (Length s = 0; s < kMaxPages; s++) { |
| 500 ReleaseFreeList(&free_[s].normal, &free_[s].returned); | 510 ReleaseFreeList(&free_[s].normal, &free_[s].returned); |
| 501 } | 511 } |
| 502 ReleaseFreeList(&large_.normal, &large_.returned); | 512 ReleaseFreeList(&large_.normal, &large_.returned); |
| 503 ASSERT(Check()); | 513 ASSERT(Check()); |
| 504 } | 514 } |
| 505 | 515 |
| 506 } // namespace tcmalloc | 516 } // namespace tcmalloc |
| OLD | NEW |