OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2008, Google Inc. |
| 2 // All rights reserved. |
| 3 // |
| 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are |
| 6 // met: |
| 7 // |
| 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above |
| 11 // copyright notice, this list of conditions and the following disclaimer |
| 12 // in the documentation and/or other materials provided with the |
| 13 // distribution. |
| 14 // * Neither the name of Google Inc. nor the names of its |
| 15 // contributors may be used to endorse or promote products derived from |
| 16 // this software without specific prior written permission. |
| 17 // |
| 18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 29 |
| 30 // --- |
| 31 // Author: Sanjay Ghemawat <opensource@google.com> |
| 32 |
| 33 #ifndef TCMALLOC_PAGE_HEAP_H_ |
| 34 #define TCMALLOC_PAGE_HEAP_H_ |
| 35 |
| 36 #include <config.h> |
| 37 #include "common.h" |
| 38 #include "packed-cache-inl.h" |
| 39 #include "pagemap.h" |
| 40 #include "span.h" |
| 41 |
| 42 // This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if |
| 43 // you're porting to a system where you really can't get a stacktrace. |
| 44 #ifdef NO_TCMALLOC_SAMPLES |
| 45 // We use #define so code compiles even if you #include stacktrace.h somehow. |
| 46 # define GetStackTrace(stack, depth, skip) (0) |
| 47 #else |
| 48 # include <google/stacktrace.h> |
| 49 #endif |
| 50 |
| 51 |
| 52 namespace tcmalloc { |
| 53 |
| 54 // ------------------------------------------------------------------------- |
| 55 // Map from page-id to per-page data |
| 56 // ------------------------------------------------------------------------- |
| 57 |
| 58 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines. |
| 59 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings, |
| 60 // because sometimes the sizeclass is all the information we need. |
| 61 |
| 62 // Selector class -- general selector uses 3-level map |
| 63 template <int BITS> class MapSelector { |
| 64 public: |
| 65 typedef TCMalloc_PageMap3<BITS-kPageShift> Type; |
| 66 typedef PackedCache<BITS-kPageShift, uint64_t> CacheType; |
| 67 }; |
| 68 |
| 69 // A two-level map for 32-bit machines |
| 70 template <> class MapSelector<32> { |
| 71 public: |
| 72 typedef TCMalloc_PageMap2<32-kPageShift> Type; |
| 73 typedef PackedCache<32-kPageShift, uint16_t> CacheType; |
| 74 }; |
| 75 |
| 76 // ------------------------------------------------------------------------- |
| 77 // Page-level allocator |
| 78 // * Eager coalescing |
| 79 // |
| 80 // Heap for page-level allocation.  We allow allocating and freeing |
| 81 // contiguous runs of pages (called a "span"). |
| 82 // ------------------------------------------------------------------------- |
| 83 |
class PageHeap {
 public:
  PageHeap();

  // Allocate a run of "n" pages.  Returns zero if out of memory.
  // Caller should not pass "n == 0" -- instead, n should have
  // been rounded up already.
  Span* New(Length n);

  // Delete the span "[p, p+n-1]", returning its pages to the free lists
  // (free spans are eagerly coalesced, per the section comment above).
  // REQUIRES: span was returned by earlier call to New() and
  //           has not yet been deleted.
  void Delete(Span* span);

  // Mark an allocated span as being used for small objects of the
  // specified size-class.
  // REQUIRES: span was returned by an earlier call to New()
  //           and has not yet been deleted.
  void RegisterSizeClass(Span* span, size_t sc);

  // Split an allocated span into two spans: one of length "n" pages
  // followed by another span of length "span->length - n" pages.
  // Modifies "*span" to point to the first span of length "n" pages.
  // Returns a pointer to the second span.
  //
  // REQUIRES: "0 < n < span->length"
  // REQUIRES: span->location == IN_USE
  // REQUIRES: span->sizeclass == 0
  Span* Split(Span* span, Length n);

  // Return the descriptor for the specified page.
  inline Span* GetDescriptor(PageID p) const {
    return reinterpret_cast<Span*>(pagemap_.get(p));
  }

  // Dump state to stderr.
  void Dump(TCMalloc_Printer* out);

  // Return number of bytes allocated from system.
  inline uint64_t SystemBytes() const { return system_bytes_; }

  // Return number of free bytes in heap (free_pages_ scaled to bytes).
  uint64_t FreeBytes() const {
    return (static_cast<uint64_t>(free_pages_) << kPageShift);
  }

  // Cheap sanity check of the heap's internal invariants.
  bool Check();
  // Like Check() but does some more comprehensive checking.
  bool CheckExpensive();
  // Check one free list; spans must have "location" == "freelist" and a
  // length in [min_pages, max_pages].
  bool CheckList(Span* list, Length min_pages, Length max_pages,
                 int freelist);  // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST

  // Release all pages on the free list for reuse by the OS.
  void ReleaseFreePages();

  // Return 0 if we have no information, or else the correct sizeclass for p.
  // Reads and writes to pagemap_cache_ do not require locking.
  // The entries are 64 bits on 64-bit hardware and 16 bits on
  // 32-bit hardware, and we don't mind raciness as long as each read of
  // an entry yields a valid entry, not a partially updated entry.
  size_t GetSizeClassIfCached(PageID p) const {
    return pagemap_cache_.GetOrDefault(p, 0);
  }
  // Record p's sizeclass in the cache.  Declared const because it only
  // touches the mutable pagemap_cache_ member.
  void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }

  // Attempt to free some free pages currently not used.
  void Scavenge();

 private:
  // Heap size (in bytes) beyond which we allocate one big block of memory
  // for the pagemap instead of growing it piecemeal: 128MB.
  static const size_t kPageMapBigAllocationThreshold = 128 << 20;

  // Minimum number of pages to fetch from system at a time.  Must be
  // significantly bigger than kBlockSize to amortize system-call
  // overhead, and also to reduce external fragmentation.  Also, we
  // should keep this value big because various incarnations of Linux
  // have small limits on the number of mmap() regions per
  // address-space.
  static const int kMinSystemAlloc = 1 << (20 - kPageShift);  // 1MB of pages

  // For all span-lengths < kMaxPages we keep an exact-size list.
  // REQUIRED: kMaxPages >= kMinSystemAlloc;
  static const size_t kMaxPages = kMinSystemAlloc;

  // Pick the appropriate map and cache types based on pointer size.
  typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
  typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
  // PageID -> Span* mapping.
  PageMap pagemap_;
  // Hot PageID -> sizeclass cache; mutable so the const accessors above
  // can update it.
  mutable PageMapCache pagemap_cache_;

  // We segregate spans of a given size into two circular linked
  // lists: one for normal spans, and one for spans whose memory
  // has been returned to the system.
  struct SpanList {
    Span normal;
    Span returned;
  };

  // List of free spans of length >= kMaxPages.
  SpanList large_;

  // Array mapping from span length to a doubly linked list of free spans.
  SpanList free_[kMaxPages];

  // Number of pages kept in free lists.
  uintptr_t free_pages_;

  // Bytes allocated from system.
  uint64_t system_bytes_;

  // Obtain memory from the system for at least "n" more pages.
  // Returns false on failure -- NOTE(review): inferred from the bool
  // return; confirm against the implementation.
  bool GrowHeap(Length n);

  // REQUIRES: span->length >= n
  // REQUIRES: span->location != IN_USE
  // Remove span from its free list, and move any leftover part of
  // span into appropriate free lists.  Also update "span" to have
  // length exactly "n" and mark it as non-free so it can be returned
  // to the client.  After all that, decrease free_pages_ by n and
  // return span.
  Span* Carve(Span* span, Length n);

  // Record "span" in the pagemap under its first page id and, for
  // multi-page spans, its last page id as well (so lookups from either
  // end of the range find the descriptor).
  void RecordSpan(Span* span) {
    pagemap_.set(span->start, span);
    if (span->length > 1) {
      pagemap_.set(span->start + span->length - 1, span);
    }
  }

  // Allocate a large span of length == n.  If successful, returns a
  // span of exactly the specified length.  Else, returns NULL.
  Span* AllocLarge(Length n);

  // Commits the span (makes its memory usable).
  void CommitSpan(Span* span);

#if DEFER_DECOMMIT
  // Number of free committed pages that we want to keep around.
  static const size_t kMinimumFreeCommittedPageCount = 512;  // 2M (2 ** 21) for 4K pages

  // During a scavenge, we'll release up to a fraction of the free
  // committed pages.
#ifdef _WIN32
  // We are slightly less aggressive in releasing memory on Windows due
  // to performance reasons.
  static const int kMaxScavengeAmountFactor = 3;
#else
  static const int kMaxScavengeAmountFactor = 2;
#endif

  // Decommits some parts from "span_list", up to "to_decommit" pages.
  // NOTE(review): exact return semantics (pages decommitted?) not visible
  // here -- confirm against the implementation.
  uint64_t DecommitFromSpanList(SpanList* span_list, uint64_t to_decommit);

  // Decommits the given span from "span_list".  NOTE(review): the
  // original comment here duplicated DecommitFromSpanList's; confirm the
  // exact semantics (and the meaning of the returned Length) in the .cc.
  Length DecommitLastSpan(SpanList* span_list, Span* span);

  // Number of pages kept in free lists that are still committed, a.k.a.
  // total number of pages in "normal" lists.
  uint64_t free_committed_pages_;

  // Number of pages that we committed in the last scavenge wait interval.
  uint64_t pages_committed_since_last_scavenge_;
#else
  // Incrementally release some memory to the system.
  // IncrementalScavenge(n) is called whenever n pages are freed.
  void IncrementalScavenge(Length n);

  // Number of pages to deallocate before doing more scavenging.
  int64_t scavenge_counter_;
#endif

  // Index of last free list we scavenged.
  int scavenge_index_;
};
| 256 |
| 257 } // namespace tcmalloc |
| 258 |
| 259 #endif // TCMALLOC_PAGE_HEAP_H_ |
OLD | NEW |