Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(230)

Side by Side Diff: third_party/tcmalloc/chromium/src/page_heap.h

Issue 576001: Merged third_party/tcmalloc/vendor/src(google-perftools r87) into... (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Removed the unnecessary printf and ASSERT(0) Created 10 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2008, Google Inc. 1 // Copyright (c) 2008, Google Inc.
2 // All rights reserved. 2 // All rights reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are 5 // modification, are permitted provided that the following conditions are
6 // met: 6 // met:
7 // 7 //
8 // * Redistributions of source code must retain the above copyright 8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer. 9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above 10 // * Redistributions in binary form must reproduce the above
(...skipping 16 matching lines...) Expand all
27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 29
30 // --- 30 // ---
31 // Author: Sanjay Ghemawat <opensource@google.com> 31 // Author: Sanjay Ghemawat <opensource@google.com>
32 32
33 #ifndef TCMALLOC_PAGE_HEAP_H_ 33 #ifndef TCMALLOC_PAGE_HEAP_H_
34 #define TCMALLOC_PAGE_HEAP_H_ 34 #define TCMALLOC_PAGE_HEAP_H_
35 35
36 #include <config.h> 36 #include <config.h>
37 #include <google/malloc_extension.h>
37 #include "common.h" 38 #include "common.h"
38 #include "packed-cache-inl.h" 39 #include "packed-cache-inl.h"
39 #include "pagemap.h" 40 #include "pagemap.h"
40 #include "span.h" 41 #include "span.h"
41 42
43 // We need to dllexport PageHeap just for the unittest. MSVC complains
44 // that we don't dllexport the PageHeap members, but we don't need to
45 // test those, so I just suppress this warning.
46 #ifdef _MSC_VER
47 #pragma warning(push)
48 #pragma warning(disable:4251)
49 #endif
50
42 // This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if 51 // This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
43 // you're porting to a system where you really can't get a stacktrace. 52 // you're porting to a system where you really can't get a stacktrace.
44 #ifdef NO_TCMALLOC_SAMPLES 53 #ifdef NO_TCMALLOC_SAMPLES
45 // We use #define so code compiles even if you #include stacktrace.h somehow. 54 // We use #define so code compiles even if you #include stacktrace.h somehow.
46 # define GetStackTrace(stack, depth, skip) (0) 55 # define GetStackTrace(stack, depth, skip) (0)
47 #else 56 #else
48 # include <google/stacktrace.h> 57 # include <google/stacktrace.h>
49 #endif 58 #endif
50 59
51 namespace tcmalloc { 60 namespace tcmalloc {
52 61
53 // ------------------------------------------------------------------------- 62 // -------------------------------------------------------------------------
54 // Map from page-id to per-page data 63 // Map from page-id to per-page data
55 // ------------------------------------------------------------------------- 64 // -------------------------------------------------------------------------
56 65
57 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines. 66 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
58 // ...except... 67 // ...except...
59 // On Windows, we use TCMalloc_PageMap1_LazyCommit<> for 32-bit machines. 68 // On Windows, we use TCMalloc_PageMap1_LazyCommit<> for 32-bit machines.
60 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings, 69 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
61 // because sometimes the sizeclass is all the information we need. 70 // because sometimes the sizeclass is all the information we need.
62 71
63 // Selector class -- general selector uses 3-level map 72 // Selector class -- general selector uses 3-level map
64 template <int BITS> class MapSelector { 73 template <int BITS> class MapSelector {
65 public: 74 public:
66 typedef TCMalloc_PageMap3<BITS-kPageShift> Type; 75 typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
67 typedef PackedCache<BITS-kPageShift, uint64_t> CacheType; 76 typedef PackedCache<BITS-kPageShift, uint64_t> CacheType;
68 }; 77 };
69 78
79 // A two-level map for 32-bit machines
70 template <> class MapSelector<32> { 80 template <> class MapSelector<32> {
71 public: 81 public:
72 #ifdef WIN32 82 #ifdef WIN32
73 // A flat map for 32-bit machines (with lazy commit of memory). 83 // A flat map for 32-bit machines (with lazy commit of memory).
74 typedef TCMalloc_PageMap1_LazyCommit<32-kPageShift> Type; 84 typedef TCMalloc_PageMap1_LazyCommit<32-kPageShift> Type;
75 #else 85 #else
76 // A two-level map for 32-bit machines 86 // A two-level map for 32-bit machines
77 typedef TCMalloc_PageMap2<32-kPageShift> Type; 87 typedef TCMalloc_PageMap2<32-kPageShift> Type;
78 #endif 88 #endif
79 typedef PackedCache<32-kPageShift, uint16_t> CacheType; 89 typedef PackedCache<32-kPageShift, uint16_t> CacheType;
80 }; 90 };
81 91
82 // ------------------------------------------------------------------------- 92 // -------------------------------------------------------------------------
83 // Page-level allocator 93 // Page-level allocator
84 // * Eager coalescing 94 // * Eager coalescing
85 // 95 //
86 // Heap for page-level allocation. We allow allocating and freeing a 96 // Heap for page-level allocation. We allow allocating and freeing a
 87 // contiguous run of pages (called a "span"). 97 // contiguous run of pages (called a "span").
88 // ------------------------------------------------------------------------- 98 // -------------------------------------------------------------------------
89 99
90 class PageHeap { 100 class PERFTOOLS_DLL_DECL PageHeap {
91 public: 101 public:
92 PageHeap(); 102 PageHeap();
93 103
94 // Allocate a run of "n" pages. Returns zero if out of memory. 104 // Allocate a run of "n" pages. Returns zero if out of memory.
95 // Caller should not pass "n == 0" -- instead, n should have 105 // Caller should not pass "n == 0" -- instead, n should have
96 // been rounded up already. 106 // been rounded up already.
97 Span* New(Length n); 107 Span* New(Length n);
98 108
99 // Delete the span "[p, p+n-1]". 109 // Delete the span "[p, p+n-1]".
100 // REQUIRES: span was returned by earlier call to New() and 110 // REQUIRES: span was returned by earlier call to New() and
101 // has not yet been deleted. 111 // has not yet been deleted.
102 void Delete(Span* span); 112 void Delete(Span* span);
103 113
104 // Mark an allocated span as being used for small objects of the 114 // Mark an allocated span as being used for small objects of the
105 // specified size-class. 115 // specified size-class.
106 // REQUIRES: span was returned by an earlier call to New() 116 // REQUIRES: span was returned by an earlier call to New()
107 // and has not yet been deleted. 117 // and has not yet been deleted.
108 void RegisterSizeClass(Span* span, size_t sc); 118 void RegisterSizeClass(Span* span, size_t sc);
109 119
110 // Split an allocated span into two spans: one of length "n" pages 120 // Split an allocated span into two spans: one of length "n" pages
111 // followed by another span of length "span->length - n" pages. 121 // followed by another span of length "span->length - n" pages.
112 // Modifies "*span" to point to the first span of length "n" pages. 122 // Modifies "*span" to point to the first span of length "n" pages.
113 // Returns a pointer to the second span. 123 // Returns a pointer to the second span.
114 // 124 //
115 // REQUIRES: "0 < n < span->length" 125 // REQUIRES: "0 < n < span->length"
116 // REQUIRES: span->location == IN_USE 126 // REQUIRES: span->location == IN_USE
117 // REQUIRES: span->sizeclass == 0 127 // REQUIRES: span->sizeclass == 0
118 Span* Split(Span* span, Length n); 128 Span* Split(Span* span, Length n);
119 129
120 // Return the descriptor for the specified page. 130 // Return the descriptor for the specified page. Returns NULL if
131 // this PageID was not allocated previously.
121 inline Span* GetDescriptor(PageID p) const { 132 inline Span* GetDescriptor(PageID p) const {
122 return reinterpret_cast<Span*>(pagemap_.get(p)); 133 return reinterpret_cast<Span*>(pagemap_.get(p));
123 } 134 }
124 135
125 // Dump state to stderr 136 // Dump state to stderr
126 void Dump(TCMalloc_Printer* out); 137 void Dump(TCMalloc_Printer* out);
127 138
128 // Return number of bytes allocated from system 139 // If this page heap is managing a range with starting page # >= start,
129 inline uint64_t SystemBytes() const { return system_bytes_; } 140 // store info about the range in *r and return true. Else return false.
141 bool GetNextRange(PageID start, base::MallocRange* r);
130 142
131 inline uint64_t CommittedBytes() const { return committed_bytes_; } 143 // Page heap statistics
144 struct Stats {
145 Stats() : system_bytes(0), free_bytes(0), unmapped_bytes(0) {}
146 uint64_t system_bytes; // Total bytes allocated from system
147 uint64_t free_bytes; // Total bytes on normal freelists
148 uint64_t unmapped_bytes; // Total bytes on returned freelists
149 uint64_t committed_bytes; // Bytes committed, always <= system_bytes_.
132 150
133 // Return number of free bytes in heap 151 };
134 uint64_t FreeBytes() const { 152 inline Stats stats() const { return stats_; }
135 return (static_cast<uint64_t>(free_pages_) << kPageShift);
136 }
137 153
138 bool Check(); 154 bool Check();
139 // Like Check() but does some more comprehensive checking. 155 // Like Check() but does some more comprehensive checking.
140 bool CheckExpensive(); 156 bool CheckExpensive();
141 bool CheckList(Span* list, Length min_pages, Length max_pages, 157 bool CheckList(Span* list, Length min_pages, Length max_pages,
142 int freelist); // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST 158 int freelist); // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST
143 159
144 // Release all pages on the free list for reuse by the OS: 160 // Try to release at least num_pages for reuse by the OS. Returns
145 void ReleaseFreePages(); 161 // the actual number of pages released, which may be less than
162 // num_pages if there weren't enough pages to release. The result
163 // may also be larger than num_pages since page_heap might decide to
164 // release one large range instead of fragmenting it into two
165 // smaller released and unreleased ranges.
166 Length ReleaseAtLeastNPages(Length num_pages);
146 167
147 // Return 0 if we have no information, or else the correct sizeclass for p. 168 // Return 0 if we have no information, or else the correct sizeclass for p.
148 // Reads and writes to pagemap_cache_ do not require locking. 169 // Reads and writes to pagemap_cache_ do not require locking.
149 // The entries are 64 bits on 64-bit hardware and 16 bits on 170 // The entries are 64 bits on 64-bit hardware and 16 bits on
150 // 32-bit hardware, and we don't mind raciness as long as each read of 171 // 32-bit hardware, and we don't mind raciness as long as each read of
151 // an entry yields a valid entry, not a partially updated entry. 172 // an entry yields a valid entry, not a partially updated entry.
152 size_t GetSizeClassIfCached(PageID p) const { 173 size_t GetSizeClassIfCached(PageID p) const {
153 return pagemap_cache_.GetOrDefault(p, 0); 174 return pagemap_cache_.GetOrDefault(p, 0);
154 } 175 }
155 void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); } 176 void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
156 177
157 private: 178 private:
158 // Allocates a big block of memory for the pagemap once we reach more than 179 // Allocates a big block of memory for the pagemap once we reach more than
159 // 128MB 180 // 128MB
160 static const size_t kPageMapBigAllocationThreshold = 128 << 20; 181 static const size_t kPageMapBigAllocationThreshold = 128 << 20;
161 182
162 // Minimum number of pages to fetch from system at a time. Must be 183 // Minimum number of pages to fetch from system at a time. Must be
163 // significantly bigger than kBlockSize to amortize system-call 184 // significantly bigger than kBlockSize to amortize system-call
 164 // overhead, and also to reduce external fragmentation. Also, we 185 // overhead, and also to reduce external fragmentation. Also, we
165 // should keep this value big because various incarnations of Linux 186 // should keep this value big because various incarnations of Linux
166 // have small limits on the number of mmap() regions per 187 // have small limits on the number of mmap() regions per
167 // address-space. 188 // address-space.
168 static const int kMinSystemAlloc = 1 << (20 - kPageShift); 189 static const int kMinSystemAlloc = 1 << (20 - kPageShift);
169 190
170 // For all span-lengths < kMaxPages we keep an exact-size list. 191 // For all span-lengths < kMaxPages we keep an exact-size list.
171 // REQUIRED: kMaxPages >= kMinSystemAlloc; 192 // REQUIRED: kMaxPages >= kMinSystemAlloc;
172 static const size_t kMaxPages = kMinSystemAlloc; 193 static const size_t kMaxPages = kMinSystemAlloc;
173 194
195 // Never delay scavenging for more than the following number of
196 // deallocated pages. With 4K pages, this comes to 4GB of
197 // deallocation.
198 // Chrome: Changed to 64MB
199 static const int kMaxReleaseDelay = 1 << 14;
200
201 // If there is nothing to release, wait for so many pages before
202 // scavenging again. With 4K pages, this comes to 1GB of memory.
203 // Chrome: Changed to 16MB
204 static const int kDefaultReleaseDelay = 1 << 12;
205
174 // Pick the appropriate map and cache types based on pointer size 206 // Pick the appropriate map and cache types based on pointer size
175 typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap; 207 typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
176 typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache; 208 typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
177 PageMap pagemap_; 209 PageMap pagemap_;
178 mutable PageMapCache pagemap_cache_; 210 mutable PageMapCache pagemap_cache_;
179 211
180 // We segregate spans of a given size into two circular linked 212 // We segregate spans of a given size into two circular linked
181 // lists: one for normal spans, and one for spans whose memory 213 // lists: one for normal spans, and one for spans whose memory
182 // has been returned to the system. 214 // has been returned to the system.
183 struct SpanList { 215 struct SpanList {
184 Span normal; 216 Span normal;
185 Span returned; 217 Span returned;
186 }; 218 };
187 219
188 // List of free spans of length >= kMaxPages 220 // List of free spans of length >= kMaxPages
189 SpanList large_; 221 SpanList large_;
190 222
191 // Array mapping from span length to a doubly linked list of free spans 223 // Array mapping from span length to a doubly linked list of free spans
192 SpanList free_[kMaxPages]; 224 SpanList free_[kMaxPages];
193 225
194 // Number of pages kept in free lists 226 // Statistics on system, free, and unmapped bytes
195 uintptr_t free_pages_; 227 Stats stats_;
196
197 // Bytes allocated from system
198 uint64_t system_bytes_;
199
200 // Bytes committed, always <= system_bytes_.
201 uint64_t committed_bytes_;
202
203 bool GrowHeap(Length n); 228 bool GrowHeap(Length n);
204 229
205 // REQUIRES: span->length >= n 230 // REQUIRES: span->length >= n
206 // REQUIRES: span->location != IN_USE 231 // REQUIRES: span->location != IN_USE
207 // Remove span from its free list, and move any leftover part of 232 // Remove span from its free list, and move any leftover part of
208 // span into appropriate free lists. Also update "span" to have 233 // span into appropriate free lists. Also update "span" to have
209 // length exactly "n" and mark it as non-free so it can be returned 234 // length exactly "n" and mark it as non-free so it can be returned
210 // to the client. After all that, decrease free_pages_ by n and 235 // to the client. After all that, decrease free_pages_ by n and
211 // return span. 236 // return span.
212 Span* Carve(Span* span, Length n); 237 Span* Carve(Span* span, Length n);
213 238
214 void RecordSpan(Span* span) { 239 void RecordSpan(Span* span) {
215 pagemap_.set(span->start, span); 240 pagemap_.set(span->start, span);
216 if (span->length > 1) { 241 if (span->length > 1) {
217 pagemap_.set(span->start + span->length - 1, span); 242 pagemap_.set(span->start + span->length - 1, span);
218 } 243 }
219 } 244 }
220 245
221 // Allocate a large span of length == n. If successful, returns a 246 // Allocate a large span of length == n. If successful, returns a
222 // span of exactly the specified length. Else, returns NULL. 247 // span of exactly the specified length. Else, returns NULL.
223 Span* AllocLarge(Length n); 248 Span* AllocLarge(Length n);
224 249
225 #if defined(OS_LINUX) 250 // Coalesce span with neighboring spans if possible, prepend to
226 // Coalesce span with neighboring spans if possible. Add the 251 // appropriate free list, and adjust stats.
227 // resulting span to the appropriate free list. 252 void MergeIntoFreeList(Span* span);
228 void AddToFreeList(Span* span); 253
229 #else // ! defined(OS_LINUX)
230 // Commit the span. 254 // Commit the span.
231 void CommitSpan(Span* span); 255 void CommitSpan(Span* span);
232 256
233 // Decommit the span. 257 // Decommit the span.
234 void DecommitSpan(Span* span); 258 void DecommitSpan(Span* span);
235 #endif // ! defined(OS_LINUX) 259
260 // Prepends span to appropriate free list, and adjusts stats.
261 void PrependToFreeList(Span* span);
262
263 // Removes span from its free list, and adjust stats.
264 void RemoveFromFreeList(Span* span);
236 265
237 // Incrementally release some memory to the system. 266 // Incrementally release some memory to the system.
238 // IncrementalScavenge(n) is called whenever n pages are freed. 267 // IncrementalScavenge(n) is called whenever n pages are freed.
239 void IncrementalScavenge(Length n); 268 void IncrementalScavenge(Length n);
240 269
241 #if defined(OS_LINUX) 270 // Release the last span on the normal portion of this list.
242 // Release all pages in the specified free list for reuse by the OS 271 // Return the length of that span.
 243 // REQUIRES: list must be a "normal" list (i.e., not "returned") 272 Length ReleaseLastNormalSpan(SpanList* slist);
244 void ReleaseFreeList(Span* list); 273
245 #else // ! defined(OS_LINUX)
246 // Releases all memory held in the given list's 'normal' freelist and adds
247 // it to the 'released' freelist.
248 void ReleaseFreeList(Span* list, Span* returned);
249 #endif // ! defined(OS_LINUX)
250 274
251 // Number of pages to deallocate before doing more scavenging 275 // Number of pages to deallocate before doing more scavenging
252 int64_t scavenge_counter_; 276 int64_t scavenge_counter_;
253 277
254 // Index of last free list we scavenged 278 // Index of last free list where we released memory to the OS.
255 int scavenge_index_; 279 int release_index_;
256 }; 280 };
257 281
258 } // namespace tcmalloc 282 } // namespace tcmalloc
259 283
284 #ifdef _MSC_VER
285 #pragma warning(pop)
286 #endif
287
260 #endif // TCMALLOC_PAGE_HEAP_H_ 288 #endif // TCMALLOC_PAGE_HEAP_H_
OLDNEW
« no previous file with comments | « third_party/tcmalloc/chromium/src/packed-cache-inl.h ('k') | third_party/tcmalloc/chromium/src/page_heap.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698