OLD | NEW |
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 38 matching lines...) |
49 | 49 |
50 #include <stddef.h> // for NULL, size_t | 50 #include <stddef.h> // for NULL, size_t |
51 #include <string.h> // for memset | 51 #include <string.h> // for memset |
52 #if defined HAVE_STDINT_H | 52 #if defined HAVE_STDINT_H |
53 #include <stdint.h> | 53 #include <stdint.h> |
54 #elif defined HAVE_INTTYPES_H | 54 #elif defined HAVE_INTTYPES_H |
55 #include <inttypes.h> | 55 #include <inttypes.h> |
56 #else | 56 #else |
57 #include <sys/types.h> | 57 #include <sys/types.h> |
58 #endif | 58 #endif |
| 59 #ifdef WIN32 |
| 60 // TODO(jar): This is not needed when TCMalloc_PageMap1_LazyCommit has an API |
| 61 // supporting commit and reservation of memory. |
| 62 #include "common.h" |
| 63 #endif |
| 64 |
59 #include "internal_logging.h" // for ASSERT | 65 #include "internal_logging.h" // for ASSERT |
60 | 66 |
61 // Single-level array | 67 // Single-level array |
62 template <int BITS> | 68 template <int BITS> |
63 class TCMalloc_PageMap1 { | 69 class TCMalloc_PageMap1 { |
64 private: | 70 private: |
65 static const int LENGTH = 1 << BITS; | 71 static const int LENGTH = 1 << BITS; |
66 | 72 |
67 void** array_; | 73 void** array_; |
68 | 74 |
(...skipping 38 matching lines...) |
107 // a page number >= k. Returns NULL if no such number is found. | 113 // a page number >= k. Returns NULL if no such number is found. |
108 void* Next(Number k) const { | 114 void* Next(Number k) const { |
109 while (k < (1 << BITS)) { | 115 while (k < (1 << BITS)) { |
110 if (array_[k] != NULL) return array_[k]; | 116 if (array_[k] != NULL) return array_[k]; |
111 k++; | 117 k++; |
112 } | 118 } |
113 return NULL; | 119 return NULL; |
114 } | 120 } |
115 }; | 121 }; |
116 | 122 |
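For orientation, a minimal usage sketch of the flat map above. The constants (kPageShift = 12, a 32-bit address space), the Span and MetaDataAlloc names, and the assumption that the constructor takes an allocator callback and allocates the whole array up front are illustrative, inferred from upstream tcmalloc rather than quoted from this patch.

    // Hypothetical usage sketch; not part of this change.
    #include <stdint.h>
    #include <stdlib.h>
    #include "pagemap.h"

    struct Span;                                // per-run bookkeeping record (assumed)
    static void* MetaDataAlloc(size_t bytes) {  // stand-in metadata allocator
      return calloc(1, bytes);
    }

    static const int kSketchPageShift = 12;     // 4 KB pages (assumed)
    typedef TCMalloc_PageMap1<32 - kSketchPageShift> SketchPageMap;  // 2^20 entries

    // Record which span owns the page containing |ptr|, then read it back.
    void RecordAndLookup(SketchPageMap* pagemap, void* ptr, Span* span) {
      uintptr_t page = reinterpret_cast<uintptr_t>(ptr) >> kSketchPageShift;
      pagemap->set(page, span);                 // flat array: O(1) store
      Span* owner = static_cast<Span*>(pagemap->get(page));
      (void)owner;                              // owner == span
    }

Construction would then look like SketchPageMap map(&MetaDataAlloc); with the map consuming sizeof(void*) << 20 bytes of metadata regardless of how much of it is ever used, which is exactly the cost the lazy-commit variant below avoids.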
| 123 #ifdef WIN32 |
| 124 // Lazy commit, single-level array. |
| 125 // Very similar to PageMap1, except the page map is only committed as needed. |
| 126 // Since we don't return memory to the OS, the committed portion of the map will |
| 127 // only grow, and we'll only be called to Ensure when we really grow the heap. |
| 128 // We maintain a bit map to help us deduce if we've already committed a range |
| 129 // in our map. |
| 130 template <int BITS> |
| 131 class TCMalloc_PageMap1_LazyCommit { |
| 132 private: |
| 133 // Dimension of our page map array_. |
| 134 static const int LENGTH = 1 << BITS; |
| 135 |
| 136 // The page map array that sits in reserved virtual space. Pages of this |
| 137 // array are committed as they are needed. For each page of virtual memory, |
| 138 // we potentially have a pointer to a span instance. |
| 139 void** array_; |
| 140 |
| 141 // A bit vector that allows us to deduce what pages in array_ are committed. |
| 142 // Note that 2^3 = 8 bits per char, so the magical "3" in the shift below |
| 143 // effectively divides the page count by 8. |
| 144 char committed_[sizeof(void*) << (BITS - kPageShift - 3)]; |
| 145 |
| 146 // Given an |index| into |array_|, find the page number in |array_| that holds |
| 147 // that element. |
| 148 size_t ContainingPage(size_t index) const { |
| 149 return (index * sizeof(*array_)) >> kPageShift; |
| 150 } |
| 151 |
| 152 // Find out if the given page_num index in array_ is in committed memory. |
| 153 bool IsCommitted(size_t page_num) const { |
| 154 return committed_[page_num >> 3] & (1 << (page_num & 0x7)); |
| 155 } |
| 156 |
| 157 // Remember that the given page_num index in array_ is in committed memory. |
| 158 void SetCommitted(size_t page_num) { |
| 159 committed_[page_num >> 3] |= (1 << (page_num & 0x7)); |
| 160 } |
| 161 |
| 162 public: |
| 163 typedef uintptr_t Number; |
| 164 |
| 165 explicit TCMalloc_PageMap1_LazyCommit(void* (*allocator)(size_t)) { |
| 166 // TODO(jar): We need a reservation function, but current API to this class |
| 167 // only provides an allocator. |
| 168 // Get decommitted memory. We will commit as necessary. |
| 169 array_ = reinterpret_cast<void**>(VirtualAlloc( |
| 170 NULL, sizeof(*array_) << BITS, MEM_RESERVE, PAGE_READWRITE)); |
| 171 |
| 172 // Make sure committed_ has exactly one bit per page of array_. |
| 173 ASSERT(sizeof(committed_) * 8 == (LENGTH * sizeof(*array_)) >> kPageShift); |
| 174 // Indicate that none of the pages of array_ have been committed yet. |
| 175 memset(committed_, 0, sizeof(committed_)); |
| 176 } |
| 177 |
| 178 // Ensure that the map contains initialized and committed entries in array_ to |
| 179 // describe pages "x .. x+n-1". |
| 180 // Returns true if successful, false if we could not ensure this. |
| 181 // If we have to commit more memory in array_ (which also clears said memory), |
| 182 // then we'll set some of the bits in committed_ to remember this fact. |
| 183 // Only the bits of committed_ near end-points for calls to Ensure() are ever |
| 184 // set, as the calls to Ensure() will never have overlapping ranges other than |
| 185 // at their end-points. |
| 186 // |
| 187 // Example: Suppose the OS allocates memory in pages including 40...50, and |
| 188 // later the OS allocates memory in pages 51...83. When the first allocation |
| 189 // of 40...50 is observed, Ensure() will be called to cover pages 39...51. The |
| 190 // requested range is extended by a page at each end so that tcmalloc can |
| 191 // check whether adjacent pages are part of a span that can be coalesced. |
| 192 // Later, when pages 51...83 are allocated, Ensure() will be called to cover |
| 193 // pages 50...84, broadened again for the same reason. |
| 194 // |
| 195 // After the above, we would NEVER get a call such as Ensure(45,60), as that |
| 196 // overlaps with the interior of prior ensured regions. We ONLY get an Ensure |
| 197 // call when the OS has allocated memory, and since we NEVER give memory back |
| 198 // to the OS, the OS can't possibly allocate the same region to us twice, and |
| 199 // can't induce an Ensure() on the interior of a previous Ensure() call. |
| 200 // |
| 201 // Also note that OS allocations are NOT guaranteed to be consecutive (there |
| 202 // may be "holes" where code etc. uses the virtual addresses), or to appear in |
| 203 // any order, such as lowest to highest, or vice versa (as other independent |
| 204 // allocation systems in the process may be performing VirtualAllocations and |
| 205 // VirtualFrees asynchronously). |
| 206 bool Ensure(Number x, size_t n) { |
| 207 if (n > LENGTH - x) |
| 208 return false; // We won't Ensure a mapping that runs past the end of the map. |
| 209 ASSERT(n > 0); |
| 210 |
| 211 // For a given page number in memory, calculate what page in array_ needs to |
| 212 // be memory resident. Note that we really only need a few bytes in array_ |
| 213 // for each page of virtual space we have to map, but we can only commit |
| 214 // whole pages of array_. For instance, a 4K page of array_ has about 1K |
| 215 // entries, and hence can map about 1K pages, or a total of about 4MB |
| 216 // typically. As a result, it is possible that the first entry in array_, |
| 217 // and the n'th entry in array_, will sit in the same page of array_. |
| 218 size_t first_page = ContainingPage(x); |
| 219 size_t last_page = ContainingPage(x + n - 1); |
| 220 |
| 221 // Check at each boundary, to see if we need to commit at that end. Some |
| 222 // other neighbor may have already forced us to commit at either or both |
| 223 // boundaries. |
| 224 if (IsCommitted(first_page)) { |
| 225 if (first_page == last_page) return true; |
| 226 ++first_page; |
| 227 if (IsCommitted(first_page)) { |
| 228 if (first_page == last_page) return true; |
| 229 ++first_page; |
| 230 } |
| 231 } |
| 232 |
| 233 if (IsCommitted(last_page)) { |
| 234 if (first_page == last_page) return true; |
| 235 --last_page; |
| 236 if (IsCommitted(last_page)) { |
| 237 if (first_page == last_page) return true; |
| 238 --last_page; |
| 239 } |
| 240 } |
| 241 |
| 242 ASSERT(!IsCommitted(last_page)); |
| 243 ASSERT(!IsCommitted(first_page)); |
| 244 |
| 245 void* start = reinterpret_cast<char*>(array_) + (first_page << kPageShift); |
| 246 size_t length = (last_page - first_page + 1) << kPageShift; |
| 247 |
| 248 #ifndef NDEBUG |
| 249 // Validate we are committing new sections, and hence we're not clearing any |
| 250 // existing data. |
| 251 MEMORY_BASIC_INFORMATION info = {0}; |
| 252 size_t result = VirtualQuery(start, &info, sizeof(info)); |
| 253 ASSERT(result); |
| 254 ASSERT(0 == (info.State & MEM_COMMIT)); // It starts out uncommitted. |
| 255 ASSERT(info.RegionSize >= length); // Entire length is uncommitted. |
| 256 #endif |
| 257 |
| 258 // TODO(jar): We need a commit that automatically tallies metadata_bytes. |
| 259 TCMalloc_SystemCommit(start, length); |
| 260 tcmalloc::increment_metadata_system_bytes(length); |
| 261 |
| 262 #ifndef NDEBUG |
| 263 result = VirtualQuery(start, &info, sizeof(info)); |
| 264 ASSERT(result); |
| 265 ASSERT(0 != (info.State & MEM_COMMIT)); // Now it is committed. |
| 266 ASSERT(info.RegionSize >= length); // Entire length is committed. |
| 267 #endif |
| 268 |
| 269 // As noted in the large comment/example describing this method, we will |
| 270 // never be called with a range of pages lying entirely in the interior of |
| 271 // this |first_page| to |last_page| range. |
| 272 // As a result, we only need to set bits for each end of that range, and one |
| 273 // page inside each end. |
| 274 SetCommitted(first_page); |
| 275 if (first_page < last_page) { |
| 276 SetCommitted(last_page); |
| 277 SetCommitted(first_page + 1); // These may be duplicates now. |
| 278 SetCommitted(last_page - 1); |
| 279 } |
| 280 |
| 281 return true; |
| 282 } |
| 283 |
| 284 // This call is made ahead of need, to get all the meta-memory allocated and |
| 285 // so avoid virtual address space fragmentation. Since we pre-reserved all the |
| 286 // memory, we don't need to do anything here (we won't fragment virtual space). |
| 287 void PreallocateMoreMemory() {} |
| 288 |
| 289 // Return the current value for KEY. Returns NULL if not yet set, |
| 290 // or if k is out of range. |
| 291 void* get(Number k) const { |
| 292 if ((k >> BITS) > 0) { |
| 293 return NULL; |
| 294 } |
| 295 return array_[k]; |
| 296 } |
| 297 |
| 298 // REQUIRES "k" is in range "[0,2^BITS-1]". |
| 299 // REQUIRES "k" has been ensured before. |
| 300 // |
| 301 // Sets the value for KEY. |
| 302 void set(Number k, void* v) { |
| 303 array_[k] = v; |
| 304 } |
| 305 // Return the first non-NULL pointer found in this map for |
| 306 // a page number >= k. Returns NULL if no such number is found. |
| 307 void* Next(Number k) const { |
| 308 while (k < (1 << BITS)) { |
| 309 if (array_[k] != NULL) return array_[k]; |
| 310 k++; |
| 311 } |
| 312 return NULL; |
| 313 } |
| 314 }; |
| 315 #endif // WIN32 |
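The committed_ arithmetic above is easiest to verify with concrete numbers. The snippet below is a standalone check of the same sizing and indexing math for an assumed configuration (BITS = 20, kPageShift = 12); the constants are illustrative, not taken from common.h.

    // Standalone check of the committed_ bookkeeping math (assumed constants).
    #include <assert.h>
    #include <stddef.h>

    int main() {
      const int BITS = 20;                           // 4 GB address space of 4 KB pages
      const int kPageShift = 12;
      const size_t kPtr = sizeof(void*);             // 4 on the 32-bit builds this targets
      const size_t kLength = size_t(1) << BITS;      // entries in array_
      const size_t kArrayPages = (kLength * kPtr) >> kPageShift;    // pages spanned by array_
      const size_t kBitmapBytes = kPtr << (BITS - kPageShift - 3);  // sizeof(committed_)
      assert(kBitmapBytes * 8 == kArrayPages);       // one bit per page, as the ASSERT above
      // IsCommitted()/SetCommitted() address that bitmap byte-by-byte, bit-by-bit:
      size_t page_num = kArrayPages - 1;             // last page of array_
      size_t byte_index = page_num >> 3;             // which char of committed_
      unsigned bit = 1u << (page_num & 0x7);         // which bit within that char
      assert(byte_index < kBitmapBytes && bit != 0);
      return 0;
    }

With 4-byte pointers this works out to a 4 MB reserved array_ covering 1024 pages, tracked by a 128-byte committed_ bitmap; with 8-byte pointers the array doubles to 8 MB (2048 pages) and the bitmap to 256 bytes, and the assertion holds either way.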
| 316 |
| 317 |
117 // Two-level radix tree | 318 // Two-level radix tree |
118 template <int BITS> | 319 template <int BITS> |
119 class TCMalloc_PageMap2 { | 320 class TCMalloc_PageMap2 { |
120 private: | 321 private: |
121 // Put 32 entries in the root and (2^BITS)/32 entries in each leaf. | 322 // Put 32 entries in the root and (2^BITS)/32 entries in each leaf. |
122 static const int ROOT_BITS = 5; | 323 static const int ROOT_BITS = 5; |
123 static const int ROOT_LENGTH = 1 << ROOT_BITS; | 324 static const int ROOT_LENGTH = 1 << ROOT_BITS; |
124 | 325 |
125 static const int LEAF_BITS = BITS - ROOT_BITS; | 326 static const int LEAF_BITS = BITS - ROOT_BITS; |
126 static const int LEAF_LENGTH = 1 << LEAF_BITS; | 327 static const int LEAF_LENGTH = 1 << LEAF_BITS; |
(...skipping 187 matching lines...) |
314 } | 515 } |
315 // Advance to next interior entry | 516 // Advance to next interior entry |
316 k = ((k >> LEAF_BITS) + 1) << LEAF_BITS; | 517 k = ((k >> LEAF_BITS) + 1) << LEAF_BITS; |
317 } | 518 } |
318 } | 519 } |
319 return NULL; | 520 return NULL; |
320 } | 521 } |
321 }; | 522 }; |
322 | 523 |
323 #endif // TCMALLOC_PAGEMAP_H_ | 524 #endif // TCMALLOC_PAGEMAP_H_ |
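Since the review viewer elides most of TCMalloc_PageMap2's body, the lookup implied by the ROOT_BITS/LEAF_BITS constants above is sketched here for reference. The Leaf struct and root_ member follow upstream tcmalloc naming but are illustrative; only the index arithmetic is dictated by the constants visible in the diff.

    // Illustrative two-level lookup matching the ROOT_BITS/LEAF_BITS split.
    #include <stddef.h>
    #include <stdint.h>

    template <int BITS>
    struct TwoLevelSketch {
      static const int ROOT_BITS = 5;
      static const int ROOT_LENGTH = 1 << ROOT_BITS;
      static const int LEAF_BITS = BITS - ROOT_BITS;
      static const int LEAF_LENGTH = 1 << LEAF_BITS;

      struct Leaf { void* values[LEAF_LENGTH]; };   // one lazily allocated leaf
      Leaf* root_[ROOT_LENGTH];                     // NULL until a leaf is needed

      void* get(uintptr_t k) const {
        const uintptr_t i1 = k >> LEAF_BITS;        // which leaf
        const uintptr_t i2 = k & (LEAF_LENGTH - 1); // slot within that leaf
        if ((k >> BITS) > 0 || root_[i1] == NULL)   // out of range, or leaf absent
          return NULL;
        return root_[i1]->values[i2];
      }
    };

The Next() loop visible at the end of the diff advances the same way: when the current leaf yields nothing it jumps k to the start of the next leaf with k = ((k >> LEAF_BITS) + 1) << LEAF_BITS and tries again.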