Index: third_party/tcmalloc/chromium/src/pagemap.h
diff --git a/third_party/tcmalloc/chromium/src/pagemap.h b/third_party/tcmalloc/chromium/src/pagemap.h
index 27cb3da050f23c46f3a058fefe66079fab41071b..0db01c4bc8bca5d2224446693e78515e4475ff00 100644
--- a/third_party/tcmalloc/chromium/src/pagemap.h
+++ b/third_party/tcmalloc/chromium/src/pagemap.h
@@ -56,6 +56,12 @@
 #else
 #include <sys/types.h>
 #endif
+#ifdef WIN32
+// TODO(jar): This is not needed when TCMalloc_PageMap1_LazyCommit has an API
+// supporting commit and reservation of memory.
+#include "common.h"
+#endif
+
 #include "internal_logging.h"  // for ASSERT
 // Single-level array
@@ -114,6 +120,201 @@ class TCMalloc_PageMap1 {
   }
 };
+#ifdef WIN32
+// Lazy commit, single-level array.
+// Very similar to PageMap1, except the page map is only committed as needed.
+// Since we don't return memory to the OS, the committed portion of the map
+// will only grow, and we'll only be called to Ensure() when we really grow
+// the heap.  We maintain a bit map to help us deduce if we've already
+// committed a range in our map.
+template <int BITS>
+class TCMalloc_PageMap1_LazyCommit {
+ private:
+  // Dimension of our page map array_.
+  static const int LENGTH = 1 << BITS;
+
+  // The page map array that sits in reserved virtual space.  Pages of this
+  // array are committed as they are needed.  For each page of virtual
+  // memory, we potentially have a pointer to a span instance.
+  void** array_;
+
+  // A bit vector that allows us to deduce what pages in array_ are
+  // committed.  Note that 2^3 = 8 bits per char, and hence the use of the
+  // magical "3" in the array range gives us the effective "divide by 8".
+  char committed_[sizeof(void*) << (BITS - kPageShift - 3)];
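+  // For a concrete (purely illustrative) sizing: with 4-byte pointers,
+  // BITS == 20, and kPageShift == 12 (4KB pages), array_ spans
+  // 4 << 20 == 4MB, i.e. 1K pages, so committed_ needs 1K bits, which is
+  // sizeof(void*) << (20 - 12 - 3) == 128 bytes.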
+
+  // Given an |index| into |array_|, find the page number in |array_| that
+  // holds that element.
+  size_t ContainingPage(size_t index) const {
+    return (index * sizeof(*array_)) >> kPageShift;
+  }
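+  // Example (again assuming 4-byte pointers and 4KB pages): index 1000 is at
+  // byte offset 4000, i.e. page 0 of array_, while index 1024 is at byte
+  // offset 4096, i.e. page 1.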
+
+  // Find out if the given page_num index in array_ is in committed memory.
+  bool IsCommitted(size_t page_num) const {
+    return committed_[page_num >> 3] & (1 << (page_num & 0x7));
+  }
+
+  // Remember that the given page_num index in array_ is in committed memory.
+  void SetCommitted(size_t page_num) {
+    committed_[page_num >> 3] |= (1 << (page_num & 0x7));
+  }
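+  // Illustration of the bit math above: page_num 11 lives in
+  // committed_[11 >> 3] == committed_[1], at bit (11 & 0x7) == 3.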
+
+ public:
+  typedef uintptr_t Number;
+
+  explicit TCMalloc_PageMap1_LazyCommit(void* (*allocator)(size_t)) {
+    // TODO(jar): We need a reservation function, but the current API to this
+    // class only provides an allocator.
+    // Reserve virtual address space only (no committed memory yet).  We will
+    // commit pages as necessary.
+    array_ = reinterpret_cast<void**>(VirtualAlloc(
+        NULL, sizeof(*array_) << BITS, MEM_RESERVE, PAGE_READWRITE));
+
+    // Make sure committed_ has exactly one bit for each page of array_.
+    ASSERT(sizeof(committed_) * 8 == (LENGTH * sizeof(*array_)) >> kPageShift);
+    // Indicate that none of the pages of array_ have been committed yet.
+    memset(committed_, 0, sizeof(committed_));
+  }
+
+  // Ensure that the map contains initialized and committed entries in array_
+  // to describe pages "x .. x+n-1".
+  // Returns true if successful, false if we could not ensure this.
+  // If we have to commit more memory in array_ (which also clears said
+  // memory), then we'll set some of the bits in committed_ to remember this
+  // fact.
+  // Only the bits of committed_ near the end-points of calls to Ensure() are
+  // ever set, as calls to Ensure() never have overlapping ranges other than
+  // at their end-points.
+  //
+  // Example: Suppose the OS allocates memory in pages including 40...50, and
+  // later the OS allocates memory in pages 51...83.  When the first
+  // allocation of 40...50 is observed, Ensure() is called for the page range
+  // (39, 51).  The range given in the arguments is extended so that tcmalloc
+  // can look to see if adjacent pages are part of a span that can be
+  // coalesced.  Later, when pages 51...83 are allocated, Ensure() is called
+  // for the page range (50, 84), broadened again for the same reason.
+  //
+  // After the above, we would NEVER get a call such as Ensure(45, 60), as
+  // that overlaps with the interior of prior ensured regions.  We ONLY get
+  // an Ensure() call when the OS has allocated memory, and since we NEVER
+  // give memory back to the OS, the OS can't possibly allocate the same
+  // region to us twice, and so can't induce an Ensure() call on the interior
+  // of a previously ensured region.
+  //
+  // Also note that OS allocations are NOT guaranteed to be consecutive
+  // (there may be "holes" where code etc. uses the virtual addresses), nor
+  // to arrive in any particular order, such as lowest to highest or vice
+  // versa (other independent allocation systems in the process may be
+  // performing VirtualAlloc and VirtualFree calls asynchronously).
+  bool Ensure(Number x, size_t n) {
+    if (n > LENGTH - x)
+      return false;  // Range would run past the end of the map.
+    ASSERT(n > 0);
+
+    // For a given page number in memory, calculate what page in array_ needs
+    // to be memory resident.  Note that we really only need a few bytes in
+    // array_ for each page of virtual space we have to map, but we can only
+    // commit whole pages of array_.  For instance, a 4K page of array_ holds
+    // about 1K entries, and hence can map about 1K pages, or a total of
+    // about 4MB typically.  As a result, it is possible that the first entry
+    // in array_, and the n'th entry in array_, will sit in the same page of
+    // array_.
+    size_t first_page = ContainingPage(x);
+    size_t last_page = ContainingPage(x + n - 1);
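+    // E.g., with 4-byte pointers and 4KB pages (illustrative values again),
+    // Ensure(1023, 2) touches entries 1023 and 1024, which fall in pages 0
+    // and 1 of array_ respectively, so first_page == 0 and last_page == 1.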
+
+    // Check at each boundary, to see if we need to commit at that end.  Some
+    // other neighbor may have already forced us to commit at either or both
+    // boundaries.
+    if (IsCommitted(first_page)) {
+      if (first_page == last_page) return true;
+      ++first_page;
+      if (IsCommitted(first_page)) {
+        if (first_page == last_page) return true;
+        ++first_page;
+      }
+    }
+
+    if (IsCommitted(last_page)) {
+      if (first_page == last_page) return true;
+      --last_page;
+      if (IsCommitted(last_page)) {
+        if (first_page == last_page) return true;
+        --last_page;
+      }
+    }
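+
+    // At most two pages at each boundary can already be committed: a prior
+    // adjoining Ensure() only ever set bits for the two outermost pages at
+    // its end-points (see the SetCommitted() calls below), so after trimming
+    // up to two pages from each end, what remains is entirely uncommitted.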
+
+    ASSERT(!IsCommitted(last_page));
+    ASSERT(!IsCommitted(first_page));
+
+    void* start = reinterpret_cast<char*>(array_) + (first_page << kPageShift);
+    size_t length = (last_page - first_page + 1) << kPageShift;
+
+#ifndef NDEBUG
+    // Validate that we are committing new sections, and hence won't clear
+    // any existing data.
+    MEMORY_BASIC_INFORMATION info = {0};
+    size_t result = VirtualQuery(start, &info, sizeof(info));
+    ASSERT(result);
+    ASSERT(0 == (info.State & MEM_COMMIT));  // It starts out uncommitted.
+    ASSERT(info.RegionSize >= length);       // Entire length is uncommitted.
+#endif
+
+    // TODO(jar): We need a commit that automatically tallies metadata_bytes.
+    TCMalloc_SystemCommit(start, length);
+    tcmalloc::increment_metadata_system_bytes(length);
+
+#ifndef NDEBUG
+    result = VirtualQuery(start, &info, sizeof(info));
+    ASSERT(result);
+    ASSERT(0 != (info.State & MEM_COMMIT));  // Now it is committed.
+    ASSERT(info.RegionSize >= length);       // Entire length is committed.
+#endif
+
+    // As noted in the large comment/example describing this method, we will
+    // never be called with a range of pages that falls strictly inside this
+    // |first_page| to |last_page| range.
+    // As a result, we only need to set bits at each end of that range, and
+    // one page inside each end.
+    SetCommitted(first_page);
+    if (first_page < last_page) {
+      SetCommitted(last_page);
+      SetCommitted(first_page + 1);  // These may be duplicates now.
+      SetCommitted(last_page - 1);
+    }
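+    // Marking just the two outermost pages at each end suffices because a
+    // future adjoining Ensure() probes at most two pages inward from its own
+    // boundary (see the IsCommitted() checks above), and, by the contract
+    // described earlier, no call ever lands strictly in the interior.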
+
+    return true;
+  }
+
+  // This is a premature call to get all the meta-memory allocated, so as to
+  // avoid virtual space fragmentation.  Since we pre-reserved all memory, we
+  // don't need to do anything here (we won't fragment virtual space).
+  void PreallocateMoreMemory() {}
+
+  // Return the current value for KEY.  Returns NULL if not yet set,
+  // or if k is out of range.
+  void* get(Number k) const {
+    if ((k >> BITS) > 0) {
+      return NULL;
+    }
+    return array_[k];
+  }
+
+  // REQUIRES "k" is in range "[0,2^BITS-1]".
+  // REQUIRES "k" has been ensured before.
+  //
+  // Sets the value for KEY.
+  void set(Number k, void* v) {
+    array_[k] = v;
+  }
+
+  // Return the first non-NULL pointer found in this map for
+  // a page number >= k.  Returns NULL if no such number is found.
+  void* Next(Number k) const {
+    while (k < (1 << BITS)) {
+      if (array_[k] != NULL) return array_[k];
+      k++;
+    }
+    return NULL;
+  }
+};
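+
+// A minimal usage sketch (illustrative only: the names MetaDataAlloc and
+// span_ptr are placeholders, not part of this patch):
+//   TCMalloc_PageMap1_LazyCommit<20> map(MetaDataAlloc);
+//   map.Ensure(0x100, 2);         // commit backing for entries 0x100..0x101
+//   map.set(0x100, span_ptr);     // record a span for page 0x100
+//   void* span = map.get(0x100);  // NULL if never set
+// Note that the constructor's allocator argument is currently unused (see
+// the TODO in the constructor); the map reserves its own virtual space
+// directly via VirtualAlloc.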
+#endif  // WIN32
+
+
 // Two-level radix tree
 template <int BITS>
 class TCMalloc_PageMap2 {