Index: third_party/tcmalloc/page_heap.h
===================================================================
--- third_party/tcmalloc/page_heap.h (revision 0)
+++ third_party/tcmalloc/page_heap.h (revision 0)
@@ -0,0 +1,259 @@
+// Copyright (c) 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Sanjay Ghemawat <opensource@google.com>
+
+#ifndef TCMALLOC_PAGE_HEAP_H_
+#define TCMALLOC_PAGE_HEAP_H_
+
+#include <config.h>
+#include "common.h"
+#include "packed-cache-inl.h"
+#include "pagemap.h"
+#include "span.h"
+
+// This #ifdef should almost never be set.  Set NO_TCMALLOC_SAMPLES if
+// you're porting to a system where you really can't get a stacktrace.
+#ifdef NO_TCMALLOC_SAMPLES
+  // We use #define so code compiles even if you #include stacktrace.h somehow.
+# define GetStackTrace(stack, depth, skip) (0)
+#else
+# include <google/stacktrace.h>
+#endif
+
+
+namespace tcmalloc {
+
+// -------------------------------------------------------------------------
+// Map from page-id to per-page data
+// -------------------------------------------------------------------------
+
+// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
+// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
+// because sometimes the sizeclass is all the information we need.
+
+// Selector class -- general selector uses 3-level map
+template <int BITS> class MapSelector {
+ public:
+  typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
+  typedef PackedCache<BITS-kPageShift, uint64_t> CacheType;
+};
+
+// A two-level map for 32-bit machines
+template <> class MapSelector<32> {
+ public:
+  typedef TCMalloc_PageMap2<32-kPageShift> Type;
+  typedef PackedCache<32-kPageShift, uint16_t> CacheType;
+};
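+
+// For instance, on 64-bit hardware with 4K pages (assuming kPageShift ==
+// 12 from common.h), MapSelector<64>::Type works out to a three-level
+// TCMalloc_PageMap3<52>, while the 32-bit specialization makes do with a
+// two-level map and 16-bit cache entries.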
+
+// -------------------------------------------------------------------------
+// Page-level allocator
+//  * Eager coalescing
+//
+// Heap for page-level allocation.  We allow allocating and freeing a
+// contiguous run of pages (called a "span").
+// -------------------------------------------------------------------------
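+//
+// Illustrative use (a sketch with a hypothetical PageHeap* named
+// "pageheap"; callers are expected to hold the page-heap spinlock):
+//
+//   Span* span = pageheap->New(8);        // a run of 8 contiguous pages
+//   if (span != NULL) {
+//     ...                                 // hand the pages out
+//     pageheap->Delete(span);             // return them for coalescing
+//   }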
+
+class PageHeap {
+ public:
+  PageHeap();
+
+  // Allocate a run of "n" pages.  Returns NULL if out of memory.
+  // Caller should not pass "n == 0" -- instead, n should have
+  // been rounded up already.
+  Span* New(Length n);
+
+  // Delete the span "[p, p+n-1]".
+  // REQUIRES: span was returned by earlier call to New() and
+  //           has not yet been deleted.
+  void Delete(Span* span);
+
+  // Mark an allocated span as being used for small objects of the
+  // specified size-class.
+  // REQUIRES: span was returned by an earlier call to New()
+  //           and has not yet been deleted.
+  void RegisterSizeClass(Span* span, size_t sc);
+
+  // Split an allocated span into two spans: one of length "n" pages
+  // followed by another span of length "span->length - n" pages.
+  // Modifies "*span" to point to the first span of length "n" pages.
+  // Returns a pointer to the second span.
+  //
+  // REQUIRES: "0 < n < span->length"
+  // REQUIRES: span->location == IN_USE
+  // REQUIRES: span->sizeclass == 0
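+  //
+  // For example (hypothetical sizes): if "span" covers 10 pages,
+  // Split(span, 4) shrinks "span" to its first 4 pages and returns a
+  // new Span covering the remaining 6; both remain IN_USE.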
+  Span* Split(Span* span, Length n);
+
+  // Return the descriptor for the specified page.
+  inline Span* GetDescriptor(PageID p) const {
+    return reinterpret_cast<Span*>(pagemap_.get(p));
+  }
+
+  // Dump state to stderr
+  void Dump(TCMalloc_Printer* out);
+
+  // Return number of bytes allocated from system
+  inline uint64_t SystemBytes() const { return system_bytes_; }
+
+  // Return number of free bytes in heap
+  uint64_t FreeBytes() const {
+    return (static_cast<uint64_t>(free_pages_) << kPageShift);
+  }
+
+  bool Check();
+  // Like Check() but does some more comprehensive checking.
+  bool CheckExpensive();
+  bool CheckList(Span* list, Length min_pages, Length max_pages,
+                 int freelist);  // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST
+
+  // Release all pages on the free list for reuse by the OS.
+  void ReleaseFreePages();
+
+  // Return 0 if we have no information, or else the correct sizeclass for p.
+  // Reads and writes to pagemap_cache_ do not require locking.
+  // The entries are 64 bits on 64-bit hardware and 16 bits on
+  // 32-bit hardware, and we don't mind raciness as long as each read of
+  // an entry yields a valid entry, not a partially updated entry.
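+  //
+  // A typical fast path (sketch, assuming the usual pointer-to-PageID
+  // mapping): consult the cache before falling back to the full pagemap:
+  //
+  //   const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
+  //   size_t cl = GetSizeClassIfCached(p);
+  //   if (cl == 0) cl = GetDescriptor(p)->sizeclass;  // slower path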
+  size_t GetSizeClassIfCached(PageID p) const {
+    return pagemap_cache_.GetOrDefault(p, 0);
+  }
+  void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
+
+  // Attempt to release some unused free pages back to the system.
+  void Scavenge();
+
+ private:
+  // Allocate a big block of memory for the pagemap once the heap grows
+  // beyond 128MB.
+  static const size_t kPageMapBigAllocationThreshold = 128 << 20;
+
+  // Minimum number of pages to fetch from system at a time.  Must be
+  // significantly bigger than kBlockSize to amortize system-call
+  // overhead, and also to reduce external fragmentation.  Also, we
+  // should keep this value big because various incarnations of Linux
+  // have small limits on the number of mmap() regions per
+  // address-space.
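+  //
+  // E.g. with 4K pages (assuming kPageShift == 12), this is
+  // 1 << 8 == 256 pages, i.e. at least 1MB fetched from the system
+  // per growth step.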
+  static const int kMinSystemAlloc = 1 << (20 - kPageShift);
+
+  // For all span-lengths < kMaxPages we keep an exact-size list.
+  // REQUIRED: kMaxPages >= kMinSystemAlloc
+  static const size_t kMaxPages = kMinSystemAlloc;
+
+  // Pick the appropriate map and cache types based on pointer size
+  typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
+  typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
+  PageMap pagemap_;
+  mutable PageMapCache pagemap_cache_;
+
+  // We segregate spans of a given size into two circular linked
+  // lists: one for normal spans, and one for spans whose memory
+  // has been returned to the system.
+  struct SpanList {
+    Span normal;
+    Span returned;
+  };
+
+  // List of free spans of length >= kMaxPages
+  SpanList large_;
+
+  // Array mapping from span length to a doubly linked list of free spans
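+  //
+  // Roughly speaking, a request for n < kMaxPages pages is served from
+  // free_[n] when possible, preferring the "normal" list over the
+  // "returned" one, and falls back to longer lists and then large_.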
+  SpanList free_[kMaxPages];
+
+  // Number of pages kept in free lists
+  uintptr_t free_pages_;
+
+  // Bytes allocated from system
+  uint64_t system_bytes_;
+
+  bool GrowHeap(Length n);
+
+  // REQUIRES: span->length >= n
+  // REQUIRES: span->location != IN_USE
+  // Remove span from its free list, and move any leftover part of
+  // span into appropriate free lists.  Also update "span" to have
+  // length exactly "n" and mark it as non-free so it can be returned
+  // to the client.  After all that, decrease free_pages_ by n and
+  // return span.
+  Span* Carve(Span* span, Length n);
+
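+  // Record "span" under its first and last page, so that Delete() can
+  // look up neighboring spans via GetDescriptor() when coalescing.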
+  void RecordSpan(Span* span) {
+    pagemap_.set(span->start, span);
+    if (span->length > 1) {
+      pagemap_.set(span->start + span->length - 1, span);
+    }
+  }
+
+  // Allocate a large span of length == n.  If successful, returns a
+  // span of exactly the specified length.  Else, returns NULL.
+  Span* AllocLarge(Length n);
+
+  // Commits the span.
+  void CommitSpan(Span* span);
+
+#if DEFER_DECOMMIT
+  // Number of free committed pages that we want to keep around.
+  static const size_t kMinimumFreeCommittedPageCount = 512;  // 2MB (2**21 bytes) with 4K pages
+
+  // During a scavenge, we'll release up to a fraction of the free committed pages.
+#ifdef _WIN32
+  // We are slightly less aggressive in releasing memory on Windows for
+  // performance reasons.
+  static const int kMaxScavengeAmountFactor = 3;
+#else
+  static const int kMaxScavengeAmountFactor = 2;
+#endif
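+
+  // (So a factor of 2 should cap a single scavenge pass at roughly half
+  // of the free committed pages, and a factor of 3 at roughly a third.)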
+
+  // Decommits up to "to_decommit" pages' worth of spans from
+  // "span_list"; returns the number of pages decommitted.
+  uint64_t DecommitFromSpanList(SpanList* span_list, uint64_t to_decommit);
+
+  // Decommits the last span on "span_list"; returns the number of pages
+  // decommitted.
+  Length DecommitLastSpan(SpanList* span_list, Span* span);
+
+  // Number of pages kept in free lists that are still committed, i.e. the
+  // total number of pages in the "normal" lists.
+  uint64_t free_committed_pages_;
+
+  // Number of pages that we committed in the last scavenge wait interval.
+  uint64_t pages_committed_since_last_scavenge_;
+#else
+  // Incrementally release some memory to the system.
+  // IncrementalScavenge(n) is called whenever n pages are freed.
+  void IncrementalScavenge(Length n);
+
+  // Number of pages to deallocate before doing more scavenging
+  int64_t scavenge_counter_;
+#endif
+
+  // Index of last free list we scavenged
+  int scavenge_index_;
+};
+
+}  // namespace tcmalloc
+
+#endif  // TCMALLOC_PAGE_HEAP_H_