Chromium Code Reviews
| Index: base/memory/persistent_memory_allocator.cc |
| diff --git a/base/memory/persistent_memory_allocator.cc b/base/memory/persistent_memory_allocator.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..13df2300271ec03ea3f97f8046b48f07160159e0 |
| --- /dev/null |
| +++ b/base/memory/persistent_memory_allocator.cc |
| @@ -0,0 +1,550 @@ |
| +// Copyright (c) 2015 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "base/memory/persistent_memory_allocator.h" |
| + |
| +#include <assert.h> |
| +#include <algorithm> |
| + |
| +#include "base/logging.h" |
| +#include "base/metrics/histogram_macros.h" |
| + |
| +// All integer constants in this file are signed because Atomic32 is signed |
| +// and keeping all others consistent with this avoids a lot of unnecessary |
| +// casting that would otherwise be needed just to avoid signed/unsigned |
| +// compiler errors. This means an occasional cast of a constant from |
| +// sizeof() to "int" but that is far simpler than the alternative. Only the |
| +// external interface uses size_t, for simplicity to the caller. |
| + |
| +namespace { |
| + |
| +// Required range of memory segment sizes. It has to fit in a signed 32-bit |
| +// number and should be a power of 2 in order to accommodate almost any page |
| +// size. |
| +const int32_t kSegmentMinSize = 1 << 10; // 1 KiB |
| +const int32_t kSegmentMaxSize = 1 << 30; // 1 GiB |
| + |
| +// All allocations and data-structures must be aligned to this byte boundary. |
| +// Alignment as large as the physical bus between CPU and RAM is _required_ |
| +// for some architectures, is simply more efficient on other CPUs, and is |
| +// generally a Good Idea(tm) for all platforms as it reduces/eliminates the |
| +// chance that a type will span cache lines. Alignment mustn't be less |
| +// than 8 to ensure proper alignment for all types. The rest is a balance |
| +// between reducing spans across multiple cache lines and wasted space spent |
| +// padding out allocations. An alignment of 16 would ensure that the block |
| +// header structure always sits in a single cache line. An average of about |
| +// 1/2 this value will be wasted with every allocation. |
| +const int32_t kAllocAlignment = 8; |
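For illustration only (not part of this patch): the round-up that Allocate() performs later in this file, shown for a hypothetical 13-byte request and the 16-byte BlockHeader defined below:

    // Illustrative arithmetic only; BlockHeader and kAllocAlignment are the
    // declarations from this file.
    int32_t size = static_cast<int32_t>(13 + sizeof(BlockHeader));   // 13 + 16 = 29
    size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);  // rounds up to 32

On average about kAllocAlignment/2 bytes per allocation are lost to this padding, which is the trade-off the comment above describes.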
| + |
| +// A constant (random) value placed in the shared metadata to identify |
| +// an already initialized memory segment. |
| +const int32_t kGlobalCookie = 0x408305DC; |
| + |
| +// The current version of the metadata. If updates are made that change |
| +// the metadata, the version number can be queried to operate in a backward- |
| +// compatible manner until the memory segment is completely re-initialized. |
| +const int32_t kGlobalVersion = 1; |
| + |
| +// Constant values placed in the block headers to indicate its state. |
| +const int32_t kBlockCookieFree = 0; |
| +const int32_t kBlockCookieQueue = 1; |
| +const int32_t kBlockCookieWasted = -1; |
| +const int32_t kBlockCookieAllocated = 0xC8799269; |
| + |
| +// TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char> |
| +// types rather than a combined bitfield. |
| + |
| +// Flags stored in the flags_ field of the SharedMetaData structure below. |
| +enum : int32_t { |
| + kFlagCorrupt = 1 << 0, |
| + kFlagFull = 1 << 1 |
| +}; |
| + |
| +bool CheckFlag(volatile base::subtle::Atomic32* flags, int flag) { |
| + base::subtle::Atomic32 loaded_flags = base::subtle::Acquire_Load(flags); |
| + return (loaded_flags & flag) != 0; |
| +} |
| + |
| +void SetFlag(volatile base::subtle::Atomic32* flags, int flag) { |
| + for (;;) { |
| + base::subtle::Atomic32 loaded_flags = base::subtle::Acquire_Load(flags); |
| + base::subtle::Atomic32 new_flags = |
| + (loaded_flags & ~flag) | flag; |
| + if (base::subtle::Release_CompareAndSwap( |
| + flags, loaded_flags, new_flags) == loaded_flags) { |
| + break; |
| + } |
| + } |
| +} |
| + |
| +} // namespace |
| + |
| +namespace base { |
| + |
| +// The block-header is placed at the top of every allocation within the |
| +// segment to describe the data that follows it. |
| +struct PersistentMemoryAllocator::BlockHeader { |
| + int32_t size; // Number of bytes in this block, including header. |
| + int32_t cookie; // Constant value indicating completed allocation. |
| + uint32_t type_id; // A number provided by caller indicating data type. |
| + subtle::Atomic32 next; // Pointer to the next block when iterating. |
| +}; |
| + |
| +// The shared metadata exists once at the top of the memory segment to |
| +// describe the state of the allocator to all processes. |
| +struct PersistentMemoryAllocator::SharedMetadata { |
| + int32_t cookie; // Some value that indicates complete initialization. |
| + int32_t size; // Total size of memory segment. |
| + int32_t page_size; // Paging size within memory segment. |
| + int32_t version; // Version code so upgrades don't break. |
| + subtle::Atomic32 freeptr; // Offset/ref to first free space in the segment. |
| + subtle::Atomic32 flags; // Bitfield of information flags. |
| + int32_t name; // Reference to stored name string. |
| + |
| + // The "iterable" queue is an M&S Queue as described here, append-only: |
| + // https://www.research.ibm.com/people/m/michael/podc-1996.pdf |
| + subtle::Atomic32 tailptr; // Last block available for iteration. |
| + BlockHeader queue; // Empty block for linked-list head/tail. (must be last) |
| +}; |
| + |
| +// The "queue" block header is used to detect "last node" so that zero/null |
| +// can be used to indicate that it hasn't been added at all. It is part of |
| +// the SharedMetadata structure which itself is always located at offset zero. |
| +const PersistentMemoryAllocator::Reference |
| + PersistentMemoryAllocator::kReferenceQueue = |
| + offsetof(SharedMetadata, queue); |
| +const PersistentMemoryAllocator::Reference |
| + PersistentMemoryAllocator::kReferenceNull = 0; |
| + |
| +PersistentMemoryAllocator::PersistentMemoryAllocator(void* base, |
| + size_t size, |
| + size_t page_size, |
| + const std::string& name) |
| + : mem_base_(static_cast<char*>(base)), |
| + mem_size_(static_cast<int32_t>(size)), |
| + mem_page_(static_cast<int32_t>((page_size ? page_size : size))), |
| + corrupt_(0), |
| + allocs_histogram_(nullptr), |
| + used_histogram_(nullptr) { |
| + static_assert(sizeof(BlockHeader) % kAllocAlignment == 0, |
| + "BlockHeader is not a multiple of kAllocAlignment"); |
| + static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0, |
| + "SharedMetadata is not a multiple of kAllocAlignment"); |
| + |
| + CHECK(base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0); |
| + CHECK(size >= kSegmentMinSize && size <= kSegmentMaxSize && |
| + size % kAllocAlignment == 0); |
| + CHECK(page_size == 0 || size % page_size == 0); |
| + |
| + if (shared_meta()->cookie != kGlobalCookie) { |
| + // This block is only executed when a completely new memory segment is |
| + // being initialized. It's unshared and single-threaded... |
| + volatile BlockHeader* const first_block = |
| + reinterpret_cast<volatile BlockHeader*>(mem_base_ + |
| + sizeof(SharedMetadata)); |
| + if (shared_meta()->cookie != 0 || |
| + shared_meta()->size != 0 || |
| + shared_meta()->version != 0 || |
| + subtle::NoBarrier_Load(&shared_meta()->freeptr) != 0 || |
| + subtle::NoBarrier_Load(&shared_meta()->flags) != 0 || |
| + shared_meta()->name != 0 || |
| + shared_meta()->tailptr != 0 || |
| + shared_meta()->queue.cookie != 0 || |
| + subtle::NoBarrier_Load(&shared_meta()->queue.next) != 0 || |
| + first_block->size != 0 || |
| + first_block->cookie != 0 || |
| + first_block->type_id != 0 || |
| + first_block->next != 0) { |
| + // ...or something malicious has been playing with the metadata. |
| + NOTREACHED(); |
| + SetCorrupt(); |
| + } |
| + |
| + // This is still safe to do even if corruption has been detected. |
| + shared_meta()->cookie = kGlobalCookie; |
| + shared_meta()->size = mem_size_; |
| + shared_meta()->page_size = mem_page_; |
| + shared_meta()->version = kGlobalVersion; |
| + subtle::NoBarrier_Store(&shared_meta()->freeptr, sizeof(SharedMetadata)); |
| + |
| + // Set up the queue of iterable allocations. |
| + shared_meta()->queue.size = sizeof(BlockHeader); |
| + shared_meta()->queue.cookie = kBlockCookieQueue; |
| + subtle::NoBarrier_Store(&shared_meta()->queue.next, kReferenceQueue); |
| + subtle::NoBarrier_Store(&shared_meta()->tailptr, kReferenceQueue); |
| + |
| + // Allocate space for the name so other processes can learn it. |
| + if (!name.empty()) { |
| + const size_t name_length = name.length() + 1; |
| + shared_meta()->name = Allocate(name_length, 0); |
| + char* name_cstr = GetAsObject<char>(shared_meta()->name, 0); |
| + if (name_cstr) |
| + strcpy(name_cstr, name.c_str()); |
| + } |
| + } else { |
| + // The allocator is attaching to a previously initialized segment of |
| + // memory. Make sure the embedded data matches what has been passed. |
| + if (shared_meta()->size != mem_size_ || |
| + shared_meta()->page_size != mem_page_) { |
| + NOTREACHED(); |
| + SetCorrupt(); |
| + } |
| + } |
| + |
| + // Metrics are created here so there is no recursion from Allocate |
| + // trying to update a histogram that needs to be created and in turn |
| + // calls Allocate again. |
| + // Some metrics are only active on the primary owner. |
| + if (!name.empty()) { |
| + used_histogram_ = Histogram::FactoryGet( |
| + name + ".UsedKiB", 1, 256 << 10, 100, HistogramBase::kNoFlags); |
| + } |
| + |
| + // Other metrics are active on all users of the memory segment. |
| + Reference name_ref = shared_meta()->name; |
| + char* name_cstr = GetAsObject<char>(name_ref, 0); |
| + if (name_cstr) { |
| + size_t name_length = GetAllocSize(name_ref); |
| + while (name_length > 0 && name_cstr[name_length - 1] != '\0') |
| + --name_length; |
| + if (name_length > 0) { |
| + std::string shared_name(name_cstr, name_length); |
| + allocs_histogram_ = Histogram::FactoryGet( |
| + shared_name + ".Allocs", 1, 10000, 50, HistogramBase::kNoFlags); |
| + } |
| + } |
| +} |
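For illustration only (not part of this patch; the buffer, sizes, and name are invented): attaching an allocator to an already zeroed, suitably aligned region, which is what the constructor above expects.

    #include <vector>

    void AttachExample() {
      // Stand-in for a shared-memory mapping: 1 MiB, zero-filled; heap memory
      // is sufficiently aligned for kAllocAlignment.
      std::vector<char> buffer(1 << 20, 0);
      base::PersistentMemoryAllocator allocator(
          buffer.data(), buffer.size(), 4096 /* page_size */, "ExampleShared");
      // A second user attaching to the same region with matching size and
      // page_size would take the "else" branch above instead of initializing.
    }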
| + |
| +PersistentMemoryAllocator::~PersistentMemoryAllocator() { |
| + // It's strictly forbidden to do any memory access here in case there is |
| + // some issue with the underlying memory segment. The "Local" allocator |
| + // makes use of this to allow deletion of the segment on the heap from |
| + // within its destructor. |
| +} |
| + |
| +size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) { |
| + volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false); |
| + if (!block) |
| + return 0; |
| + int32_t size = block->size; |
| + // Header was verified by GetBlock() but a malicious actor could change |
| + // the value between there and here. Check it again. |
| + if (size <= (int)sizeof(BlockHeader) || ref + size >= mem_size_) |
| + return 0; |
| + return static_cast<size_t>(size - sizeof(BlockHeader)); |
| +} |
| + |
| +uint32_t PersistentMemoryAllocator::GetType(Reference ref) { |
| + volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false); |
| + if (!block) |
| + return 0; |
| + return block->type_id; |
| +} |
| + |
| +void PersistentMemoryAllocator::SetType(Reference ref, uint32_t type_id) { |
| + volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false); |
| + if (!block) |
| + return; |
| + block->type_id = type_id; |
| +} |
| + |
| +int32_t PersistentMemoryAllocator::Allocate(size_t usize, uint32_t type_id) { |
| + // Validate usize to ensure it won't overflow when used as signed 32-bit. |
| + if (usize > (size_t)kSegmentMaxSize - sizeof(BlockHeader)) { |
| + NOTREACHED(); |
| + return kReferenceNull; |
| + } |
| + |
| + // Round up the requested size, plus header, to the next allocation alignment. |
| + int32_t size = static_cast<int32_t>(usize + sizeof(BlockHeader)); |
| + size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1); |
| + if (size <= (int)sizeof(BlockHeader) || size > mem_page_) { |
| + NOTREACHED(); |
| + return kReferenceNull; |
| + } |
| + |
| +  // Allocation is lockless so we do all our calculation and then, if the |
| +  // final store indicates a change has occurred since we started, scrap |
| +  // everything and start over. |
| + for (;;) { |
| + if (IsCorrupt()) |
| + return kReferenceNull; |
| + |
| + // Get the current start of unallocated memory. Other threads may |
| + // update this at any time and cause us to retry these operations. |
| + const int32_t freeptr = subtle::NoBarrier_Load(&shared_meta()->freeptr); |
| + if (freeptr + size > mem_size_) { |
| + SetFlag(&shared_meta()->flags, kFlagFull); |
| + return kReferenceNull; |
| + } |
| + |
| + // Get pointer to the "free" block. It doesn't even have a header; pass |
| +    // -sizeof(header) so accounting for that will yield an expected size of |
| + // zero which is what will be stored at that location. If something |
| + // has been allocated since the load of freeptr above, it is still safe |
| + // as nothing will be written to that location until after the CAS below. |
| + volatile BlockHeader* const block = GetBlock(freeptr, 0, 0, false, true); |
| + if (!block) { |
| + SetCorrupt(); |
| + return kReferenceNull; |
| + } |
| + |
| + // An allocation cannot cross page boundaries. If it would, create a |
| + // "wasted" block and begin again at the top of the next page. This |
| + // area could just be left empty but we fill in the block header just |
| +    // for completeness' sake. |
| + const int32_t page_free = mem_page_ - freeptr % mem_page_; |
| + if (size > page_free) { |
| + if (page_free <= (int)sizeof(BlockHeader)) { |
| + SetCorrupt(); |
| + return kReferenceNull; |
| + } |
| + const int32_t new_freeptr = freeptr + page_free; |
| + if (subtle::NoBarrier_CompareAndSwap( |
| + &shared_meta()->freeptr, freeptr, new_freeptr) == freeptr) { |
| + block->size = page_free; |
| + block->cookie = kBlockCookieWasted; |
| + } |
| + continue; |
| + } |
| + |
| + // Don't leave a slice at the end of a page too small for anything. This |
| + // can result in an allocation up to two alignment-sizes greater than the |
| + // minimum required by requested-size + header + alignment. |
| + if (page_free - size < (int)(sizeof(BlockHeader) + kAllocAlignment)) |
| + size = page_free; |
| + |
| + const int32_t new_freeptr = freeptr + size; |
| + if (new_freeptr > mem_size_) { |
| + SetCorrupt(); |
| + return kReferenceNull; |
| + } |
| + |
| + if (subtle::NoBarrier_CompareAndSwap( |
| + &shared_meta()->freeptr, freeptr, new_freeptr) != freeptr) { |
| + // Another thread must have completed an allocation while we were working. |
| + // Try again. |
| + continue; |
| + } |
| + |
| + // Record this allocation in usage stats (if active). This is safe |
| + // to call at this point because the allocation is complete. |
| + if (allocs_histogram_) |
| + allocs_histogram_->Add(static_cast<HistogramBase::Sample>(usize)); |
| + |
| + // Given that all memory was zeroed before ever being given to an instance |
| +  // of this class and given that we only allocate in a monotonic fashion |
| + // going forward, it must be that the newly allocated block is completely |
| + // full of zeros. If we find anything in the block header that is NOT a |
| + // zero then something must have previously run amuck through memory, |
| + // writing beyond the allocated space and into unallocated space. |
| + if (block->size != 0 || |
| + block->cookie != kBlockCookieFree || |
| + block->type_id != 0 || |
| + subtle::NoBarrier_Load(&block->next) != 0) { |
| + SetCorrupt(); |
| + return kReferenceNull; |
| + } |
| + |
| + block->size = size; |
| + block->cookie = kBlockCookieAllocated; |
| + block->type_id = type_id; |
| + return freeptr; |
| + } |
| +} |
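For illustration only (not part of this patch; MyData, kMyDataType, and StoreExample are invented names): a caller's typical sequence is Allocate(), fill in the data via GetAsObject<>(), then publish it with MakeIterable().

    struct MyData {       // hypothetical caller-defined payload (8 bytes, aligned)
      int32_t value;
      int32_t reserved;
    };

    void StoreExample(base::PersistentMemoryAllocator* allocator) {
      const uint32_t kMyDataType = 0x1234;  // caller-chosen type id
      base::PersistentMemoryAllocator::Reference ref =
          allocator->Allocate(sizeof(MyData), kMyDataType);
      if (!ref)
        return;  // segment full or corrupt
      MyData* data = allocator->GetAsObject<MyData>(ref, kMyDataType);
      if (!data)
        return;  // reference failed validation
      data->value = 42;              // fill the payload first...
      allocator->MakeIterable(ref);  // ...then publish it to iterators.
    }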
| + |
| +void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) { |
| + int32_t remaining = |
| + mem_size_ - subtle::NoBarrier_Load(&shared_meta()->freeptr); |
| + meminfo->total = mem_size_; |
| + meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader); |
| +} |
| + |
| +void PersistentMemoryAllocator::MakeIterable(Reference ref) { |
| + if (IsCorrupt()) |
| + return; |
| + volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false); |
| + if (!block) // invalid reference |
| + return; |
| + if (subtle::Acquire_Load(&block->next) != 0) // previously set iterable |
| + return; |
| + subtle::Release_Store(&block->next, kReferenceQueue); // will be tail block |
| + |
| + // Try to add this block to the tail of the queue. May take multiple tries. |
| + int32_t tail; |
| + for (;;) { |
| + // Acquire the current tail-pointer released by previous call to this |
| + // method and validate it. |
| + tail = subtle::Acquire_Load(&shared_meta()->tailptr); |
| + block = GetBlock(tail, 0, 0, true, false); |
| + if (!block) { |
| + SetCorrupt(); |
| + return; |
| + } |
| + |
| + // Try to insert the block at the tail of the queue. The tail node always |
| + // has an existing value of kReferenceQueue; if that is not the value |
| + // returned, another thread has acted in the meantime. |
| + int32_t next = subtle::Release_CompareAndSwap( |
| + &block->next, kReferenceQueue, ref); |
| + if (next == kReferenceQueue) { |
| + // Update the tail pointer to the new offset. If the "else" clause did |
| + // not exist, then this could be a simple Release_Store to set the new |
| + // value but because it does, it's possible that other threads could add |
| + // one or more nodes at the tail before reaching this point. We don't |
| + // have to check the return value because it either operates correctly |
| + // or the exact same operation has already been done (by the "else" |
| + // clause). |
| + subtle::Release_CompareAndSwap(&shared_meta()->tailptr, tail, ref); |
| + return; |
| + } else { |
| + // In the unlikely case that a thread crashed or was killed between the |
| + // update of "next" and the update of "tailptr", it is necessary to |
| + // perform the operation that would have been done. There's no explicit |
| + // check for crash/kill which means that this operation may also happen |
| + // even when the other thread is in perfect working order which is what |
| + // necessitates the CompareAndSwap above. |
| + subtle::Release_CompareAndSwap(&shared_meta()->tailptr, tail, next); |
| + } |
| + } |
| +} |
| + |
| +void PersistentMemoryAllocator::CreateIterator(Iterator* state) { |
| + state->last = kReferenceQueue; |
| + state->niter = 0; |
| +} |
| + |
| +int32_t PersistentMemoryAllocator::GetNextIterable(Iterator* state, |
| + uint32_t* type_id) { |
| + volatile BlockHeader* block = GetBlock(state->last, 0, 0, true, false); |
| + if (!block) // invalid iterator state |
| + return kReferenceNull; |
| + |
| + // The compiler and CPU can freely reorder all memory accesses on which |
| + // there are no dependencies. It could, for example, move the load of |
| + // "freeptr" above this point because there are no explicit dependencies |
| +  // between it and "next". If it did, however, then another block could |
| +  // be queued after that but before the following load, meaning there is |
| +  // one more queued block than the later "detect a loop by counting more |
| +  // blocks than could fit before freeptr" check will allow. |
| + // |
| + // By "acquiring" the "next" value here, it's synchronized to the enqueue |
| + // of the node which in turn is synchronized to the allocation (which sets |
| + // freeptr). Thus, the scenario above cannot happen. |
| + int32_t next = subtle::Acquire_Load(&block->next); |
| + block = GetBlock(next, 0, 0, false, false); |
| + if (!block) // no next allocation in queue |
| + return kReferenceNull; |
| + |
| + // Memory corruption could cause a loop in the list. We need to detect |
| + // that so as to not cause an infinite loop in the caller. We do this |
| + // simply by making sure we don't iterate more than the absolute maximum |
| + // number of allocations that could have been made. Callers are likely |
| + // to loop multiple times before it is detected but at least it stops. |
| + int32_t freeptr = std::min(subtle::Acquire_Load(&shared_meta()->freeptr), |
| + mem_size_); |
| + if (state->niter > freeptr / (sizeof(BlockHeader) + kAllocAlignment)) { |
| + SetCorrupt(); |
| + return kReferenceNull; |
| + } |
| + |
| + state->last = next; |
| + state->niter++; |
| + *type_id = block->type_id; |
| + |
| + return next; |
| +} |
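Again for illustration only (ReadExample is an invented name): consuming the iterable queue with the iterator API above.

    void ReadExample(base::PersistentMemoryAllocator* allocator) {
      base::PersistentMemoryAllocator::Iterator iter;
      allocator->CreateIterator(&iter);

      uint32_t type_id = 0;
      base::PersistentMemoryAllocator::Reference ref;
      while ((ref = allocator->GetNextIterable(&iter, &type_id)) != 0) {
        // Each |ref| was published via MakeIterable(); |type_id| tells the
        // reader how to interpret the block (e.g. the kMyDataType above).
      }
    }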
| + |
| +// The "corrupted" state is held both locally and globally (shared). The |
| +// shared flag can't be trusted since a malicious actor could overwrite it. |
| +// The local version is immune to foreign actors. Thus, if the flag is ever |
| +// seen set in shared memory, copy it locally and, once set locally, always |
| +// restore it globally. |
| +void PersistentMemoryAllocator::SetCorrupt() { |
| + CHECK(corrupt_.is_lock_free()); |
|
JF 2015/11/20 20:43:31: It would be useful to explain why this is here (mu
bcwhite 2015/11/23 16:48:22: Done.
|
| + LOG(ERROR) << "Corruption detected in shared-memory segment."; |
| + corrupt_.store(true); |
| + SetFlag(&shared_meta()->flags, kFlagCorrupt); |
| +} |
| + |
| +bool PersistentMemoryAllocator::IsCorrupt() { |
| + if (corrupt_.load() || CheckFlag(&shared_meta()->flags, kFlagCorrupt)) { |
| + SetCorrupt(); // Make sure all indicators are set. |
| + return true; |
| + } |
| + return false; |
| +} |
| + |
| +bool PersistentMemoryAllocator::IsFull() { |
| + return CheckFlag(&shared_meta()->flags, kFlagFull); |
| +} |
| + |
| +// Dereference a block |ref| and ensure that it's valid for the desired |
| +// |type_id| and |size|. |special| indicates that we may try to access block |
| +// headers not available to callers but still accessed by this module. By |
| +// having internal dereferences go through this same function, the allocator |
| +// is hardened against corruption. |
| +volatile PersistentMemoryAllocator::BlockHeader* |
| +PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id, |
| + int32_t size, bool queue_ok, bool free_ok) { |
| + // Validation of parameters. |
| + if (ref % kAllocAlignment != 0) |
| + return nullptr; |
| + if (ref < (int)(queue_ok ? kReferenceQueue : sizeof(SharedMetadata))) |
| + return nullptr; |
| + size += sizeof(BlockHeader); |
| + if (ref + size > mem_size_) |
| + return nullptr; |
| + |
| + // Validation of referenced block-header. |
| + if (!free_ok) { |
| + int32_t freeptr = subtle::NoBarrier_Load(&shared_meta()->freeptr); |
| + if (ref + size > freeptr) |
| + return nullptr; |
| + volatile BlockHeader* const block = |
| + reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref); |
| + if (block->size < size) |
| + return nullptr; |
| + if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated) |
| + return nullptr; |
| + if (type_id != 0 && block->type_id != type_id) |
| + return nullptr; |
| + } |
| + |
| + // Return pointer to block data. |
| + return reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref); |
| +} |
| + |
| +volatile void* PersistentMemoryAllocator::GetBlockData(Reference ref, |
| + uint32_t type_id, |
| + int32_t size) { |
| + DCHECK(size > 0); |
| + volatile BlockHeader* block = GetBlock(ref, type_id, size, false, false); |
| + if (!block) |
| + return nullptr; |
| + return reinterpret_cast<volatile char*>(block) + sizeof(BlockHeader); |
| +} |
| + |
| +void PersistentMemoryAllocator::UpdateStaticHistograms() { |
| + if (used_histogram_) { |
| + MemoryInfo meminfo; |
| + GetMemoryInfo(&meminfo); |
| + HistogramBase::Sample usedkb = static_cast<HistogramBase::Sample>( |
| + (meminfo.total - meminfo.free) >> 10); |
| + used_histogram_->Add(usedkb); |
| + } |
| +} |
| + |
| +//----- LocalPersistentMemoryAllocator ----------------------------------------- |
| + |
| +LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator( |
| + size_t size, |
| + const std::string& name) |
| + : PersistentMemoryAllocator(memset(new char[size], 0, size), |
| + size, 0, name) {} |
| + |
| +LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() { |
| +  delete [] mem_base_; |
| +} |
| + |
| +} // namespace base |
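A minimal end-to-end sketch using the heap-backed allocator defined above (illustrative only; the 64 KiB size and the names are arbitrary):

    #include "base/memory/persistent_memory_allocator.h"

    void LocalExample() {
      // Heap-backed, zero-filled segment; a page_size of 0 means the whole
      // segment is treated as a single page.
      base::LocalPersistentMemoryAllocator allocator(64 << 10, "ExampleLocal");

      base::PersistentMemoryAllocator::MemoryInfo info;
      allocator.GetMemoryInfo(&info);
      // info.total is the segment size; info.free is what remains after the
      // shared metadata and block-header overhead.

      if (!allocator.IsCorrupt() && !allocator.IsFull()) {
        // Allocate / GetAsObject / MakeIterable as sketched earlier.
      }
    }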