| Index: base/metrics/persistent_memory_allocator.cc
|
| diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
|
| index 99ea44b8e9326a8c3d97b79663949c710f03d841..9d1f20e74cc469e685894d1a5e0e39dc68a99e17 100644
|
| --- a/base/metrics/persistent_memory_allocator.cc
|
| +++ b/base/metrics/persistent_memory_allocator.cc
|
| @@ -18,6 +18,7 @@
|
| #include "base/memory/shared_memory.h"
|
| #include "base/metrics/histogram_macros.h"
|
| #include "base/metrics/sparse_histogram.h"
|
| +#include "base/threading/thread_restrictions.h"
|
|
|
| namespace {
|
|
|
| @@ -32,7 +33,7 @@ const uint32_t kGlobalCookie = 0x408305DC;
|
| // The current version of the metadata. If updates are made that change
|
| // the metadata, the version number can be queried to operate in a backward-
|
| // compatible manner until the memory segment is completely re-initalized.
|
| -const uint32_t kGlobalVersion = 1;
|
| +const uint32_t kGlobalVersion = 2;
|
|
|
| // Constant values placed in the block headers to indicate its state.
|
| const uint32_t kBlockCookieFree = 0;
|
| @@ -43,7 +44,7 @@ const uint32_t kBlockCookieAllocated = 0xC8799269;
|
| // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
|
| // types rather than combined bitfield.
|
|
|
| -// Flags stored in the flags_ field of the SharedMetaData structure below.
|
| +// Flags stored in the flags_ field of the SharedMetadata structure below.
|
| enum : int {
|
| kFlagCorrupt = 1 << 0,
|
| kFlagFull = 1 << 1
|
| @@ -100,7 +101,9 @@ struct PersistentMemoryAllocator::BlockHeader {
|
| };
|
|
|
| // The shared metadata exists once at the top of the memory segment to
|
| -// describe the state of the allocator to all processes.
|
| +// describe the state of the allocator to all processes. The size of this
|
| +// structure must be a multiple of 64 bits to ensure compatibility between
|
| +// architectures.
|
| struct PersistentMemoryAllocator::SharedMetadata {
|
| uint32_t cookie; // Some value that indicates complete initialization.
|
| uint32_t size; // Total size of memory segment.
|
| @@ -108,10 +111,15 @@ struct PersistentMemoryAllocator::SharedMetadata {
|
| uint32_t version; // Version code so upgrades don't break.
|
| uint64_t id; // Arbitrary ID number given by creator.
|
| uint32_t name; // Reference to stored name string.
|
| + uint32_t padding1; // Pad-out read-only data to 64-bit alignment.
|
|
|
| // Above is read-only after first construction. Below may be changed and
|
| // so must be marked "volatile" to provide correct inter-process behavior.
|
|
|
| + // State of the memory, plus some padding to keep alignment.
|
| + volatile std::atomic<uint8_t> memory_state; // MemoryState enum values.
|
| + uint8_t padding2[3];
|
| +
|
| // Bitfield of information flags. Access to this should be done through
|
| // the CheckFlag() and SetFlag() methods defined above.
|
| volatile std::atomic<uint32_t> flags;
|
| @@ -121,6 +129,7 @@ struct PersistentMemoryAllocator::SharedMetadata {
|
|
|
| // The "iterable" queue is an M&S Queue as described here, append-only:
|
| // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
|
| + // |queue| needs to be 64-bit aligned and is itself a multiple of 64 bits.
|
| volatile std::atomic<uint32_t> tailptr; // Last block of iteration queue.
|
| volatile BlockHeader queue; // Empty block for linked-list head/tail.
|
| };
|
| @@ -312,7 +321,7 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
|
| // definitions and so cannot be moved to the global scope.
|
| static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
|
| "struct is not portable across different natural word widths");
|
| - static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 56,
|
| + static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 64,
|
| "struct is not portable across different natural word widths");
|
|
|
| static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
|
| @@ -384,12 +393,13 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
|
| if (name_cstr)
|
| memcpy(name_cstr, name.data(), name.length());
|
| }
|
| +
|
| + shared_meta()->memory_state.store(MEMORY_INITIALIZED,
|
| + std::memory_order_release);
|
| } else {
|
| - if (shared_meta()->size == 0 ||
|
| - shared_meta()->version == 0 ||
|
| + if (shared_meta()->size == 0 || shared_meta()->version != kGlobalVersion ||
|
| shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
|
| - shared_meta()->tailptr == 0 ||
|
| - shared_meta()->queue.cookie == 0 ||
|
| + shared_meta()->tailptr == 0 || shared_meta()->queue.cookie == 0 ||
|
| shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
|
| SetCorrupt();
|
| }
|
| @@ -470,6 +480,19 @@ void PersistentMemoryAllocator::CreateTrackingHistograms(
|
| HistogramBase::kUmaTargetedHistogramFlag);
|
| }
|
|
|
| +void PersistentMemoryAllocator::Flush(bool sync) {
|
| + FlushPartial(used(), sync);
|
| +}
|
| +
|
| +void PersistentMemoryAllocator::SetMemoryState(uint8_t memory_state) {
|
| + shared_meta()->memory_state.store(memory_state, std::memory_order_relaxed);
|
| + FlushPartial(sizeof(SharedMetadata), false);
|
| +}
|
| +
|
| +uint8_t PersistentMemoryAllocator::GetMemoryState() const {
|
| + return shared_meta()->memory_state.load(std::memory_order_relaxed);
|
| +}
|
| +
|
| size_t PersistentMemoryAllocator::used() const {
|
| return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
|
| mem_size_);
|
| @@ -807,8 +830,12 @@ const volatile PersistentMemoryAllocator::BlockHeader*
|
| PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
|
| uint32_t size, bool queue_ok,
|
| bool free_ok) const {
|
| + // Handle special cases.
|
| + if (ref == kReferenceQueue && queue_ok)
|
| + return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
|
| +
|
| // Validation of parameters.
|
| - if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
|
| + if (ref < sizeof(SharedMetadata))
|
| return nullptr;
|
| if (ref % kAllocAlignment != 0)
|
| return nullptr;
|
| @@ -818,17 +845,13 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
|
|
|
| // Validation of referenced block-header.
|
| if (!free_ok) {
|
| - uint32_t freeptr = std::min(
|
| - shared_meta()->freeptr.load(std::memory_order_relaxed), mem_size_);
|
| - if (ref + size > freeptr)
|
| - return nullptr;
|
| const volatile BlockHeader* const block =
|
| reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
|
| - if (block->size < size)
|
| + if (block->cookie != kBlockCookieAllocated)
|
| return nullptr;
|
| - if (ref + block->size > freeptr)
|
| + if (block->size < size)
|
| return nullptr;
|
| - if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
|
| + if (ref + block->size > mem_size_)
|
| return nullptr;
|
| if (type_id != 0 &&
|
| block->type_id.load(std::memory_order_relaxed) != type_id) {
|
| @@ -840,6 +863,13 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
|
| return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
|
| }
|
|
|
| +void PersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
|
| + // Generally there is nothing to do as every write is done through volatile
|
| + // memory with atomic instructions to guarantee consistency. This (virtual)
|
| + // method exists so that derived classes can do special things, such as
|
| + // tell the OS to write changes to disk now rather than when convenient.
|
| +}
|
| +
|
| void PersistentMemoryAllocator::RecordError(int error) const {
|
| if (errors_histogram_)
|
| errors_histogram_->Add(error);
|
| @@ -980,7 +1010,12 @@ FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
|
| id,
|
| name,
|
| read_only),
|
| - mapped_file_(std::move(file)) {}
|
| + mapped_file_(std::move(file)) {
|
| + // Ensure the disk-copy of the data reflects the fully-initialized memory as
|
| + // there is no guarantee as to what order the pages might be auto-flushed by
|
| + // the OS in the future.
|
| + Flush(true);
|
| +}
|
|
|
| FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
|
|
|
| @@ -990,6 +1025,33 @@ bool FilePersistentMemoryAllocator::IsFileAcceptable(
|
| bool read_only) {
|
| return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
|
| }
|
| +
|
| +void FilePersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
|
| + if (sync)
|
| + ThreadRestrictions::AssertIOAllowed();
|
| + if (IsReadonly())
|
| + return;
|
| +
|
| +#if defined(OS_WIN)
|
| + // Windows doesn't support a synchronous flush.
|
| + BOOL success = ::FlushViewOfFile(data(), length);
|
| + DPCHECK(success);
|
| +#elif defined(OS_MACOSX)
|
| + // On OSX, "invalidate" removes all cached pages, forcing a re-read from
|
| + // disk. That's not applicable to "flush" so omit it.
|
| + int result =
|
| + ::msync(const_cast<void*>(data()), length, sync ? MS_SYNC : MS_ASYNC);
|
| + DCHECK_NE(EINVAL, result);
|
| +#elif defined(OS_POSIX)
|
| + // On POSIX, "invalidate" forces _other_ processes to recognize what has
|
| + // been written to disk and so is applicable to "flush".
|
| + int result = ::msync(const_cast<void*>(data()), length,
|
| + MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
|
| + DCHECK_NE(EINVAL, result);
|
| +#else
|
| +#error Unsupported OS.
|
| +#endif
|
| +}
|
| #endif // !defined(OS_NACL)
|
|
|
| } // namespace base
|
|
|