| Index: base/memory/shared_memory_allocator.h
|
| diff --git a/base/memory/shared_memory_allocator.h b/base/memory/shared_memory_allocator.h
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..5410ab771f9dddd17d12e9be01788437c15410e6
|
| --- /dev/null
|
| +++ b/base/memory/shared_memory_allocator.h
|
| @@ -0,0 +1,156 @@
|
| +// Copyright (c) 2015 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#ifndef BASE_MEMORY_SHARED_MEMORY_ALLOCATOR_H_
|
| +#define BASE_MEMORY_SHARED_MEMORY_ALLOCATOR_H_
|
| +
|
| +#include <stdint.h>
|
| +
|
| +#include "base/atomicops.h"
|
| +#include "base/base_export.h"
|
| +#include "base/macros.h"
|
| +
|
| +namespace base {
|
| +
|
| +// Simple allocator for pieces of a memory block that may be shared across
|
| +// multiple processes.
|
| +//
|
| +// This class provides for thread-secure (i.e. safe against other threads
|
| +// or processes that may be compromised and thus have malicious intent)
|
| +// allocation of memory within a designated block and also a mechanism by
|
| +// which other threads can learn of the allocations along with any additional
|
| +// shared information.
|
| +//
|
| +// There is (currently) no way to release an allocated block of data because
|
| +// doing so would risk invalidating pointers held by other processes and
|
| +// greatly complicate the allocation algorithm.
|
| +//
|
| +// Construction of this object can accept new, clean (i.e. zeroed) memory
|
| +// or previously initialized memory. In the first case, construction must
|
| +// be allowed to complete before letting other allocators attach to the same
|
| +// segment. In other words, don't share the segment until at least one
|
| +// allocator has been attached to it.
|
| +//
|
| +// It should be noted that memory doesn't need to actually have zeros written
|
| +// throughout; it just needs to read as zero until something different is
|
| +// written to a location. This is an important distinction as it supports the
|
| +// use-case of non-pinned memory, such as from a demand-allocated region by
|
| +// the OS or a memory-mapped file that auto-grows from a starting size of zero.
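| +//
| +// A minimal producer-side sketch (hedged example only; |shared_base|, |MyData|
| +// and |kMyDataType| are assumed, illustrative names, not part of this API):
| +//
| +//   SharedMemoryAllocator allocator(shared_base, 1 << 20, 0);  // 1 MiB, unpaged.
| +//   int32_t offset = allocator.Allocate(sizeof(MyData), kMyDataType);
| +//   if (MyData* data = allocator.GetAsObject<MyData>(offset, kMyDataType)) {
| +//     data->value = 42;                // Initialize the shared object.
| +//     allocator.MakeIterable(offset);  // Publish it for other processes.
| +//   }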
|
| +class BASE_EXPORT SharedMemoryAllocator {
|
| + public:
|
| + // Internal state information when iterating over memory allocations.
|
| + struct Iterator {
|
| + int32_t last;
|
| + uint32_t niter;
|
| + };
|
| +
|
| + // Returned information about the internal state of the heap.
|
| + struct MemoryInfo {
|
| + int32_t total;
|
| + int32_t free;
|
| + };
|
| +
|
| + enum : int32_t {
|
| + kTypeIdAny = 0 // Match any type-id inside GetAsObject().
|
| + };
|
| +
|
| + // The allocator operates on any arbitrary block of memory. Creation and
|
| + // sharing of that block with another process is the responsibility of the
|
| + // caller. The allocator needs to know only the block's |base| address, the
|
| +// total |size| of the block, and any internal |page_size| (zero if not
|
| + // paged) across which allocations should not span.
|
| + //
|
| + // SharedMemoryAllocator does NOT take ownership of this memory block. The
|
| + // caller must manage it and ensure it stays available throughout the lifetime
|
| + // of this object.
|
| + //
|
| + // Memory segments for sharing must have had an allocator attached to them
|
| + // before actually being shared. If the memory segment was just created, it
|
| + // should be zeroed. If it was an existing segment, the values here will
|
| + // be compared to copies stored in the shared segment as a guard against
|
| + // corruption.
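| + //
| + // Sketch of the two cases (|mapping_a|, |mapping_b|, |segment_size| and
| + // |page_size| are assumed names for an already mapped region):
| + //
| + //   // Process A: fresh, zero-reading segment; attach before sharing it.
| + //   SharedMemoryAllocator writer(mapping_a, segment_size, page_size);
| + //
| + //   // Process B: attaches to the already-initialized segment; the sizes
| + //   // passed here are checked against those recorded in the segment.
| + //   SharedMemoryAllocator reader(mapping_b, segment_size, page_size);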
|
| + SharedMemoryAllocator(void* base, int32_t size, int32_t page_size);
|
| + ~SharedMemoryAllocator();
|
| +
|
| + // Get an object referenced by an |offset|. For safety reasons, the |type_id|
|
| + // code and sizeof(|T|) are compared to ensure the reference is valid
|
| + // and cannot return an object outside of the memory segment. A |type_id| of
|
| + // zero will match any type, though the size is still checked. NULL is
|
| + // returned if any problem is detected, such as corrupted storage or
|
| + // incorrect parameters. Callers MUST check that the returned value is
|
| + // non-null EVERY TIME before accessing it or risk crashing! Once a non-null
|
| + // pointer has been obtained, it may be cached and reused indefinitely.
|
| + //
|
| + // NOTE: Though this method will guarantee that an object of the specified
|
| + // type can be accessed without going outside the bounds of the memory
|
| + // segment, it makes no guarantees about the validity of the data within the
|
| + // object itself. If it is expected that the contents of the segment could
|
| + // be compromised with malicious intent, the object must be hardened as well.
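| + //
| + // A minimal usage sketch (|MyData| and |kMyDataType| are assumed,
| + // illustrative names, not part of this interface):
| + //
| + //   MyData* data = allocator.GetAsObject<MyData>(offset, kMyDataType);
| + //   if (!data)
| + //     return;  // Wrong type-id, wrong size, bad offset, or corruption.
| + //   // |data| may now be cached and reused for the life of the segment.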
|
| + template <typename T>
|
| + T* GetAsObject(int32_t offset, int32_t type_id) {
|
| + return static_cast<T*>(GetBlockData(offset, type_id, sizeof(T), false));
|
| + }
|
| +
|
| + // Reserve space in the memory segment of the desired |size| and |type_id|.
|
| + // A return value of zero indicates the allocation failed, otherwise the
|
| + // returned offset can be used by any process to get a real pointer via
|
| + // the GetAsObject() call.
|
| + int32_t Allocate(int32_t size, int32_t type_id);
|
| +
|
| + // Allocated objects can be added to an internal list that can then be
|
| + // iterated over by other processes. If an allocated object can be found
|
| + // another way, such as by having its offset within a different object
|
| + // that will be made iterable, then this call is not necessary. This always
|
| + // succeeds unless corruption is detected; check IsCorrupted() to find out.
|
| + void MakeIterable(int32_t offset);
|
| +
|
| + // Get the information about the amount of free space in the allocator. The
|
| + // amount of free space should be treated as approximate due to overhead from
|
| + // alignment and metadata. Concurrent allocations from other threads will
|
| + // also make the true amount less than what is reported. It will never
|
| + // return _less_ than could actually be allocated.
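| + //
| + // For example (a sketch; |kNeededBytes| is an assumed constant):
| + //
| + //   SharedMemoryAllocator::MemoryInfo info;
| + //   allocator.GetMemoryInfo(&info);
| + //   if (info.free < kNeededBytes)
| + //     ...  // An Allocate() of that size cannot be expected to succeed.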
|
| + void GetMemoryInfo(MemoryInfo* meminfo);
|
| +
|
| + // Iterating uses a |state| structure (initialized by CreateIterator) and
|
| + // returns both the offset reference to the object as well as the |type_id|
|
| + // of that object. A zero return value indicates there are currently no more
|
| + // objects to be found but future attempts can be made without having to
|
| + // reset the iterator to "first".
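| + //
| + // Typical consumer-side loop (a sketch; |MyData|, |kMyDataType| and
| + // |Process| are assumed names):
| + //
| + //   SharedMemoryAllocator::Iterator iter;
| + //   allocator.CreateIterator(&iter);
| + //   int32_t type_id;
| + //   int32_t offset;
| + //   while ((offset = allocator.GetNextIterable(&iter, &type_id)) != 0) {
| + //     if (type_id != kMyDataType)
| + //       continue;
| + //     if (MyData* data = allocator.GetAsObject<MyData>(offset, kMyDataType))
| + //       Process(*data);
| + //   }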
|
| + void CreateIterator(Iterator* state);
|
| + int32_t GetNextIterable(Iterator* state, int32_t* type_id);
|
| +
|
| + // If there is some indication that the shared memory has become corrupted,
|
| + // calling this will attempt to prevent further damage by indicating to
|
| + // all processes that something is not as expected.
|
| + void SetCorrupted();
|
| +
|
| + // This can be called to determine if corruption has been detected in the
|
| + // shared segment, possibly by a malicious actor. Once detected, future
|
| + // allocations will fail and iteration may not locate all objects.
|
| + bool IsCorrupted();
|
| +
|
| + // Returns true if an allocation has failed because the memory segment was full.
|
| + bool IsFull();
|
| +
|
| + private:
|
| + struct SharedMetadata;
|
| + struct BlockHeader;
|
| +
|
| + BlockHeader* GetBlock(int32_t offset, int32_t type_id, int32_t size,
|
| + bool special);
|
| + void* GetBlockData(int32_t offset, int32_t type_id, int32_t size,
|
| + bool special);
|
| +
|
| + SharedMetadata* shared_meta_; // Pointer to start of memory segment.
|
| + char* mem_base_; // Same address, as char* (sizeof(char) is guaranteed 1).
|
| + int32_t mem_size_; // Size of entire memory segment.
|
| + int32_t mem_page_; // Page size allocations shouldn't cross.
|
| + subtle::Atomic32 corrupted_; // TODO(bcwhite): Use std::atomic<char> when ok.
|
| +
|
| + DISALLOW_COPY_AND_ASSIGN(SharedMemoryAllocator);
|
| +};
|
| +
|
| +} // namespace base
|
| +
|
| +#endif // BASE_MEMORY_SHARED_MEMORY_ALLOCATOR_H_
|
|
|