OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright (c) 2014 The Native Client Authors. All rights reserved. |
| 3 * Use of this source code is governed by a BSD-style license that can be |
| 4 * found in the LICENSE file. |
| 5 */ |
| 6 |
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include "native_client/src/include/minsfi_priv.h"
| 11 |
/* A power of two has exactly one bit set; zero has none and is rejected. */
static inline bool IsPowerOfTwo(uint32_t x) {
  if (x == 0)
    return false;
  return (x & (x - 1)) == 0;
}
| 15 |
/* True iff addr is a multiple of pow2 (pow2 must be a power of two). */
static inline bool IsAlignedPow2(uint32_t addr, uint32_t pow2) {
  uint32_t low_bits = addr & (pow2 - 1);
  return low_bits == 0;
}
| 19 |
/* Round x up to the nearest multiple of pow2 (some power of two). */
static inline uint32_t RoundUpToMultiplePow2(uint32_t x, uint32_t pow2) {
  uint32_t mask = pow2 - 1;
  return (x + mask) & ~mask;
}
| 24 |
| 25 /* |
| 26 * Checks that the region does form a valid interval inside the allocated |
| 27 * subspace. |
| 28 */ |
| 29 static inline bool IsValidRegion(const MinsfiMemoryRegion *reg, |
| 30 uint64_t subspace_size, uint32_t page_size) { |
| 31 uint64_t region_start = reg->offset; |
| 32 uint64_t region_end = region_start + reg->length; |
| 33 |
| 34 /* |
| 35 * Run the checks. Note that page alignment together with start != end imply |
| 36 * that the region is at least one page in size |
| 37 */ |
| 38 return (region_start < region_end) && |
| 39 (region_start <= subspace_size) && |
| 40 (region_end <= subspace_size) && |
| 41 IsAlignedPow2(region_start, page_size) && |
| 42 IsAlignedPow2(region_end, page_size); |
| 43 } |
| 44 |
| 45 /* |
| 46 * Checks that region1 is followed by region2 with the given gap between them. |
| 47 */ |
| 48 static inline bool AreAdjacentRegions(const MinsfiMemoryRegion *region1, |
| 49 const MinsfiMemoryRegion *region2, |
| 50 uint32_t gap) { |
| 51 return region1->offset + region1->length + gap == region2->offset; |
| 52 } |
| 53 |
| 54 static inline uint64_t AddressSubspaceSize(const MinsfiManifest *manifest) { |
| 55 return (1LL << manifest->ptr_size); |
| 56 } |
| 57 |
| 58 /* |
| 59 * Returns the amount of memory actually addressable by the sandbox, i.e. twice |
| 60 * the size of the address subspace. |
| 61 * See comments in the SandboxMemoryAccessess LLVM pass for more details. |
| 62 */ |
| 63 static inline size_t AddressableMemorySize(const MinsfiManifest *manifest) { |
| 64 return AddressSubspaceSize(manifest) * 2; |
| 65 } |
| 66 |
| 67 bool MinsfiGenerateMemoryLayout(const MinsfiManifest *manifest, |
| 68 uint32_t page_size, MinsfiMemoryLayout *mem) { |
| 69 uint64_t subspace_size; |
| 70 |
| 71 if (manifest->ptr_size < 20 || manifest->ptr_size > 32 || |
| 72 !IsPowerOfTwo(page_size)) |
| 73 return false; |
| 74 |
| 75 subspace_size = AddressSubspaceSize(manifest); |
| 76 |
| 77 /* |
| 78 * Data segment is positioned at a fixed offset. The size of the region |
| 79 * is rounded to the end of a page. |
| 80 */ |
| 81 mem->dataseg.offset = manifest->dataseg_offset; |
| 82 mem->dataseg.length = RoundUpToMultiplePow2(manifest->dataseg_size, |
| 83 page_size); |
| 84 |
| 85 /* |
| 86 * Size of the stack is currently a fixed constant, located at the |
| 87 * end of the address space. |
| 88 */ |
| 89 mem->stack.length = 32 * page_size; |
| 90 mem->stack.offset = subspace_size - mem->stack.length; |
| 91 |
| 92 /* |
| 93 * Heap fills the space between the data segment and the stack, separated |
| 94 * by a guard page at each end. We check that it is at least one page long. |
| 95 */ |
| 96 mem->heap.offset = mem->dataseg.offset + mem->dataseg.length + page_size; |
| 97 mem->heap.length = mem->stack.offset - page_size - mem->heap.offset; |
| 98 |
| 99 /* |
| 100 * Verify that the memory layout is sane. This is important because |
| 101 * we do not verify the parameters at the beginning of this function |
| 102 * and therefore the values could have overflowed. |
| 103 */ |
| 104 return IsValidRegion(&mem->dataseg, subspace_size, page_size) && |
| 105 IsValidRegion(&mem->heap, subspace_size, page_size) && |
| 106 IsValidRegion(&mem->stack, subspace_size, page_size) && |
| 107 AreAdjacentRegions(&mem->dataseg, &mem->heap, /*gap=*/ page_size) && |
| 108 AreAdjacentRegions(&mem->heap, &mem->stack, /*gap=*/ page_size); |
| 109 } |
| 110 |
| 111 /* Change the access rights of a given memory region to read/write. */ |
| 112 static inline bool EnableMemoryRegion(char *mem_base, |
| 113 const MinsfiMemoryRegion *reg) { |
| 114 char *region_base = mem_base + reg->offset; |
| 115 return region_base == mmap(region_base, reg->length, |
| 116 PROT_READ | PROT_WRITE, |
| 117 MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0); |
| 118 } |
| 119 |
| 120 bool MinsfiInitSandbox(const MinsfiManifest *manifest, MinsfiSandbox *sb) { |
| 121 /* |
| 122 * Compute the boundaries of the data segment, heap and stack. Verify |
| 123 * that they are sane. |
| 124 */ |
| 125 if (!MinsfiGenerateMemoryLayout(manifest, getpagesize(), &sb->mem_layout)) |
| 126 return false; |
| 127 |
| 128 /* Compute properties of the sandbox */ |
| 129 sb->mem_alloc_size = AddressableMemorySize(manifest); |
| 130 sb->ptr_mask = AddressSubspaceSize(manifest) - 1; |
| 131 |
| 132 /* Allocate memory for the sandbox's address subspace */ |
| 133 sb->mem_base = mmap(NULL, sb->mem_alloc_size, |
| 134 PROT_NONE, |
| 135 MAP_ANON | MAP_PRIVATE | MAP_NORESERVE, -1, 0); |
| 136 if (sb->mem_base == MAP_FAILED) |
| 137 return false; |
| 138 |
| 139 /* |
| 140 * Change the rights of accessible pages to read/write. Unmap the whole |
| 141 * memory region if the operation fails. |
| 142 */ |
| 143 if (!EnableMemoryRegion(sb->mem_base, &sb->mem_layout.dataseg) || |
| 144 !EnableMemoryRegion(sb->mem_base, &sb->mem_layout.heap) || |
| 145 !EnableMemoryRegion(sb->mem_base, &sb->mem_layout.stack)) { |
| 146 MinsfiUnmapSandbox(sb); |
| 147 return false; |
| 148 } |
| 149 |
| 150 /* Copy the data segment template into the memory subspace. */ |
| 151 memcpy(sb->mem_base + sb->mem_layout.dataseg.offset, |
| 152 manifest->dataseg_template, manifest->dataseg_size); |
| 153 |
| 154 return true; |
| 155 } |
| 156 |
| 157 bool MinsfiUnmapSandbox(const MinsfiSandbox *sb) { |
| 158 return munmap(sb->mem_base, sb->mem_alloc_size) == 0; |
| 159 } |
OLD | NEW |