Chromium Code Reviews

Side by Side Diff: src/minsfi/trusted/loader.c

Issue 539683002: MinSFI: Add loader (Closed)
Base URL: https://chromium.googlesource.com/native_client/src/native_client.git@master
Patch Set: Active sandbox interface, created 6 years, 3 months ago
1 /*
2 * Copyright (c) 2014 The Native Client Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file.
5 */
6
7 #include <string.h>
8 #include <sys/mman.h>
9
10 #include "native_client/src/include/minsfi_priv.h"
11
12 static inline bool IsPowerOfTwo(uint32_t x) {
13 return ((x != 0) && !(x & (x - 1)));
14 }
15
16 static inline bool IsAlignedPow2(uint32_t addr, uint32_t pow2) {
17 return !(addr & (pow2 - 1));
18 }
19
20 /* Round up to the nearest multiple of pow2 (some power of two). */
21 static inline uint32_t RoundUpToMultiplePow2(uint32_t x, uint32_t pow2) {
22 return (x + pow2 - 1) & (~(pow2 - 1));
23 }
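/*
 * A quick worked example of the identity above, assuming pow2 = 0x1000
 * (a 4 KiB page):
 *
 *   RoundUpToMultiplePow2(0x0000, 0x1000) == 0x0000
 *   RoundUpToMultiplePow2(0x0001, 0x1000) == 0x1000
 *   RoundUpToMultiplePow2(0x1000, 0x1000) == 0x1000
 *   RoundUpToMultiplePow2(0x1001, 0x1000) == 0x2000
 *
 * If x lies within pow2 - 1 of UINT32_MAX, the addition wraps and the result
 * is 0; this is one reason MinsfiGenerateMemoryLayout below re-validates the
 * final layout rather than trusting its inputs.
 */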
24
25 /*
26 * Checks that the region does form a valid interval inside the allocated
27 * subspace.
28 */
29 static inline bool IsValidRegion(const MinsfiMemoryRegion *reg,
30 uint64_t subspace_size, uint32_t page_size) {
31 uint64_t region_start = reg->offset;
32 uint64_t region_end = region_start + reg->length;
33
34   /* Run the checks. Note that page alignment together with start != end implies
jvoung (off chromium) 2014/09/08 23:18:47 nit: use the block comment style ("/*", " * blah", " */"), like the comment style below
dbrazdil 2014/09/09 00:57:53 Done.
35    * that the region is at least one page in size. */
36 return (region_start < region_end) &&
37 (region_start <= subspace_size) &&
38 (region_end <= subspace_size) &&
39 IsAlignedPow2(region_start, page_size) &&
40 IsAlignedPow2(region_end, page_size);
41 }
42
43 /*
44 * Checks that region1 is followed by region2 with the given gap between them.
45 */
46 static inline bool AreAdjacentRegions(const MinsfiMemoryRegion *region1,
47 const MinsfiMemoryRegion *region2,
48 uint32_t gap) {
49 return region1->offset + region1->length + gap == region2->offset;
50 }
51
52 static inline uint64_t AddressSubspaceSize(const MinsfiManifest *manifest) {
53 return (1LL << manifest->ptr_size);
54 }
55
56 /*
57 * Returns the amount of memory actually addressable by the sandbox, i.e. twice
58 * the size of the address subspace.
59  * See comments in the SandboxMemoryAccesses LLVM pass for more details.
60 */
61 static inline size_t AddressableMemorySize(const MinsfiManifest *manifest) {
62 return AddressSubspaceSize(manifest) * 2;
63 }
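/*
 * For concreteness: with ptr_size = 32 the address subspace is 4 GiB and the
 * sandbox reservation is 8 GiB; with the minimum ptr_size of 20 it is 1 MiB
 * and 2 MiB respectively. MinsfiInitSandbox below derives ptr_mask as
 * AddressSubspaceSize(manifest) - 1, so a masked untrusted pointer always
 * falls in the lower half of the reservation.
 */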
64
65 bool MinsfiGenerateMemoryLayout(const MinsfiManifest *manifest,
66 uint32_t page_size, MinsfiMemoryLayout *mem) {
67 uint64_t subspace_size;
68
69 if (manifest->ptr_size < 20 || manifest->ptr_size > 32 ||
70 !IsPowerOfTwo(page_size))
71 return false;
72
73 subspace_size = AddressSubspaceSize(manifest);
74
75 /*
76 * Data segment is positioned at a fixed offset. The size of the region
77 * is rounded to the end of a page.
78 */
79 mem->dataseg.offset = manifest->dataseg_offset;
80 mem->dataseg.length = RoundUpToMultiplePow2(manifest->dataseg_size,
81 page_size);
82
83 /*
84    * The stack currently has a fixed size and is located at the
85    * end of the address space.
86 */
87 mem->stack.length = 32 * page_size;
88 mem->stack.offset = subspace_size - mem->stack.length;
89
90 /*
91 * Heap fills the space between the data segment and the stack, separated
92 * by a guard page at each end. We check that it is at least one page long.
93 */
94 mem->heap.offset = mem->dataseg.offset + mem->dataseg.length + page_size;
95 mem->heap.length = mem->stack.offset - page_size - mem->heap.offset;
96
97 /*
98 * Verify that the memory layout is sane. This is important because
99    * the data segment parameters are not validated at the beginning of this
100    * function, and therefore the computed values could have overflowed.
101 */
102 return IsValidRegion(&mem->dataseg, subspace_size, page_size) &&
103 IsValidRegion(&mem->heap, subspace_size, page_size) &&
104 IsValidRegion(&mem->stack, subspace_size, page_size) &&
105 AreAdjacentRegions(&mem->dataseg, &mem->heap, /*gap=*/ page_size) &&
106 AreAdjacentRegions(&mem->heap, &mem->stack, /*gap=*/ page_size);
107 }
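/*
 * Example layout for hypothetical manifest values: ptr_size = 24 (a 16 MiB
 * subspace), page_size = 0x1000, dataseg_offset = 0x10000 and
 * dataseg_size = 0x2345 would yield
 *
 *   dataseg: offset 0x010000, length 0x003000  (0x2345 rounded up to a page)
 *   heap:    offset 0x014000, length 0xfcb000  (guard page on either side)
 *   stack:   offset 0xfe0000, length 0x020000  (32 pages, ending at 0x1000000)
 *
 * and all five checks above pass.
 */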
108
109 /* Change the access rights of a given memory region to read/write. */
110 static inline bool EnableMemoryRegion(char *mem_base,
111 const MinsfiMemoryRegion *reg) {
112 char *region_base = mem_base + reg->offset;
113 return region_base == mmap(region_base, reg->length,
114 PROT_READ | PROT_WRITE,
115 MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
116 }
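/*
 * Since EnableMemoryRegion is only applied to the inaccessible reservation
 * created in MinsfiInitSandbox below, a sketch of an in-place alternative is
 *
 *   return mprotect(region_base, reg->length, PROT_READ | PROT_WRITE) == 0;
 *
 * The MAP_FIXED re-map above differs in that it drops MAP_NORESERVE for the
 * accessible regions and replaces them with fresh zero-filled pages.
 */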
117
118 bool MinsfiInitSandbox(const MinsfiManifest *manifest, MinsfiSandbox *sb) {
119 /* Compute the boundaries of the data segment, heap and stack. Verify
jvoung (off chromium) 2014/09/08 23:18:47 Use the block comment style ("/*", " * Blah", " */") here and below
dbrazdil 2014/09/09 00:57:52 Done.
120 * that they are sane. */
121 if (!MinsfiGenerateMemoryLayout(manifest, getpagesize(), &sb->mem_layout))
122 return false;
123
124 /* Compute properties of the sandbox */
125 sb->mem_alloc_size = AddressableMemorySize(manifest);
126 sb->ptr_mask = AddressSubspaceSize(manifest) - 1;
127
128 /* Allocate memory for the sandbox's address subspace */
129 sb->mem_base = mmap(NULL, sb->mem_alloc_size, 0,
jvoung (off chromium) 2014/09/08 23:18:47 Could use PROT_NONE instead of 0, to be clear.
dbrazdil 2014/09/09 00:57:52 Done.
130 MAP_ANON | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
131 if (sb->mem_base == MAP_FAILED)
132 return false;
133
134 /* Change the rights of accessible pages to read/write. Unmap the whole
135 * memory region if the operation fails. */
136 if (!EnableMemoryRegion(sb->mem_base, &sb->mem_layout.dataseg) ||
137 !EnableMemoryRegion(sb->mem_base, &sb->mem_layout.heap) ||
138 !EnableMemoryRegion(sb->mem_base, &sb->mem_layout.stack)) {
139 MinsfiUnmapSandbox(sb);
140 return false;
141 }
142
143 /* Copy the data segment template into the memory subspace. */
144 memcpy(sb->mem_base + sb->mem_layout.dataseg.offset,
145 manifest->dataseg_template, manifest->dataseg_size);
146
147 return true;
148 }
149
150 bool MinsfiUnmapSandbox(const MinsfiSandbox *sb) {
151 return munmap(sb->mem_base, sb->mem_alloc_size) == 0;
152 }
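
For orientation, here is a minimal sketch of how a trusted caller might drive this loader. The manifest values are hypothetical placeholders; the sketch assumes MinsfiManifest has no required fields beyond those used in this file and that dataseg_template accepts a plain byte buffer. The hand-off to the sandboxed code is elided:

static bool ExampleLoadAndUnload(void) {
  static const char data_template[] = "initialized data";
  MinsfiManifest manifest;
  MinsfiSandbox sb;

  memset(&manifest, 0, sizeof(manifest));
  manifest.ptr_size = 24;                   /* 16 MiB address subspace */
  manifest.dataseg_offset = 0x10000;        /* hypothetical, page-aligned */
  manifest.dataseg_size = sizeof(data_template);
  manifest.dataseg_template = data_template;

  /* Reserves the subspace, enables the data segment, heap and stack, and
   * copies in the data segment template. */
  if (!MinsfiInitSandbox(&manifest, &sb))
    return false;

  /* ... transfer control to the sandboxed module here ... */

  return MinsfiUnmapSandbox(&sb);
}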