Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(8)

Side by Side Diff: src/minsfi/trusted/loader.c

Issue 539683002: MinSFI: Add loader (Closed) Base URL: https://chromium.googlesource.com/native_client/src/native_client.git@master
Patch Set: Created 6 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 /*
2 * Copyright (c) 2014 The Native Client Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file.
5 */
6
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include "native_client/src/include/minsfi_priv.h"
11
/* Returns true iff x has exactly one bit set, i.e. is a power of two. */
static inline bool is_power_of_two(uint32_t x) {
  if (x == 0)
    return false;
  return (x & (x - 1)) == 0;
}
15
/*
 * Returns true iff addr is a multiple of page_size.  page_size must be a
 * power of two (the caller checks this in generate_layout).
 */
static inline bool is_page_aligned(uint32_t addr, uint32_t page_size) {
  uint32_t page_mask = page_size - 1;
  return (addr & page_mask) == 0;
}
19
/* Round x up to the nearest multiple of pow2 (which must be a power of two). */
static inline uint32_t roundup2(uint32_t x, uint32_t pow2) {
  uint32_t mask = pow2 - 1;
  return (x + mask) & ~mask;
}
24
25 /*
26 * Checks that the region does form a valid interval inside the allocated
27 * subspace.
28 */
29 static inline bool is_valid(region reg, uint64_t subspace_size,
30 uint32_t page_size) {
31 uint64_t start = reg.offset;
32 uint64_t end = start + reg.length;
33
34 return (start < end) && /* implies reg.length >= page_size */
jvoung (off chromium) 2014/09/05 00:31:59 For the "implies", it's this check plus the page_a
dbrazdil 2014/09/05 19:41:38 Done.
35 (start <= subspace_size) &&
36 (end <= subspace_size) &&
37 is_page_aligned(start, page_size) &&
38 is_page_aligned(end, page_size);
39 }
40
/*
 * Checks that region1 is followed by region2 with the given gap between them.
 * The caller (generate_layout) only evaluates this after both regions have
 * passed is_valid(), so in practice the sum stays within the subspace.
 * NOTE(review): overflow behavior of the sum depends on the declared types of
 * region.offset/region.length in minsfi_priv.h — confirm there.
 */
static inline bool are_adjacent(region region1, region region2, uint32_t gap) {
  return region1.offset + region1.length + gap == region2.offset;
}
47
48 static inline uint64_t address_space_size(manifest *sb) {
jvoung (off chromium) 2014/09/05 00:32:00 could make these pointers const too?
dbrazdil 2014/09/05 19:41:37 Done.
49 return (1LL << sb->ptr_size);
50 }
51
/*
 * Returns the amount of memory actually addressable by the sandbox, i.e. twice
 * the size of the address subspace.
 * See the comments in the SandboxMemoryAccessess LLVM pass for more details.
 *
 * NOTE(review): the uint64_t product is narrowed to size_t on return; with
 * ptr_size == 32 the value (8 GiB) does not fit a 32-bit size_t — confirm
 * which host word sizes are supported.
 */
static inline size_t addressable_memory_size(manifest *sb) {
  return address_space_size(sb) * 2;
}
60
/*
 * Computes the memory layout of a sandbox (data segment, heap, stack) from
 * the parameters in the manifest `sb` and the host's `page_size`, storing
 * the result in `mem`.  Returns true iff the resulting layout is sane.
 * Deliberately computes first and validates only at the end, because the
 * unchecked manifest values may overflow the intermediate arithmetic.
 */
bool generate_layout(manifest *sb, uint32_t page_size, layout *mem) {
  uint64_t subspace_size;

  /* Reject pointer sizes outside [20, 32] bits and non-power-of-two pages. */
  if (sb->ptr_size < 20 || sb->ptr_size > 32 || !is_power_of_two(page_size))
    return false;

  subspace_size = address_space_size(sb);

  /*
   * Data segment is positioned at a fixed offset. The size of the
   * region is rounded to the end of a page.
   */
  mem->dataseg.offset = sb->dataseg_offset;
  mem->dataseg.length = roundup2(sb->dataseg_size, page_size);

  /*
   * Size of the stack is currently a fixed constant, located at the
   * end of the address space.
   */
  mem->stack.length = 32 * page_size;
  mem->stack.offset = subspace_size - mem->stack.length;

  /*
   * Heap fills the space between the data segment and the stack, separated
   * by a guard page at each end. We check that it is at least one page long.
   */
  mem->heap.offset = mem->dataseg.offset + mem->dataseg.length + page_size;
  mem->heap.length = mem->stack.offset - page_size - mem->heap.offset;

  /*
   * Verify that the memory layout is sane. This is important because
   * we do not verify the parameters at the beginning of this function
   * and therefore the values could have overflowed.
   */
  return is_valid(mem->dataseg, subspace_size, page_size) &&
         is_valid(mem->heap, subspace_size, page_size) &&
         is_valid(mem->stack, subspace_size, page_size) &&
         are_adjacent(mem->dataseg, mem->heap, page_size) &&
         are_adjacent(mem->heap, mem->stack, page_size);
}
101
102 /* Change the access rights of a given memory region to read/write. */
103 static inline bool enable_region(char *base, region reg) {
104 char *region_base = base + reg.offset;
105 return region_base == mmap(region_base, reg.length,
106 PROT_READ | PROT_WRITE,
jvoung (off chromium) 2014/09/05 00:31:59 Hmm, didn't really think of this earlier, but we'v
dbrazdil 2014/09/05 19:41:37 Yep, thought of it too when I was writing this. I'
107 MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
108 }
109
110 char *init_sandbox(manifest *sb) {
111 char *base;
112 layout mem;
113 size_t total_mem;
114
115 /* Compute the boundaries of the data segment, heap and stack. */
116 if (!generate_layout(sb, getpagesize(), &mem))
117 return NULL;
118
119 total_mem = addressable_memory_size(sb);
120 base = mmap(NULL, total_mem, 0, MAP_ANON | MAP_PRIVATE, -1, 0);
jvoung (off chromium) 2014/09/05 00:31:59 Should we have MAP_NORESERVE also?
dbrazdil 2014/09/05 19:41:37 I'm not entirely sure what that does. But it sound
121 if (base == MAP_FAILED)
122 return NULL;
123
124 /*
125 * Change the rights of accessible pages to read/write. Unmap the whole
126 * memory region if the operation fails.
127 */
128 if (!enable_region(base, mem.dataseg) || !enable_region(base, mem.heap) ||
129 !enable_region(base, mem.stack)) {
130 destroy_sandbox(base, sb);
131 return NULL;
132 }
133
134 /* Copy the data segment template into the memory subspace. */
135 memcpy(base + mem.dataseg.offset, sb->dataseg_template, sb->dataseg_size);
136
137 return base;
138 }
139
140 bool destroy_sandbox(char *base, manifest *sb) {
141 return munmap(base, addressable_memory_size(sb)) == 0;
142 }
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698