Chromium Code Reviews

Unified Diff: src/minsfi/trusted/loader.c

Issue 539683002: MinSFI: Add loader (Closed) Base URL: https://chromium.googlesource.com/native_client/src/native_client.git@master
Patch Set: Created 6 years, 3 months ago
Index: src/minsfi/trusted/loader.c
diff --git a/src/minsfi/trusted/loader.c b/src/minsfi/trusted/loader.c
new file mode 100644
index 0000000000000000000000000000000000000000..82b3424122bf63d060b1836f3a7088f1c1896b4d
--- /dev/null
+++ b/src/minsfi/trusted/loader.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2014 The Native Client Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "native_client/src/include/minsfi_priv.h"
+
+static inline bool is_power_of_two(uint32_t x) {
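+  /*
+   * x & (x - 1) clears the lowest set bit, so the result is zero only for
+   * zero and exact powers of two.
+   */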
+  return ((x != 0) && !(x & (x - 1)));
+}
+
+static inline bool is_page_aligned(uint32_t addr, uint32_t page_size) {
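+  /*
+   * With a power-of-two page_size, addr & (page_size - 1) is the offset of
+   * addr within its page.
+   */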
+  return !(addr & (page_size - 1));
+}
+
+/* Round up to the nearest multiple of pow2 (some power of two). */
+static inline uint32_t roundup2(uint32_t x, uint32_t pow2) {
+  return (x + pow2 - 1) & (~(pow2 - 1));
+}
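+/*
+ * For illustration (hypothetical values): with pow2 == 4096,
+ * roundup2(5000, 4096) == 8192 and roundup2(4096, 4096) == 4096.
+ */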
+
+/*
+ * Checks that the region does form a valid interval inside the allocated
+ * subspace.
+ */
+static inline bool is_valid(region reg, uint64_t subspace_size,
+                            uint32_t page_size) {
+  uint64_t start = reg.offset;
+  uint64_t end = start + reg.length;
+
+  return (start < end) &&  /* implies reg.length >= page_size */
jvoung (off chromium) 2014/09/05 00:31:59 For the "implies", it's this check plus the page_alignment checks below that give reg.length >= page_size, right?
dbrazdil 2014/09/05 19:41:38 Done.
+         (start <= subspace_size) &&
+         (end <= subspace_size) &&
+         is_page_aligned(start, page_size) &&
+         is_page_aligned(end, page_size);
+}
+
+/*
+ * Checks that region1 is followed by region2 with the given gap between them.
+ */
+static inline bool are_adjacent(region region1, region region2, uint32_t gap) {
+  return region1.offset + region1.length + gap == region2.offset;
+}
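+/*
+ * E.g. (hypothetical values, 4 KiB pages): a region at offset 0x10000 with
+ * length 0x2000 is adjacent to a region at offset 0x13000 with gap 0x1000.
+ */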
+
+static inline uint64_t address_space_size(manifest *sb) {
jvoung (off chromium) 2014/09/05 00:32:00 could make these pointers const too?
dbrazdil 2014/09/05 19:41:37 Done.
+  return (1LL << sb->ptr_size);
+}
+
+/*
+ * Returns the amount of memory actually addressable by the sandbox, i.e. twice
+ * the size of the address subspace.
+ * See the comments in the SandboxMemoryAccesses LLVM pass for more details.
+ */
+static inline size_t addressable_memory_size(manifest *sb) {
+  return address_space_size(sb) * 2;
+}
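+/*
+ * E.g. ptr_size == 32 gives a 4 GiB address subspace, and therefore 8 GiB
+ * of addressable memory.
+ */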
+
+bool generate_layout(manifest *sb, uint32_t page_size, layout *mem) {
+  uint64_t subspace_size;
+
+  if (sb->ptr_size < 20 || sb->ptr_size > 32 || !is_power_of_two(page_size))
+    return false;
+
+  subspace_size = address_space_size(sb);
+
+  /*
+   * The data segment is positioned at a fixed offset. The size of the
+   * region is rounded up to the end of a page.
+   */
+  mem->dataseg.offset = sb->dataseg_offset;
+  mem->dataseg.length = roundup2(sb->dataseg_size, page_size);
+
+  /*
+   * The stack currently has a fixed size and is located at the very end
+   * of the address space.
+   */
+  mem->stack.length = 32 * page_size;
+  mem->stack.offset = subspace_size - mem->stack.length;
+
+  /*
+   * The heap fills the space between the data segment and the stack,
+   * separated by a guard page at each end. The validity check below
+   * guarantees that it is at least one page long.
+   */
+  mem->heap.offset = mem->dataseg.offset + mem->dataseg.length + page_size;
+  mem->heap.length = mem->stack.offset - page_size - mem->heap.offset;
+
+  /*
+   * Verify that the memory layout is sane. This is important because the
+   * data-segment offset and size from the manifest are not validated above
+   * and the computed values could therefore have overflowed.
+   */
+  return is_valid(mem->dataseg, subspace_size, page_size) &&
+         is_valid(mem->heap, subspace_size, page_size) &&
+         is_valid(mem->stack, subspace_size, page_size) &&
+         are_adjacent(mem->dataseg, mem->heap, page_size) &&
+         are_adjacent(mem->heap, mem->stack, page_size);
+}
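+/*
+ * Worked example (hypothetical values): ptr_size == 20 (1 MiB subspace),
+ * page_size == 0x1000, dataseg_offset == 0x10000, dataseg_size == 0x2000:
+ *   dataseg = [0x10000, 0x12000)
+ *   heap    = [0x13000, 0xDF000)  (one guard page below and above)
+ *   stack   = [0xE0000, 0x100000) (32 pages)
+ */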
+
+/* Change the access rights of a given memory region to read/write. */
+static inline bool enable_region(char *base, region reg) {
+  char *region_base = base + reg.offset;
+  return region_base == mmap(region_base, reg.length,
+                             PROT_READ | PROT_WRITE,
jvoung (off chromium) 2014/09/05 00:31:59 Hmm, didn't really think of this earlier, but we'v
dbrazdil 2014/09/05 19:41:37 Yep, thought of it too when I was writing this. I'
+                             MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+}
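+/*
+ * Note that because the pages are re-mapped with MAP_FIXED rather than
+ * mprotect()ed, the old reservation is atomically replaced with fresh,
+ * zero-filled anonymous pages.
+ */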
+
+char *init_sandbox(manifest *sb) {
+  char *base;
+  layout mem;
+  size_t total_mem;
+
+  /* Compute the boundaries of the data segment, heap and stack. */
+  if (!generate_layout(sb, getpagesize(), &mem))
+    return NULL;
+
+  total_mem = addressable_memory_size(sb);
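+  /*
+   * Reserve the whole range with no access rights (PROT_NONE); the regions
+   * that should be accessible are enabled individually below.
+   */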
+  base = mmap(NULL, total_mem, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
jvoung (off chromium) 2014/09/05 00:31:59 Should we have MAP_NORESERVE also?
dbrazdil 2014/09/05 19:41:37 I'm not entirely sure what that does. But it sound
+  if (base == MAP_FAILED)
+    return NULL;
+
+  /*
+   * Change the rights of accessible pages to read/write. Unmap the whole
+   * memory region if the operation fails.
+   */
+  if (!enable_region(base, mem.dataseg) || !enable_region(base, mem.heap) ||
+      !enable_region(base, mem.stack)) {
+    destroy_sandbox(base, sb);
+    return NULL;
+  }
+
+  /* Copy the data segment template into the memory subspace. */
+  memcpy(base + mem.dataseg.offset, sb->dataseg_template, sb->dataseg_size);
+
+  return base;
+}
+
+bool destroy_sandbox(char *base, manifest *sb) {
+  return munmap(base, addressable_memory_size(sb)) == 0;
+}
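
For reference, a minimal usage sketch of the loader API above. It assumes the manifest field names and types in minsfi_priv.h match their use in this file; the manifest values and the zeroed data-segment template are hypothetical, for illustration only.

#include "native_client/src/include/minsfi_priv.h"

int main(void) {
  /* Hypothetical manifest: 1 MiB subspace, 8 KiB data segment at 64 KiB. */
  static char dataseg[0x2000];  /* static => zero-initialized */
  manifest sb = {
    .ptr_size = 20,
    .dataseg_offset = 0x10000,
    .dataseg_size = sizeof(dataseg),
    .dataseg_template = dataseg,
  };

  /* Map the subspace and set up the data segment, heap and stack. */
  char *base = init_sandbox(&sb);
  if (base == NULL)
    return 1;

  /* ... run sandboxed code inside the subspace at 'base' ... */

  return destroy_sandbox(base, &sb) ? 0 : 1;
}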
