Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(381)

Unified Diff: components/nacl/loader/bare_metal/elf_util.c

Issue 100373005: Initial implementation of Bare Metal Mode for NaCl. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 7 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: components/nacl/loader/bare_metal/elf_util.c
diff --git a/components/nacl/loader/bare_metal/elf_util.c b/components/nacl/loader/bare_metal/elf_util.c
new file mode 100644
index 0000000000000000000000000000000000000000..ade247346f4eb4e6e709c61fb7b1fed118fc143c
--- /dev/null
+++ b/components/nacl/loader/bare_metal/elf_util.c
@@ -0,0 +1,576 @@
+/*
+ * Copyright 2013 The Chromium Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "components/nacl/loader/bare_metal/elf_util.h"
+
+#include <string.h>
+#include <sys/mman.h>
+
+#define NACL_LOG_MODULE_NAME "elf_util"
+
+#include "native_client/src/include/elf.h"
+#include "native_client/src/include/portability.h"
+#include "native_client/src/shared/platform/nacl_host_desc.h"
+#include "native_client/src/shared/platform/nacl_log.h"
+#include "native_client/src/trusted/desc/nacl_desc_base.h"
+#include "native_client/src/trusted/desc/nacl_desc_effector_trusted_mem.h"
+
+/* Limit of elf program headers allowed. */
+#define NACL_BARE_METAL_MAX_PROGRAM_HEADERS 128
+
+/* Copied from native_client/src/trusted/service_runtime/nacl_config.h */
+#if NACL_ARCH(NACL_BUILD_ARCH) == NACL_x86
+# if NACL_BUILD_SUBARCH == 32
+# define NACL_ELF_E_MACHINE EM_386
+# elif NACL_BUILD_SUBARCH == 64
+# define NACL_ELF_E_MACHINE EM_X86_64
+# else /* NACL_BUILD_SUBARCH */
+# error Unknown platform!
+# endif /* NACL_BUILD_SUBARCH */
+#elif NACL_ARCH(NACL_BUILD_ARCH) == NACL_arm
+# define NACL_ELF_E_MACHINE EM_ARM
+#elif NACL_ARCH(NACL_BUILD_ARCH) == NACL_mips
+# define NACL_ELF_E_MACHINE EM_MIPS
+#else /* NACL_ARCH(NACL_BUILD_ARCH) */
+# error Unknown platform!
+#endif /* NACL_ARCH(NACL_BUILD_ARCH) */
+
+/* Copied from native_client/src/trusted/service_runtime/include/bits/mman.h */
+#define NACL_ABI_PROT_READ 0x1 /* Page can be read. */
+#define NACL_ABI_PROT_WRITE 0x2 /* Page can be written. */
+#define NACL_ABI_PROT_EXEC 0x4 /* Page can be executed. */
+#define NACL_ABI_PROT_NONE 0x0 /* Page can not be accessed. */
+#define NACL_ABI_MAP_PRIVATE 0x02 /* Changes are private. */
+#define NACL_ABI_MAP_FIXED 0x10 /* Interpret addr exactly. */
+
+/* Page size for Bare Metal Mode. */
+#define BARE_METAL_PAGE_SIZE 4096
+#define BARE_METAL_PAGE_MASK (BARE_METAL_PAGE_SIZE-1)
+
/*
 * Private in-memory representation of a parsed ELF file: the
 * (ELFCLASS32-normalized) file header plus up to
 * NACL_BARE_METAL_MAX_PROGRAM_HEADERS program headers.  Only created by
 * NaClBareMetalElfImageNew() and released by NaClBareMetalElfImageDelete().
 */
struct NaClBareMetalElfImage {
  Elf_Ehdr ehdr;
  Elf_Phdr phdrs[NACL_BARE_METAL_MAX_PROGRAM_HEADERS];
};
+
/* Copied from native_client/src/trusted/service_runtime/elf_util.c */
/*
 * Logs every field of the ELF file header '*elf_hdr' at verbosity
 * 'loglevel', preceded by a banner.  Debug aid only; has no side effects
 * beyond logging.
 */
static void NaClDumpElfHeader(int loglevel, Elf_Ehdr *elf_hdr) {

  /* Log one header member 'm' using printf format suffix 'f'. */
#define DUMP(m,f) do { NaClLog(loglevel, \
                               #m " = %" f "\n", \
                               elf_hdr->m); } while (0)

  NaClLog(loglevel, "=================================================\n");
  NaClLog(loglevel, "Elf header\n");
  NaClLog(loglevel, "==================================================\n");

  /* e_ident+1 prints the three chars "ELF" after the leading 0x7f byte. */
  DUMP(e_ident+1, ".3s");
  DUMP(e_type, "#x");
  DUMP(e_machine, "#x");
  DUMP(e_version, "#x");
  DUMP(e_entry, "#"NACL_PRIxElf_Addr);
  DUMP(e_phoff, "#"NACL_PRIxElf_Off);
  DUMP(e_shoff, "#"NACL_PRIxElf_Off);
  DUMP(e_flags, "#"NACL_PRIxElf_Word);
  DUMP(e_ehsize, "#"NACL_PRIxElf_Half);
  DUMP(e_phentsize, "#"NACL_PRIxElf_Half);
  DUMP(e_phnum, "#"NACL_PRIxElf_Half);
  DUMP(e_shentsize, "#"NACL_PRIxElf_Half);
  DUMP(e_shnum, "#"NACL_PRIxElf_Half);
  DUMP(e_shstrndx, "#"NACL_PRIxElf_Half);
#undef DUMP
  /* NOTE(review): label says Elf32_Ehdr but this prints sizeof(Elf_Ehdr). */
  NaClLog(loglevel, "sizeof(Elf32_Ehdr) = 0x%x\n", (int) sizeof *elf_hdr);
}
+
+/* Copied from native_client/src/trusted/service_runtime/elf_util.c */
+static void NaClDumpElfProgramHeader(int loglevel,
+ Elf_Phdr *phdr) {
+#define DUMP(mem, f) do { \
+ NaClLog(loglevel, "%s: %" f "\n", #mem, phdr->mem); \
+ } while (0)
+
+ DUMP(p_type, NACL_PRIxElf_Word);
+ DUMP(p_offset, NACL_PRIxElf_Off);
+ DUMP(p_vaddr, NACL_PRIxElf_Addr);
+ DUMP(p_paddr, NACL_PRIxElf_Addr);
+ DUMP(p_filesz, NACL_PRIxElf_Xword);
+ DUMP(p_memsz, NACL_PRIxElf_Xword);
+ DUMP(p_flags, NACL_PRIxElf_Word);
+ NaClLog(2, " (%s %s %s)\n",
+ (phdr->p_flags & PF_R) ? "PF_R" : "",
+ (phdr->p_flags & PF_W) ? "PF_W" : "",
+ (phdr->p_flags & PF_X) ? "PF_X" : "");
+ DUMP(p_align, NACL_PRIxElf_Xword);
+#undef DUMP
+ NaClLog(loglevel, "\n");
+}
+
+/* Copied from native_client/src/trusted/service_runtime/elf_util.c */
+static NaClBareMetalErrorCode NaClBareMetalElfImageValidateElfHeader(
+ struct NaClBareMetalElfImage *image) {
+ const Elf_Ehdr *hdr = &image->ehdr;
+
+ if (memcmp(hdr->e_ident, ELFMAG, SELFMAG)) {
+ NaClLog(LOG_ERROR, "bad elf magic\n");
+ return BARE_METAL_LOAD_BAD_ELF_MAGIC;
+ }
+
+ if (ELFCLASS32 != hdr->e_ident[EI_CLASS]) {
+ NaClLog(LOG_ERROR, "bad elf class\n");
+ return BARE_METAL_LOAD_NOT_32_BIT;
+ }
+
+ if (ET_EXEC != hdr->e_type) {
Mark Seaborn 2013/12/06 03:21:16 This should require ET_DYN -- see my other comment
hidehiko 2013/12/06 17:40:02 Done.
+ NaClLog(LOG_ERROR, "non executable\n");
+ return BARE_METAL_LOAD_NOT_EXEC;
+ }
+
+ if (NACL_ELF_E_MACHINE != hdr->e_machine) {
+ NaClLog(LOG_ERROR, "bad machine: %"NACL_PRIxElf_Half"\n", hdr->e_machine);
+ return BARE_METAL_LOAD_BAD_MACHINE;
+ }
+
+ if (EV_CURRENT != hdr->e_version) {
+ NaClLog(LOG_ERROR, "bad elf version: %"NACL_PRIxElf_Word"\n",
+ hdr->e_version);
+ return BARE_METAL_LOAD_BAD_ELF_VERS;
+ }
+
+ return BARE_METAL_LOAD_OK;
+}
+
/* Copied from native_client/src/trusted/service_runtime/elf_util.c */
/*
 * Reads the ELF file header and program headers from descriptor 'ndp',
 * validates them, and returns a freshly malloc'd NaClBareMetalElfImage.
 * ELFCLASS64 inputs are converted to the ELFCLASS32 in-memory
 * representation; any 64-bit field that does not fit in 32 bits is
 * rejected.  On failure, NULL is returned and *err_code identifies the
 * error; on success, *err_code is BARE_METAL_LOAD_OK.  The caller owns
 * the result and must release it with NaClBareMetalElfImageDelete().
 */
struct NaClBareMetalElfImage *NaClBareMetalElfImageNew(
    struct NaClDesc *ndp,
    NaClBareMetalErrorCode *err_code) {
  ssize_t read_ret;
  struct NaClBareMetalElfImage *result;
  struct NaClBareMetalElfImage image;  /* stack copy; heap-copied at the end */
  union {
    Elf32_Ehdr ehdr32;
#if NACL_TARGET_SUBARCH == 64
    Elf64_Ehdr ehdr64;
#endif
  } ehdr;
  int cur_ph;

  /*
   * We read the larger size of an ELFCLASS64 header even if it turns out
   * we're reading an ELFCLASS32 file.  No usable ELFCLASS32 binary could
   * be so small that it's not larger than Elf64_Ehdr anyway.
   */
  read_ret = (*NACL_VTBL(NaClDesc, ndp)->PRead)(ndp, &ehdr, sizeof ehdr, 0);
  if (NaClSSizeIsNegErrno(&read_ret) || (size_t) read_ret != sizeof ehdr) {
    *err_code = BARE_METAL_LOAD_READ_ERROR;
    NaClLog(2, "could not load elf headers\n");
    return 0;
  }

#if NACL_TARGET_SUBARCH == 64
  if (ELFCLASS64 == ehdr.ehdr64.e_ident[EI_CLASS]) {
    /*
     * Convert ELFCLASS64 format to ELFCLASS32 format.
     * The initial four fields are the same in both classes.
     */
    memcpy(image.ehdr.e_ident, ehdr.ehdr64.e_ident, EI_NIDENT);
    image.ehdr.e_ident[EI_CLASS] = ELFCLASS32;
    image.ehdr.e_type = ehdr.ehdr64.e_type;
    image.ehdr.e_machine = ehdr.ehdr64.e_machine;
    image.ehdr.e_version = ehdr.ehdr64.e_version;
    /* Reject any offset/address field that would be truncated below. */
    if (ehdr.ehdr64.e_entry > 0xffffffffU ||
        ehdr.ehdr64.e_phoff > 0xffffffffU ||
        ehdr.ehdr64.e_shoff > 0xffffffffU) {
      *err_code = BARE_METAL_LOAD_EHDR_OVERFLOW;
      NaClLog(2, "ELFCLASS64 file header fields overflow 32 bits\n");
      return 0;
    }
    image.ehdr.e_entry = (Elf32_Addr) ehdr.ehdr64.e_entry;
    image.ehdr.e_phoff = (Elf32_Off) ehdr.ehdr64.e_phoff;
    image.ehdr.e_shoff = (Elf32_Off) ehdr.ehdr64.e_shoff;
    image.ehdr.e_flags = ehdr.ehdr64.e_flags;
    if (ehdr.ehdr64.e_ehsize != sizeof(ehdr.ehdr64)) {
      *err_code = BARE_METAL_LOAD_BAD_EHSIZE;
      NaClLog(2, "ELFCLASS64 file e_ehsize != %d\n", (int) sizeof(ehdr.ehdr64));
      return 0;
    }
    /* Sizes are rewritten to describe the 32-bit in-memory structures. */
    image.ehdr.e_ehsize = sizeof(image.ehdr);
    image.ehdr.e_phentsize = sizeof(image.phdrs[0]);
    image.ehdr.e_phnum = ehdr.ehdr64.e_phnum;
    image.ehdr.e_shentsize = ehdr.ehdr64.e_shentsize;
    image.ehdr.e_shnum = ehdr.ehdr64.e_shnum;
    image.ehdr.e_shstrndx = ehdr.ehdr64.e_shstrndx;
  } else
#endif
  {
    image.ehdr = ehdr.ehdr32;
  }

  NaClDumpElfHeader(2, &image.ehdr);

  *err_code = NaClBareMetalElfImageValidateElfHeader(&image);
  if (BARE_METAL_LOAD_OK != *err_code) {
    return 0;
  }

  /* read program headers */
  if (image.ehdr.e_phnum > NACL_BARE_METAL_MAX_PROGRAM_HEADERS) {
    *err_code = BARE_METAL_LOAD_TOO_MANY_PROG_HDRS;
    NaClLog(2, "too many prog headers\n");
    return 0;
  }

#if NACL_TARGET_SUBARCH == 64
  if (ELFCLASS64 == ehdr.ehdr64.e_ident[EI_CLASS]) {
    /*
     * We'll load the 64-bit phdrs and convert them to 32-bit format.
     */
    Elf64_Phdr phdr64[NACL_BARE_METAL_MAX_PROGRAM_HEADERS];

    if (ehdr.ehdr64.e_phentsize != sizeof(Elf64_Phdr)) {
      *err_code = BARE_METAL_LOAD_BAD_PHENTSIZE;
      NaClLog(2, "bad prog headers size\n");
      NaClLog(2, " ehdr64.e_phentsize = 0x%"NACL_PRIxElf_Half"\n",
              ehdr.ehdr64.e_phentsize);
      NaClLog(2, " sizeof(Elf64_Phdr) = 0x%"NACL_PRIxS"\n",
              sizeof(Elf64_Phdr));
      return 0;
    }

    /*
     * We know the multiplication won't overflow since we rejected
     * e_phnum values larger than the small constant NACL_MAX_PROGRAM_HEADERS.
     */
    read_ret = (*NACL_VTBL(NaClDesc, ndp)->
                PRead)(ndp,
                       &phdr64[0],
                       image.ehdr.e_phnum * sizeof phdr64[0],
                       (nacl_off64_t) image.ehdr.e_phoff);
    if (NaClSSizeIsNegErrno(&read_ret) ||
        (size_t) read_ret != image.ehdr.e_phnum * sizeof phdr64[0]) {
      *err_code = BARE_METAL_LOAD_READ_ERROR;
      /* NOTE(review): "tp" below looks like a typo for "the". */
      NaClLog(2, "cannot load tp prog headers\n");
      return 0;
    }

    /* Narrow each 64-bit phdr, rejecting any field wider than 32 bits. */
    for (cur_ph = 0; cur_ph < image.ehdr.e_phnum; ++cur_ph) {
      if (phdr64[cur_ph].p_offset > 0xffffffffU ||
          phdr64[cur_ph].p_vaddr > 0xffffffffU ||
          phdr64[cur_ph].p_paddr > 0xffffffffU ||
          phdr64[cur_ph].p_filesz > 0xffffffffU ||
          phdr64[cur_ph].p_memsz > 0xffffffffU ||
          phdr64[cur_ph].p_align > 0xffffffffU) {
        *err_code = BARE_METAL_LOAD_PHDR_OVERFLOW;
        NaClLog(2, "ELFCLASS64 program header fields overflow 32 bits\n");
        return 0;
      }
      image.phdrs[cur_ph].p_type = phdr64[cur_ph].p_type;
      image.phdrs[cur_ph].p_offset = (Elf32_Off) phdr64[cur_ph].p_offset;
      image.phdrs[cur_ph].p_vaddr = (Elf32_Addr) phdr64[cur_ph].p_vaddr;
      image.phdrs[cur_ph].p_paddr = (Elf32_Addr) phdr64[cur_ph].p_paddr;
      image.phdrs[cur_ph].p_filesz = (Elf32_Word) phdr64[cur_ph].p_filesz;
      image.phdrs[cur_ph].p_memsz = (Elf32_Word) phdr64[cur_ph].p_memsz;
      image.phdrs[cur_ph].p_flags = phdr64[cur_ph].p_flags;
      image.phdrs[cur_ph].p_align = (Elf32_Word) phdr64[cur_ph].p_align;
    }
  } else
#endif
  {
    /* ELFCLASS32 input: program headers can be read directly. */
    if (image.ehdr.e_phentsize != sizeof image.phdrs[0]) {
      *err_code = BARE_METAL_LOAD_BAD_PHENTSIZE;
      NaClLog(2, "bad prog headers size\n");
      NaClLog(2, " image.ehdr.e_phentsize = 0x%"NACL_PRIxElf_Half"\n",
              image.ehdr.e_phentsize);
      NaClLog(2, " sizeof image.phdrs[0] = 0x%"NACL_PRIxS"\n",
              sizeof image.phdrs[0]);
      return 0;
    }

    read_ret = (*NACL_VTBL(NaClDesc, ndp)->
                PRead)(ndp,
                       &image.phdrs[0],
                       image.ehdr.e_phnum * sizeof image.phdrs[0],
                       (nacl_off64_t) image.ehdr.e_phoff);
    if (NaClSSizeIsNegErrno(&read_ret) ||
        (size_t) read_ret != image.ehdr.e_phnum * sizeof image.phdrs[0]) {
      *err_code = BARE_METAL_LOAD_READ_ERROR;
      NaClLog(2, "cannot load tp prog headers\n");
      return 0;
    }
  }

  NaClLog(2, "=================================================\n");
  NaClLog(2, "Elf Program headers\n");
  NaClLog(2, "==================================================\n");
  for (cur_ph = 0; cur_ph < image.ehdr.e_phnum; ++cur_ph) {
    NaClDumpElfProgramHeader(2, &image.phdrs[cur_ph]);
  }

  /* we delay allocating till the end to avoid cleanup code */
  result = malloc(sizeof image);
  if (result == 0) {
    *err_code = BARE_METAL_LOAD_NO_MEMORY;
    /* NOTE(review): LOG_FATAL presumably aborts, making the return below
       unreachable -- confirm against nacl_log.h. */
    NaClLog(LOG_FATAL, "no enough memory for image meta data\n");
    return 0;
  }
  memcpy(result, &image, sizeof image);
  *err_code = BARE_METAL_LOAD_OK;
  return result;
}
+
/* Copied from native_client/src/trusted/service_runtime/elf_util.c */
/*
 * Releases an image previously returned by NaClBareMetalElfImageNew().
 * Passing NULL is a no-op (free(NULL) is defined to do nothing).
 */
void NaClBareMetalElfImageDelete(struct NaClBareMetalElfImage *image) {
  free(image);
}
+
/* Copied from native_client/src/trusted/service_runtime/elf_util.c */
/*
 * Returns the entry point address recorded in the image's ELF header.
 * Note this is the raw e_entry value; the load bias computed by
 * NaClBareMetalElfImageLoad is NOT applied here -- presumably the caller
 * adds it for ET_DYN images (TODO confirm).
 */
uintptr_t NaClBareMetalElfImageGetEntryPoint(
    struct NaClBareMetalElfImage *image) {
  return image->ehdr.e_entry;
}
+
+/*
+ * Returns the address of the page starting at address 'addr' for bare metal
+ * mode.
+ */
+static Elf32_Addr NaClBareMetalGetPageStart(Elf32_Addr addr) {
+ return addr & ~BARE_METAL_PAGE_MASK;
+}
+
/*
 * Returns the offset of 'addr' within its memory page; by definition this
 * equals addr - NaClBareMetalGetPageStart(addr).
 */
static Elf32_Addr NaClBareMetalGetPageOffset(Elf32_Addr addr) {
  return addr - NaClBareMetalGetPageStart(addr);
}
+
+/*
+ * Returns the address of the next page after address 'addr', unless 'addr' is
+ * at the start of a page. This equals to:
+ * addr == NaClBareMetalGetPageStart(addr) ?
+ * addr :
+ * NaClBareMetalGetPageStart(addr) + BARE_METAL_PAGE_SIZE;
+ */
+static Elf32_Addr NaClBareMetalGetPageEnd(Elf32_Addr addr) {
+ return NaClBareMetalGetPageStart(addr + BARE_METAL_PAGE_SIZE - 1);
+}
+
/*
 * Translates ELF program-header permission flags (PF_R/PF_W/PF_X) into the
 * equivalent mmap PROT_* flags.
 */
static int PFlagsToProt(int pflags) {
  int prot = 0;
  if (pflags & PF_R) prot |= PROT_READ;
  if (pflags & PF_W) prot |= PROT_WRITE;
  if (pflags & PF_X) prot |= PROT_EXEC;
  return prot;
}
+
+/*
+ * Converts the pflags (in phdr) to NaCl ABI's prot flags.
+ */
+static int PFlagsToNaClProt(int pflags) {
+ return ((pflags & PF_X) ? NACL_ABI_PROT_EXEC : 0) |
+ ((pflags & PF_R) ? NACL_ABI_PROT_READ : 0) |
+ ((pflags & PF_W) ? NACL_ABI_PROT_WRITE : 0);
+}
+
/*
 * Computes the page-aligned size of the address range spanned by all
 * PT_LOAD segments in phdrs[0..phnum).  Returns 0 on error (no PT_LOAD
 * segment, or an end address that wraps around below the start).
 */
static Elf32_Addr NaClBareMetalGetLoadSize(
    const Elf32_Phdr* phdrs,
    int phnum) {
  Elf32_Addr lo = 0xFFFFFFFFU;  /* lowest p_vaddr seen so far */
  Elf32_Addr hi = 0;            /* highest p_vaddr + p_memsz seen so far */
  int i;

  NaClLog(4, "NaClBareMetalGetLoadSize: phnum=%d\n", phnum);
  for (i = 0; i < phnum; ++i) {
    const Elf32_Phdr *ph = &phdrs[i];
    Elf32_Addr seg_end;

    /* Only PT_LOAD segments occupy address space. */
    if (PT_LOAD != ph->p_type) {
      continue;
    }

    if (ph->p_vaddr < lo) {
      lo = ph->p_vaddr;
    }
    seg_end = ph->p_vaddr + ph->p_memsz;
    if (seg_end > hi) {
      hi = seg_end;
    }
  }

  /* Covers both "no PT_LOAD at all" and an overflowing end address. */
  if (lo > hi) {
    return 0;
  }

  return NaClBareMetalGetPageEnd(hi) - NaClBareMetalGetPageStart(lo);
}
+
/*
 * Reserves a contiguous PROT_NONE region of memory large enough for every
 * PT_LOAD segment described by phdrs[0..phnum), and stores into *load_bias
 * the value to add to each segment's p_vaddr to obtain its actual load
 * address (0 when the binary requests a fixed load address).
 */
static NaClBareMetalErrorCode NaClBareMetalReserveMemory(
    const Elf32_Phdr *phdrs,
    int phnum,
    Elf32_Addr *load_bias) {
  NaClLog(4, "NaClBareMetalReserveMemory\n");
  int i;
  void *first_loadable_segment = NULL;
  int mmap_flags;
  void *start;

  Elf32_Addr size = NaClBareMetalGetLoadSize(phdrs, phnum);
  if (size == 0) {
    NaClLog(4,
            "NaClBareMetalReserveMemory failed to calculate size\n");
    return BARE_METAL_LOAD_UNLOADABLE;
  }
  NaClLog(4, "NaClBareMetalReserveMemory: size=%d\n", size);

  /* The first PT_LOAD segment's p_vaddr decides the mapping mode: zero
     means position-independent (let the kernel pick a base). */
  for (i = 0; i < phnum; ++i) {
    if (phdrs[i].p_type == PT_LOAD) {
      first_loadable_segment = (void *)(phdrs[i].p_vaddr);
      break;
    }
  }

  mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (first_loadable_segment != 0) {
    /* Here, the binary requires to be loaded onto fixed addressed memory. */
    /*
     * NOTE(review): Mark Seaborn (2013-12-06) flagged this MAP_FIXED path;
     * initial versions of Bare Metal Mode likely should not support
     * fixed-address binaries at all -- confirm before relying on it.
     */
    mmap_flags |= MAP_FIXED;
  }
  start = mmap(first_loadable_segment, size, PROT_NONE, mmap_flags, -1, 0);
  if (start == MAP_FAILED) {
    NaClLog(4, "NaClBareMetalReserveMemory: failed to mmap\n");
    return BARE_METAL_LOAD_NO_MEMORY;
  }

  /*
   * Bias = actual mapping base minus the requested (page-aligned) base.
   * NOTE(review): 'start - <integer>' is void* arithmetic (a GCC
   * extension), and the final cast truncates on 64-bit hosts; works on the
   * supported 32-bit targets but worth cleaning up.
   */
  *load_bias = (Elf32_Addr)(
      first_loadable_segment == 0 ? 0 :
      start - NaClBareMetalGetPageStart((Elf32_Addr)first_loadable_segment));
  NaClLog(4, "NaClBareMetalReserveMemory: success\n");
  return BARE_METAL_LOAD_OK;
}
+
/*
 * Maps every PT_LOAD segment of phdrs[0..phnum) into memory at
 * p_vaddr + load_bias: the file-backed portion via the descriptor's Map()
 * hook, and any BSS-like tail (p_memsz > p_filesz) via anonymous mmap.
 * Assumes the target address range was already reserved by
 * NaClBareMetalReserveMemory().  Returns BARE_METAL_LOAD_OK on success.
 */
static NaClBareMetalErrorCode NaClBareMetalLoadSegments(
    const Elf32_Phdr *phdrs,
    int phnum,
    Elf32_Addr load_bias,
    struct NaClDesc *ndp) {
  int i;
  const Elf32_Phdr *phdr;
  Elf32_Addr seg_start;       /* first byte of the segment in memory */
  Elf32_Addr seg_end;         /* one past the last byte (incl. bss) */
  Elf32_Addr seg_page_start;  /* seg_start rounded down to a page */
  Elf32_Addr seg_page_end;    /* seg_end rounded up to a page */
  Elf32_Addr seg_file_end;    /* end of the file-backed part in memory */
  Elf32_Addr file_start;      /* segment's byte range within the file */
  Elf32_Addr file_end;
  Elf32_Addr file_page_start;
  Elf32_Addr seg_file_end_offset;
  uintptr_t seg_addr;
  void *zeromap;

  for (i = 0; i < phnum; ++i) {
    phdr = &phdrs[i];
    if (phdr->p_type != PT_LOAD) {
      NaClLog(4, "NaClBareMetalLoadSegments[%d]: Skipped\n", i);
      /* Not a load target. */
      continue;
    }

    NaClLog(4, "NaClBareMetalLoadSegments[%d]: Loading...\n", i);

    seg_start = phdr->p_vaddr + load_bias;
    seg_end = seg_start + phdr->p_memsz;
    seg_page_start = NaClBareMetalGetPageStart(seg_start);
    seg_page_end = NaClBareMetalGetPageEnd(seg_end);
    seg_file_end = seg_start + phdr->p_filesz;

    file_start = phdr->p_offset;
    file_end = file_start + phdr->p_filesz;
    file_page_start = NaClBareMetalGetPageStart(file_start);

    /*
     * Map the file-backed part at the segment's page-aligned start.  This
     * presumes p_vaddr and p_offset are congruent modulo the page size --
     * TODO confirm this is validated elsewhere.
     */
    seg_addr = (*NACL_VTBL(NaClDesc, ndp)->Map)(
        ndp,
        NaClDescEffectorTrustedMem(),
        (void *) seg_page_start,
        file_end - file_page_start,
        PFlagsToNaClProt(phdr->p_flags),
        NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_FIXED,
        file_page_start);
    if (NaClPtrIsNegErrno(&seg_addr)) {
      NaClLog(
          4,
          "NaClBareMetalLoadSegments[%d]: mmap failed, %"NACL_PRIdPTR".\n",
          i, seg_addr);
      return BARE_METAL_LOAD_NO_MEMORY;
    }

    /* Fill Zero between the segment end and the page boundary if necessary
       (i.e. if the segment doesn't end on a page boundary).  Only possible
       (and only needed) for writable segments. */
    seg_file_end_offset = NaClBareMetalGetPageOffset(seg_file_end);
    if ((phdr->p_flags & PF_W) && seg_file_end_offset > 0) {
      memset((void *) seg_file_end, 0,
             BARE_METAL_PAGE_SIZE - seg_file_end_offset);
    }

    /* seg_file_end is now the first page address after the file
     * content. If seg_end is larger, we need to zero anything
     * between them. This is done by using a private anonymous
     * map for all extra pages.
     */
    seg_file_end = NaClBareMetalGetPageEnd(seg_file_end);
    if (seg_page_end > seg_file_end) {
      zeromap = mmap((void *) seg_file_end,
                     seg_page_end - seg_file_end,
                     PFlagsToProt(phdr->p_flags),
                     MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                     -1,
                     0);
      if (zeromap == MAP_FAILED) {
        NaClLog(4, "NaClBareMetalLoadSegments[%d]: Failed to zeromap.", i);
        return BARE_METAL_LOAD_NO_MEMORY;
      }
    }
  }
  return BARE_METAL_LOAD_OK;
}
+
+NaClBareMetalErrorCode NaClBareMetalElfImageLoad(
+ struct NaClBareMetalElfImage *image,
+ struct NaClDesc *ndp) {
+ NaClLog(3, "NaClBareMetalElfImageLoad\n");
+
+ Elf32_Addr load_bias = 0;
+ NaClBareMetalErrorCode error = NaClBareMetalReserveMemory(
+ image->phdrs, image->ehdr.e_phnum, &load_bias);
+ if (error != BARE_METAL_LOAD_OK) {
+ NaClLog(4, "NaClBareMetalElfImageLoad: failed to allocate memory.\n");
+ return error;
+ }
+ NaClLog(4,
+ "NaClBareMetalElfImageLoad: Loader maps the program to 0x%X",
+ load_bias);
+
+ error = NaClBareMetalLoadSegments(
+ image->phdrs, image->ehdr.e_phnum, load_bias, ndp);
+ if (error != BARE_METAL_LOAD_OK) {
+ NaClLog(4,
+ "NaClBareMetalElfImageLoad: Failed to load segments\n");
+ return error;
+ }
+
+ return BARE_METAL_LOAD_OK;
+}
« components/nacl/loader/bare_metal/elf_util.h ('K') | « components/nacl/loader/bare_metal/elf_util.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698