Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(613)

Side by Side Diff: components/nacl/loader/bare_metal/elf_util.c

Issue 100373005: Initial implementation of Bare Metal Mode for NaCl. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 7 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
(Empty)
1 /*
2 * Copyright 2013 The Chromium Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file.
5 */
6
7 #include "components/nacl/loader/bare_metal/elf_util.h"
8
9 #include <string.h>
10 #include <sys/mman.h>
11
12 #define NACL_LOG_MODULE_NAME "elf_util"
13
14 #include "native_client/src/include/elf.h"
15 #include "native_client/src/include/portability.h"
16 #include "native_client/src/shared/platform/nacl_host_desc.h"
17 #include "native_client/src/shared/platform/nacl_log.h"
18 #include "native_client/src/trusted/desc/nacl_desc_base.h"
19 #include "native_client/src/trusted/desc/nacl_desc_effector_trusted_mem.h"
20
21 /* Limit of elf program headers allowed. */
22 #define NACL_BARE_METAL_MAX_PROGRAM_HEADERS 128
23
24 /* Copied from native_client/src/trusted/service_runtime/nacl_config.h */
25 #if NACL_ARCH(NACL_BUILD_ARCH) == NACL_x86
26 # if NACL_BUILD_SUBARCH == 32
27 # define NACL_ELF_E_MACHINE EM_386
28 # elif NACL_BUILD_SUBARCH == 64
29 # define NACL_ELF_E_MACHINE EM_X86_64
30 # else /* NACL_BUILD_SUBARCH */
31 # error Unknown platform!
32 # endif /* NACL_BUILD_SUBARCH */
33 #elif NACL_ARCH(NACL_BUILD_ARCH) == NACL_arm
34 # define NACL_ELF_E_MACHINE EM_ARM
35 #elif NACL_ARCH(NACL_BUILD_ARCH) == NACL_mips
36 # define NACL_ELF_E_MACHINE EM_MIPS
37 #else /* NACL_ARCH(NACL_BUILD_ARCH) */
38 # error Unknown platform!
39 #endif /* NACL_ARCH(NACL_BUILD_ARCH) */
40
41 /* Copied from native_client/src/trusted/service_runtime/include/bits/mman.h */
42 #define NACL_ABI_PROT_READ 0x1 /* Page can be read. */
43 #define NACL_ABI_PROT_WRITE 0x2 /* Page can be written. */
44 #define NACL_ABI_PROT_EXEC 0x4 /* Page can be executed. */
45 #define NACL_ABI_PROT_NONE 0x0 /* Page can not be accessed. */
46 #define NACL_ABI_MAP_PRIVATE 0x02 /* Changes are private. */
47 #define NACL_ABI_MAP_FIXED 0x10 /* Interpret addr exactly. */
48
49 /* Page size for Bare Metal Mode. */
50 #define BARE_METAL_PAGE_SIZE 4096
51 #define BARE_METAL_PAGE_MASK (BARE_METAL_PAGE_SIZE-1)
52
/* private */
/*
 * In-memory copy of the ELF metadata for a Bare Metal nexe, normalized to
 * ELFCLASS32 layout (64-bit headers are converted on load; see
 * NaClBareMetalElfImageNew).  Only the first ehdr.e_phnum entries of
 * phdrs are valid.
 */
struct NaClBareMetalElfImage {
  Elf_Ehdr ehdr;                                        /* ELF file header. */
  Elf_Phdr phdrs[NACL_BARE_METAL_MAX_PROGRAM_HEADERS];  /* Program headers. */
};
58
/* Copied from native_client/src/trusted/service_runtime/elf_util.c */
/*
 * Logs every field of |elf_hdr| at verbosity |loglevel| for debugging.
 * Has no effect on the image itself.
 */
static void NaClDumpElfHeader(int loglevel, Elf_Ehdr *elf_hdr) {

/*
 * Logs one ELF header member: #m stringizes the member name, and f is the
 * printf length/format suffix appended after "%".
 */
#define DUMP(m,f) do { NaClLog(loglevel, \
                               #m " = %" f "\n", \
                               elf_hdr->m); } while (0)

  NaClLog(loglevel, "=================================================\n");
  NaClLog(loglevel, "Elf header\n");
  NaClLog(loglevel, "==================================================\n");

  /* e_ident+1 skips the 0x7f byte so ".3s" prints the "ELF" magic chars. */
  DUMP(e_ident+1, ".3s");
  DUMP(e_type, "#x");
  DUMP(e_machine, "#x");
  DUMP(e_version, "#x");
  DUMP(e_entry, "#"NACL_PRIxElf_Addr);
  DUMP(e_phoff, "#"NACL_PRIxElf_Off);
  DUMP(e_shoff, "#"NACL_PRIxElf_Off);
  DUMP(e_flags, "#"NACL_PRIxElf_Word);
  DUMP(e_ehsize, "#"NACL_PRIxElf_Half);
  DUMP(e_phentsize, "#"NACL_PRIxElf_Half);
  DUMP(e_phnum, "#"NACL_PRIxElf_Half);
  DUMP(e_shentsize, "#"NACL_PRIxElf_Half);
  DUMP(e_shnum, "#"NACL_PRIxElf_Half);
  DUMP(e_shstrndx, "#"NACL_PRIxElf_Half);
#undef DUMP
  NaClLog(loglevel, "sizeof(Elf32_Ehdr) = 0x%x\n", (int) sizeof *elf_hdr);
}
87
/* Copied from native_client/src/trusted/service_runtime/elf_util.c */
/*
 * Logs every field of one program header |phdr| at verbosity |loglevel|
 * for debugging, plus a symbolic decoding of the PF_R/PF_W/PF_X flag bits.
 */
static void NaClDumpElfProgramHeader(int loglevel,
                                     Elf_Phdr *phdr) {
/* Logs one program header member with the given printf format suffix. */
#define DUMP(mem, f) do { \
    NaClLog(loglevel, "%s: %" f "\n", #mem, phdr->mem); \
  } while (0)

  DUMP(p_type, NACL_PRIxElf_Word);
  DUMP(p_offset, NACL_PRIxElf_Off);
  DUMP(p_vaddr, NACL_PRIxElf_Addr);
  DUMP(p_paddr, NACL_PRIxElf_Addr);
  DUMP(p_filesz, NACL_PRIxElf_Xword);
  DUMP(p_memsz, NACL_PRIxElf_Xword);
  DUMP(p_flags, NACL_PRIxElf_Word);
  NaClLog(2, " (%s %s %s)\n",
          (phdr->p_flags & PF_R) ? "PF_R" : "",
          (phdr->p_flags & PF_W) ? "PF_W" : "",
          (phdr->p_flags & PF_X) ? "PF_X" : "");
  DUMP(p_align, NACL_PRIxElf_Xword);
#undef DUMP
  NaClLog(loglevel, "\n");
}
110
111 /* Copied from native_client/src/trusted/service_runtime/elf_util.c */
112 static NaClBareMetalErrorCode NaClBareMetalElfImageValidateElfHeader(
113 struct NaClBareMetalElfImage *image) {
114 const Elf_Ehdr *hdr = &image->ehdr;
115
116 if (memcmp(hdr->e_ident, ELFMAG, SELFMAG)) {
117 NaClLog(LOG_ERROR, "bad elf magic\n");
118 return BARE_METAL_LOAD_BAD_ELF_MAGIC;
119 }
120
121 if (ELFCLASS32 != hdr->e_ident[EI_CLASS]) {
122 NaClLog(LOG_ERROR, "bad elf class\n");
123 return BARE_METAL_LOAD_NOT_32_BIT;
124 }
125
126 if (ET_EXEC != hdr->e_type) {
Mark Seaborn 2013/12/06 03:21:16 This should require ET_DYN -- see my other comment
hidehiko 2013/12/06 17:40:02 Done.
127 NaClLog(LOG_ERROR, "non executable\n");
128 return BARE_METAL_LOAD_NOT_EXEC;
129 }
130
131 if (NACL_ELF_E_MACHINE != hdr->e_machine) {
132 NaClLog(LOG_ERROR, "bad machine: %"NACL_PRIxElf_Half"\n", hdr->e_machine);
133 return BARE_METAL_LOAD_BAD_MACHINE;
134 }
135
136 if (EV_CURRENT != hdr->e_version) {
137 NaClLog(LOG_ERROR, "bad elf version: %"NACL_PRIxElf_Word"\n",
138 hdr->e_version);
139 return BARE_METAL_LOAD_BAD_ELF_VERS;
140 }
141
142 return BARE_METAL_LOAD_OK;
143 }
144
145 /* Copied from native_client/src/trusted/service_runtime/elf_util.c */
146 struct NaClBareMetalElfImage *NaClBareMetalElfImageNew(
147 struct NaClDesc *ndp,
148 NaClBareMetalErrorCode *err_code) {
149 ssize_t read_ret;
150 struct NaClBareMetalElfImage *result;
151 struct NaClBareMetalElfImage image;
152 union {
153 Elf32_Ehdr ehdr32;
154 #if NACL_TARGET_SUBARCH == 64
155 Elf64_Ehdr ehdr64;
156 #endif
157 } ehdr;
158 int cur_ph;
159
160 /*
161 * We read the larger size of an ELFCLASS64 header even if it turns out
162 * we're reading an ELFCLASS32 file. No usable ELFCLASS32 binary could
163 * be so small that it's not larger than Elf64_Ehdr anyway.
164 */
165 read_ret = (*NACL_VTBL(NaClDesc, ndp)->PRead)(ndp, &ehdr, sizeof ehdr, 0);
166 if (NaClSSizeIsNegErrno(&read_ret) || (size_t) read_ret != sizeof ehdr) {
167 *err_code = BARE_METAL_LOAD_READ_ERROR;
168 NaClLog(2, "could not load elf headers\n");
169 return 0;
170 }
171
172 #if NACL_TARGET_SUBARCH == 64
173 if (ELFCLASS64 == ehdr.ehdr64.e_ident[EI_CLASS]) {
174 /*
175 * Convert ELFCLASS64 format to ELFCLASS32 format.
176 * The initial four fields are the same in both classes.
177 */
178 memcpy(image.ehdr.e_ident, ehdr.ehdr64.e_ident, EI_NIDENT);
179 image.ehdr.e_ident[EI_CLASS] = ELFCLASS32;
180 image.ehdr.e_type = ehdr.ehdr64.e_type;
181 image.ehdr.e_machine = ehdr.ehdr64.e_machine;
182 image.ehdr.e_version = ehdr.ehdr64.e_version;
183 if (ehdr.ehdr64.e_entry > 0xffffffffU ||
184 ehdr.ehdr64.e_phoff > 0xffffffffU ||
185 ehdr.ehdr64.e_shoff > 0xffffffffU) {
186 *err_code = BARE_METAL_LOAD_EHDR_OVERFLOW;
187 NaClLog(2, "ELFCLASS64 file header fields overflow 32 bits\n");
188 return 0;
189 }
190 image.ehdr.e_entry = (Elf32_Addr) ehdr.ehdr64.e_entry;
191 image.ehdr.e_phoff = (Elf32_Off) ehdr.ehdr64.e_phoff;
192 image.ehdr.e_shoff = (Elf32_Off) ehdr.ehdr64.e_shoff;
193 image.ehdr.e_flags = ehdr.ehdr64.e_flags;
194 if (ehdr.ehdr64.e_ehsize != sizeof(ehdr.ehdr64)) {
195 *err_code = BARE_METAL_LOAD_BAD_EHSIZE;
196 NaClLog(2, "ELFCLASS64 file e_ehsize != %d\n", (int) sizeof(ehdr.ehdr64));
197 return 0;
198 }
199 image.ehdr.e_ehsize = sizeof(image.ehdr);
200 image.ehdr.e_phentsize = sizeof(image.phdrs[0]);
201 image.ehdr.e_phnum = ehdr.ehdr64.e_phnum;
202 image.ehdr.e_shentsize = ehdr.ehdr64.e_shentsize;
203 image.ehdr.e_shnum = ehdr.ehdr64.e_shnum;
204 image.ehdr.e_shstrndx = ehdr.ehdr64.e_shstrndx;
205 } else
206 #endif
207 {
208 image.ehdr = ehdr.ehdr32;
209 }
210
211 NaClDumpElfHeader(2, &image.ehdr);
212
213 *err_code = NaClBareMetalElfImageValidateElfHeader(&image);
214 if (BARE_METAL_LOAD_OK != *err_code) {
215 return 0;
216 }
217
218 /* read program headers */
219 if (image.ehdr.e_phnum > NACL_BARE_METAL_MAX_PROGRAM_HEADERS) {
220 *err_code = BARE_METAL_LOAD_TOO_MANY_PROG_HDRS;
221 NaClLog(2, "too many prog headers\n");
222 return 0;
223 }
224
225 #if NACL_TARGET_SUBARCH == 64
226 if (ELFCLASS64 == ehdr.ehdr64.e_ident[EI_CLASS]) {
227 /*
228 * We'll load the 64-bit phdrs and convert them to 32-bit format.
229 */
230 Elf64_Phdr phdr64[NACL_BARE_METAL_MAX_PROGRAM_HEADERS];
231
232 if (ehdr.ehdr64.e_phentsize != sizeof(Elf64_Phdr)) {
233 *err_code = BARE_METAL_LOAD_BAD_PHENTSIZE;
234 NaClLog(2, "bad prog headers size\n");
235 NaClLog(2, " ehdr64.e_phentsize = 0x%"NACL_PRIxElf_Half"\n",
236 ehdr.ehdr64.e_phentsize);
237 NaClLog(2, " sizeof(Elf64_Phdr) = 0x%"NACL_PRIxS"\n",
238 sizeof(Elf64_Phdr));
239 return 0;
240 }
241
242 /*
243 * We know the multiplication won't overflow since we rejected
244 * e_phnum values larger than the small constant NACL_MAX_PROGRAM_HEADERS.
245 */
246 read_ret = (*NACL_VTBL(NaClDesc, ndp)->
247 PRead)(ndp,
248 &phdr64[0],
249 image.ehdr.e_phnum * sizeof phdr64[0],
250 (nacl_off64_t) image.ehdr.e_phoff);
251 if (NaClSSizeIsNegErrno(&read_ret) ||
252 (size_t) read_ret != image.ehdr.e_phnum * sizeof phdr64[0]) {
253 *err_code = BARE_METAL_LOAD_READ_ERROR;
254 NaClLog(2, "cannot load tp prog headers\n");
255 return 0;
256 }
257
258 for (cur_ph = 0; cur_ph < image.ehdr.e_phnum; ++cur_ph) {
259 if (phdr64[cur_ph].p_offset > 0xffffffffU ||
260 phdr64[cur_ph].p_vaddr > 0xffffffffU ||
261 phdr64[cur_ph].p_paddr > 0xffffffffU ||
262 phdr64[cur_ph].p_filesz > 0xffffffffU ||
263 phdr64[cur_ph].p_memsz > 0xffffffffU ||
264 phdr64[cur_ph].p_align > 0xffffffffU) {
265 *err_code = BARE_METAL_LOAD_PHDR_OVERFLOW;
266 NaClLog(2, "ELFCLASS64 program header fields overflow 32 bits\n");
267 return 0;
268 }
269 image.phdrs[cur_ph].p_type = phdr64[cur_ph].p_type;
270 image.phdrs[cur_ph].p_offset = (Elf32_Off) phdr64[cur_ph].p_offset;
271 image.phdrs[cur_ph].p_vaddr = (Elf32_Addr) phdr64[cur_ph].p_vaddr;
272 image.phdrs[cur_ph].p_paddr = (Elf32_Addr) phdr64[cur_ph].p_paddr;
273 image.phdrs[cur_ph].p_filesz = (Elf32_Word) phdr64[cur_ph].p_filesz;
274 image.phdrs[cur_ph].p_memsz = (Elf32_Word) phdr64[cur_ph].p_memsz;
275 image.phdrs[cur_ph].p_flags = phdr64[cur_ph].p_flags;
276 image.phdrs[cur_ph].p_align = (Elf32_Word) phdr64[cur_ph].p_align;
277 }
278 } else
279 #endif
280 {
281 if (image.ehdr.e_phentsize != sizeof image.phdrs[0]) {
282 *err_code = BARE_METAL_LOAD_BAD_PHENTSIZE;
283 NaClLog(2, "bad prog headers size\n");
284 NaClLog(2, " image.ehdr.e_phentsize = 0x%"NACL_PRIxElf_Half"\n",
285 image.ehdr.e_phentsize);
286 NaClLog(2, " sizeof image.phdrs[0] = 0x%"NACL_PRIxS"\n",
287 sizeof image.phdrs[0]);
288 return 0;
289 }
290
291 read_ret = (*NACL_VTBL(NaClDesc, ndp)->
292 PRead)(ndp,
293 &image.phdrs[0],
294 image.ehdr.e_phnum * sizeof image.phdrs[0],
295 (nacl_off64_t) image.ehdr.e_phoff);
296 if (NaClSSizeIsNegErrno(&read_ret) ||
297 (size_t) read_ret != image.ehdr.e_phnum * sizeof image.phdrs[0]) {
298 *err_code = BARE_METAL_LOAD_READ_ERROR;
299 NaClLog(2, "cannot load tp prog headers\n");
300 return 0;
301 }
302 }
303
304 NaClLog(2, "=================================================\n");
305 NaClLog(2, "Elf Program headers\n");
306 NaClLog(2, "==================================================\n");
307 for (cur_ph = 0; cur_ph < image.ehdr.e_phnum; ++cur_ph) {
308 NaClDumpElfProgramHeader(2, &image.phdrs[cur_ph]);
309 }
310
311 /* we delay allocating till the end to avoid cleanup code */
312 result = malloc(sizeof image);
313 if (result == 0) {
314 *err_code = BARE_METAL_LOAD_NO_MEMORY;
315 NaClLog(LOG_FATAL, "no enough memory for image meta data\n");
316 return 0;
317 }
318 memcpy(result, &image, sizeof image);
319 *err_code = BARE_METAL_LOAD_OK;
320 return result;
321 }
322
/* Copied from native_client/src/trusted/service_runtime/elf_util.c */
/*
 * Releases an image allocated by NaClBareMetalElfImageNew.
 * Passing NULL is a no-op (free(NULL) is defined to do nothing).
 */
void NaClBareMetalElfImageDelete(struct NaClBareMetalElfImage *image) {
  free(image);
}
327
/* Copied from native_client/src/trusted/service_runtime/elf_util.c */
/*
 * Returns the ELF entry point (e_entry) recorded in |image|'s header.
 * NOTE(review): this is the unbiased value from the file; callers
 * presumably must add the load bias chosen at load time — confirm.
 */
uintptr_t NaClBareMetalElfImageGetEntryPoint(
    struct NaClBareMetalElfImage *image) {
  return image->ehdr.e_entry;
}
333
334 /*
335 * Returns the address of the page starting at address 'addr' for bare metal
336 * mode.
337 */
338 static Elf32_Addr NaClBareMetalGetPageStart(Elf32_Addr addr) {
339 return addr & ~BARE_METAL_PAGE_MASK;
340 }
341
342 /*
343 * Returns the offset of address 'addr' in its memory page. In other words,
344 * this equals to 'addr' - NaClBareMetalGetPageStart(addr).
345 */
346 static Elf32_Addr NaClBareMetalGetPageOffset(Elf32_Addr addr) {
347 return addr & BARE_METAL_PAGE_MASK;
348 }
349
350 /*
351 * Returns the address of the next page after address 'addr', unless 'addr' is
352 * at the start of a page. This equals to:
353 * addr == NaClBareMetalGetPageStart(addr) ?
354 * addr :
355 * NaClBareMetalGetPageStart(addr) + BARE_METAL_PAGE_SIZE;
356 */
357 static Elf32_Addr NaClBareMetalGetPageEnd(Elf32_Addr addr) {
358 return NaClBareMetalGetPageStart(addr + BARE_METAL_PAGE_SIZE - 1);
359 }
360
/*
 * Translates ELF program header flags (PF_R/PF_W/PF_X) into the
 * corresponding host mmap protection bits (PROT_*).
 */
static int PFlagsToProt(int pflags) {
  int prot = 0;
  if (pflags & PF_R) {
    prot |= PROT_READ;
  }
  if (pflags & PF_W) {
    prot |= PROT_WRITE;
  }
  if (pflags & PF_X) {
    prot |= PROT_EXEC;
  }
  return prot;
}
369
370 /*
371 * Converts the pflags (in phdr) to NaCl ABI's prot flags.
372 */
373 static int PFlagsToNaClProt(int pflags) {
374 return ((pflags & PF_X) ? NACL_ABI_PROT_EXEC : 0) |
375 ((pflags & PF_R) ? NACL_ABI_PROT_READ : 0) |
376 ((pflags & PF_W) ? NACL_ABI_PROT_WRITE : 0);
377 }
378
/*
 * Computes the total span of address space, in bytes and rounded out to
 * page boundaries, needed to hold every PT_LOAD segment in |phdrs|.
 * Returns 0 if there is no PT_LOAD segment or the span is degenerate
 * (end address wrapped around 32 bits).
 */
static Elf32_Addr NaClBareMetalGetLoadSize(
    const Elf32_Phdr* phdrs,
    int phnum) {
  int i;
  Elf32_Addr lowest = 0xFFFFFFFFU;
  Elf32_Addr highest = 0;

  NaClLog(4, "NaClBareMetalGetLoadSize: phnum=%d\n", phnum);
  for (i = 0; i < phnum; ++i) {
    const Elf32_Phdr *ph = &phdrs[i];
    Elf32_Addr segment_end;

    /* Only PT_LOAD segments occupy address space. */
    if (ph->p_type != PT_LOAD) {
      continue;
    }

    if (ph->p_vaddr < lowest) {
      lowest = ph->p_vaddr;
    }
    segment_end = ph->p_vaddr + ph->p_memsz;
    if (segment_end > highest) {
      highest = segment_end;
    }
  }

  if (lowest > highest) {
    /* No loadable segment found, or the end address overflowed. */
    return 0;
  }

  return NaClBareMetalGetPageEnd(highest) - NaClBareMetalGetPageStart(lowest);
}
418
419 /*
420 * Reserves the memory for the given phdrs, and stores the memory address,
421 * its size and bias to the load_start, load_size and load_bias.
422 */
423 static NaClBareMetalErrorCode NaClBareMetalReserveMemory(
424 const Elf32_Phdr *phdrs,
425 int phnum,
426 Elf32_Addr *load_bias) {
427 NaClLog(4, "NaClBareMetalReserveMemory\n");
428 int i;
429 void *first_loadable_segment = NULL;
430 int mmap_flags;
431 void *start;
432
433 Elf32_Addr size = NaClBareMetalGetLoadSize(phdrs, phnum);
434 if (size == 0) {
435 NaClLog(4,
436 "NaClBareMetalReserveMemory failed to calculate size\n");
437 return BARE_METAL_LOAD_UNLOADABLE;
438 }
439 NaClLog(4, "NaClBareMetalReserveMemory: size=%d\n", size);
440
441 for (i = 0; i < phnum; ++i) {
442 if (phdrs[i].p_type == PT_LOAD) {
443 first_loadable_segment = (void *)(phdrs[i].p_vaddr);
444 break;
445 }
446 }
447
448 mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
449 if (first_loadable_segment != 0) {
450 /* Here, the binary requires to be loaded onto fixed addressed memory. */
451 mmap_flags |= MAP_FIXED;
Mark Seaborn 2013/12/06 03:21:16 For the initial versions of Bare Metal Mode, this
hidehiko 2013/12/06 17:40:02 Done.
452 }
453 start = mmap(first_loadable_segment, size, PROT_NONE, mmap_flags, -1, 0);
454 if (start == MAP_FAILED) {
455 NaClLog(4, "NaClBareMetalReserveMemory: failed to mmap\n");
456 return BARE_METAL_LOAD_NO_MEMORY;
457 }
458
459 *load_bias = (Elf32_Addr)(
460 first_loadable_segment == 0 ? 0 :
461 start - NaClBareMetalGetPageStart((Elf32_Addr)first_loadable_segment));
462 NaClLog(4, "NaClBareMetalReserveMemory: success\n");
463 return BARE_METAL_LOAD_OK;
464 }
465
/*
 * Maps every PT_LOAD segment of the image into memory at
 * p_vaddr + load_bias, reading file contents from |ndp| and zero-filling
 * the BSS-like tail (p_memsz beyond p_filesz) with anonymous pages.
 * NOTE(review): this mapping/zeroing sequence appears adapted from the
 * bionic dynamic linker's phdr handling — confirm provenance.
 */
static NaClBareMetalErrorCode NaClBareMetalLoadSegments(
    const Elf32_Phdr *phdrs,
    int phnum,
    Elf32_Addr load_bias,
    struct NaClDesc *ndp) {
  int i;
  const Elf32_Phdr *phdr;
  Elf32_Addr seg_start;        /* Biased start of the segment in memory. */
  Elf32_Addr seg_end;          /* Biased end of p_memsz. */
  Elf32_Addr seg_page_start;   /* seg_start rounded down to a page. */
  Elf32_Addr seg_page_end;     /* seg_end rounded up to a page. */
  Elf32_Addr seg_file_end;     /* Biased end of the file-backed part. */
  Elf32_Addr file_start;       /* Segment's start offset in the file. */
  Elf32_Addr file_end;         /* Segment's end offset in the file. */
  Elf32_Addr file_page_start;  /* file_start rounded down to a page. */
  Elf32_Addr seg_file_end_offset;
  uintptr_t seg_addr;
  void *zeromap;

  for (i = 0; i < phnum; ++i) {
    phdr = &phdrs[i];
    if (phdr->p_type != PT_LOAD) {
      NaClLog(4, "NaClBareMetalLoadSegments[%d]: Skipped\n", i);
      /* Not a load target. */
      continue;
    }

    NaClLog(4, "NaClBareMetalLoadSegments[%d]: Loading...\n", i);

    seg_start = phdr->p_vaddr + load_bias;
    seg_end = seg_start + phdr->p_memsz;
    seg_page_start = NaClBareMetalGetPageStart(seg_start);
    seg_page_end = NaClBareMetalGetPageEnd(seg_end);
    seg_file_end = seg_start + phdr->p_filesz;

    file_start = phdr->p_offset;
    file_end = file_start + phdr->p_filesz;
    file_page_start = NaClBareMetalGetPageStart(file_start);

    /*
     * Map the file-backed portion of the segment at its (page-aligned)
     * biased address, over the region reserved earlier.
     */
    seg_addr = (*NACL_VTBL(NaClDesc, ndp)->Map)(
        ndp,
        NaClDescEffectorTrustedMem(),
        (void *) seg_page_start,
        file_end - file_page_start,
        PFlagsToNaClProt(phdr->p_flags),
        NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_FIXED,
        file_page_start);
    if (NaClPtrIsNegErrno(&seg_addr)) {
      NaClLog(
          4,
          "NaClBareMetalLoadSegments[%d]: mmap failed, %"NACL_PRIdPTR".\n",
          i, seg_addr);
      return BARE_METAL_LOAD_NO_MEMORY;
    }

    /* Fill Zero between the segment end and the page boundary if necessary
       (i.e. if the segment doesn't end on a page boundary) */
    seg_file_end_offset = NaClBareMetalGetPageOffset(seg_file_end);
    if ((phdr->p_flags & PF_W) && seg_file_end_offset > 0) {
      /* Only writable segments can (and need to) be scrubbed in place. */
      memset((void *) seg_file_end, 0,
             BARE_METAL_PAGE_SIZE - seg_file_end_offset);
    }

    /* seg_file_end is now the first page address after the file
     * content. If seg_end is larger, we need to zero anything
     * between them. This is done by using a private anonymous
     * map for all extra pages.
     */
    seg_file_end = NaClBareMetalGetPageEnd(seg_file_end);
    if (seg_page_end > seg_file_end) {
      zeromap = mmap((void *) seg_file_end,
                     seg_page_end - seg_file_end,
                     PFlagsToProt(phdr->p_flags),
                     MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                     -1,
                     0);
      if (zeromap == MAP_FAILED) {
        NaClLog(4, "NaClBareMetalLoadSegments[%d]: Failed to zeromap.", i);
        return BARE_METAL_LOAD_NO_MEMORY;
      }
    }
  }
  return BARE_METAL_LOAD_OK;
}
550
551 NaClBareMetalErrorCode NaClBareMetalElfImageLoad(
552 struct NaClBareMetalElfImage *image,
553 struct NaClDesc *ndp) {
554 NaClLog(3, "NaClBareMetalElfImageLoad\n");
555
556 Elf32_Addr load_bias = 0;
557 NaClBareMetalErrorCode error = NaClBareMetalReserveMemory(
558 image->phdrs, image->ehdr.e_phnum, &load_bias);
559 if (error != BARE_METAL_LOAD_OK) {
560 NaClLog(4, "NaClBareMetalElfImageLoad: failed to allocate memory.\n");
561 return error;
562 }
563 NaClLog(4,
564 "NaClBareMetalElfImageLoad: Loader maps the program to 0x%X",
565 load_bias);
566
567 error = NaClBareMetalLoadSegments(
568 image->phdrs, image->ehdr.e_phnum, load_bias, ndp);
569 if (error != BARE_METAL_LOAD_OK) {
570 NaClLog(4,
571 "NaClBareMetalElfImageLoad: Failed to load segments\n");
572 return error;
573 }
574
575 return BARE_METAL_LOAD_OK;
576 }
OLDNEW
« components/nacl/loader/bare_metal/elf_util.h ('K') | « components/nacl/loader/bare_metal/elf_util.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698