Chromium Code Reviews

Side by Side Diff: chrome/nacl/nacl_helper_bootstrap_linux.c

Issue 7795010: Use chain-loading for Linux nacl_helper (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 9 years, 3 months ago
Old version (left side, lines 1-24, replaced by this patch):

1 /* Copyright (c) 2011 The Chromium Authors. All rights reserved.
2  * Use of this source code is governed by a BSD-style license that can be
3  * found in the LICENSE file.
4  *
5  * Bootstraping the nacl_helper. This executable reserves the bottom 1G
6  * of the address space, then invokes nacl_helper_init. Note that,
7  * as the text of this executable will eventually be overwritten by the
8  * native_client module, nacl_helper_init must not attempt to return.
9  */
10
11 #include <stdlib.h>
12
13 /* reserve 1GB of space */
14 #define ONEGIG (1 << 30)
15 char nacl_reserved_space[ONEGIG];
16
17 void nacl_helper_init(int argc, char *argv[],
18                       const char *nacl_reserved_space);
19
20 int main(int argc, char *argv[]) {
21   nacl_helper_init(argc, argv, nacl_reserved_space);
22   abort();
23   return 0; // convince the tools I'm sane.
24 }

New version (right side, lines 1-24; continues below):

1 /* Copyright (c) 2011 The Chromium Authors. All rights reserved.
2  * Use of this source code is governed by a BSD-style license that can be
3  * found in the LICENSE file.
4  *
5  * This is a standalone program that loads and run the dynamic linker.
Brad Chen 2011/08/29 23:04:57 'runs'
6  * This program itself must be linked statically. To keep it small, it's
7  * written to avoid all dependencies on libc and standard startup code.
8  * Hence, this should be linked using -nostartfiles. It must be compiled
9  * with -fno-stack-protector to ensure the compiler won't emit code that
10  * presumes some special setup has been done.
11  *
12  * On ARM, the compiler will emit calls to some libc functions, so we
13  * cannot link with -nostdlib. The functions it does use (memset and
14  * __aeabi_* functions for integer division) are sufficiently small and
15  * self-contained in ARM's libc.a that we don't have any problem using
16  * the libc definitions though we aren't using the rest of libc or doing
17  * any of the setup it might expect.
18  */
19
20 #include <asm/unistd.h>
21 #include <elf.h>
22 #include <fcntl.h>
23 #include <link.h>
24 #include <stddef.h>
25 #include <stdint.h>
26 #include <sys/mman.h>
27 #include <sys/types.h>
28 #include <sys/uio.h>
29
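The header comment above spells out the build constraints for this file. As a rough sketch only, a link line consistent with those constraints might look like the following; the actual flags live in the Chromium build files, so treat every detail here as an assumption rather than the real build rule.

/* Illustrative only (assumed, not the actual build rule):
 *
 *   gcc -O2 -fno-stack-protector -nostartfiles \
 *       -o nacl_helper_bootstrap nacl_helper_bootstrap_linux.c
 *
 * On ARM, -nostdlib is deliberately not added, per the header comment.
 */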
30 #define TEXT_START_ADDRESS 0x10000
31 #define ADDRESS_SPACE_RESERVE (1 << 30) /* one gigabyte */
32 #define BSS_RESERVE_SIZE (ADDRESS_SPACE_RESERVE - TEXT_START_ADDRESS)
33
34 #if defined(__i386__)
35 # define DYNAMIC_LINKER "/lib/ld-linux.so.2"
36 #elif defined(__x86_64__)
37 # define DYNAMIC_LINKER "/lib64/ld-linux-x86-64.so.2"
38 #elif defined(__ARM_EABI__)
39 # define DYNAMIC_LINKER "/lib/ld-linux.so.3"
40 #else
41 # error "Don't know the dynamic linker file name for this architecture!"
42 #endif
43
44 /*
45 * We're not using <string.h> functions here to avoid dependencies.
46 * In the x86 libc, even "simple" functions like memset and strlen can
47 * depend on complex startup code, because in newer libc
48 * implementations they are defined using STT_GNU_IFUNC.
49 */
50
51 static void my_bzero(void *buf, size_t n) {
52 char *p = buf;
53 while (n-- > 0)
54 *p++ = 0;
55 }
56
57 static size_t my_strlen(const char *s) {
58 size_t n = 0;
59 while (*s++ != '\0')
60 ++n;
61 return n;
62 }
63
64
65 /*
66 * Forward declaration.
67 */
68 static void fail(const char *message,
69 const char *item1, int value1,
70 const char *item2, int value2) __attribute__((noreturn));
71
72 static void open_failure(int error) {
73 fail("Cannot open dynamic linker! ", "errno", error, NULL, 0);
74 }
75
76 static void check_pread(const char *fail_message, size_t bufsz,
77 int error, size_t read_count) {
78 if (error != 0)
79 fail(fail_message, "errno", error, NULL, 0);
80 if (read_count != bufsz)
81 fail(fail_message, "read count", read_count, NULL, 0);
82 }
83
84 static void mmap_failure(const char *segment_type, unsigned int segnum,
85 int error) {
86 fail("Failed to map from dynamic linker! ",
87 segment_type, segnum, "errno", error);
88 }
89
90 static void mprotect_failure(unsigned int segnum, int error) {
91 fail("Failed to mprotect hole in dynamic linker! ",
92 "segment", segnum, "errno", error);
93 }
94
95
96 /*
97 * Hand-rolled system call stubs for the few things we use.
98 */
99
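The raw int $0x80 / syscall / swi stubs that follow can be hard to read. As a point of reference, here is roughly what two of them amount to if libc's generic syscall(2) wrapper were acceptable; it is not used in this file, precisely because the goal is to avoid libc and its startup code. The helper names below are made up for illustration.

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

/* Roughly what my_exit() does: exit_group terminates every thread. */
static void exit_via_libc(int status) {
  syscall(SYS_exit_group, status);
}

/* Roughly what my_writev() does: gather-write iov[0..niov-1] to fd. */
static void writev_via_libc(int fd, const struct iovec *iov, int niov) {
  syscall(SYS_writev, fd, iov, niov);
}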
100 #if defined(__i386__)
Brad Chen 2011/08/29 23:04:57 I tend to agree with Mark that these syscall stubs
101
102 __attribute__((noreturn)) static void my_exit(int status) {
103 asm volatile("int $0x80"
Mark Seaborn 2011/08/29 22:26:50 The Chromium tree already has code for doing sysca
104 :: "a" (__NR_exit_group), "b" (status)
105 : "cc");
106 while (1) *(volatile int *) 0 = 0;
107 }
108
109 static void my_writev(int fd, const struct iovec *iov, int niov) {
110 asm volatile("int $0x80"
111 :: "a" (__NR_writev), "b" (fd), "c" (iov), "d" (niov)
112 : "cc");
113 }
114
115 static int my_open(const char *file, int oflag) {
116 int result;
117 asm volatile("int $0x80"
118 : "=a" (result)
119 : "0" (__NR_open), "b" (file), "c" (oflag)
120 : "cc");
121 if (result < 0)
122 open_failure(-result);
123 return result;
124 }
125
126 static void my_close(int fd) {
127 int result;
128 asm volatile("int $0x80"
129 : "=a" (result)
130 : "0" (__NR_close), "b" (fd)
131 : "cc");
132 }
133
134 static void my_pread(const char *fail_message,
135 int fd, void *buf, size_t bufsz, uintptr_t pos) {
136 int result;
137 /*
138 * Ideally we would use a memory output operand rather than the
139 * "memory" clobber. But the compiler can't handle that here,
140 * because it makes it want to use too many registers.
141 */
142 asm volatile("int $0x80"
143 : "=a" (result)
144 : "0" (__NR_pread64), "b" (fd), "c" (buf), "d" (bufsz),
145 "S" (pos), "D" (0)
146 : "cc", "memory");
147 check_pread(fail_message, bufsz, result < 0 ? -result : 0, result);
148 }
149
150 static uintptr_t my_mmap(const char *segment_type, unsigned int segnum,
151 uintptr_t address, size_t size, int prot, int flags,
152 int fd, uint32_t pos) {
153 uintptr_t result;
154 const struct {
155 unsigned int args[6];
156 } args = {
157 { address, size, prot, flags, fd, pos }
158 };
159 asm volatile("int $0x80"
160 : "=a" (result)
161 : "0" (__NR_mmap), "b" (&args), "m" (args)
162 : "cc");
163 if (result > (uintptr_t) -4096)
164 mmap_failure(segment_type, segnum, -result);
165 return result;
166 }
167
168 static void my_mprotect(unsigned int segnum,
169 uintptr_t address, size_t size, int prot) {
170 int result;
171 asm volatile("int $0x80"
172 : "=a" (result)
173 : "b" (address), "c" (size), "d" (prot)
174 : "cc");
175 if (result < 0)
176 mprotect_failure(segnum, -result);
177 }
178
179 #elif defined(__x86_64__)
180
181 __attribute__((noreturn)) static void my_exit(int status) {
182 asm volatile("syscall"
183 :: "a" (__NR_exit_group), "D" (status)
184 : "cc", "rcx", "r11");
185 while (1) *(volatile int *) 0 = 0;
186 }
187
188 static void my_writev(int fd, const struct iovec *iov, int niov) {
189 asm volatile("syscall"
190 :: "a" (__NR_writev), "D" (fd), "S" (iov), "d" (niov)
191 : "cc", "rcx", "r11");
192 }
193
194 static int my_open(const char *file, int oflag) {
195 int result;
196 asm volatile("syscall"
197 : "=a" (result)
198 : "0" (__NR_open), "D" (file), "S" (oflag)
199 : "cc", "rcx", "r11");
200 if (result < 0)
201 open_failure(-result);
202 return result;
203 }
204
205 static void my_close(int fd) {
206 int result;
207 asm volatile("syscall"
208 : "=a" (result)
209 : "0" (__NR_close), "D" (fd)
210 : "cc", "rcx", "r11");
211 }
212
213 static void my_pread(const char *fail_message,
214 int fd, void *buf, size_t bufsz, uintptr_t pos) {
215 int result;
216 asm volatile("mov %6, %%r10; syscall"
217 : "=a" (result), "=m" (*(struct { char b[bufsz]; } *) buf)
218 : "0" (__NR_pread64), "D" (fd), "S" (buf), "d" (bufsz), "g" (pos)
219 : "cc", "rcx", "r11", "r10");
220 check_pread(fail_message, bufsz, result < 0 ? -result : 0, result);
221 }
222
223 static uintptr_t my_mmap(const char *segment_type, unsigned int segnum,
224 uintptr_t address, size_t size, int prot, int flags,
225 int fd, off_t pos) {
226 uintptr_t result;
227 asm volatile("mov %5, %%r10; mov %6, %%r8; mov %7, %%r9; syscall"
228 : "=a" (result)
229 : "0" (__NR_mmap), "D" (address), "S" (size),
230 "d" ((long int) prot), "g" ((long int) flags),
231 "g" ((long int) fd), "g" (pos)
232 : "cc", "rcx", "r11", "r10", "r9", "r8");
233 if (result > (uintptr_t) -4096)
234 mmap_failure(segment_type, segnum, -result);
235 return result;
236 }
237
238 static void my_mprotect(unsigned int segnum,
239 uintptr_t address, size_t size, int prot) {
240 int result;
241 asm volatile("syscall"
242 : "=a" (result)
243 : "0" (__NR_mprotect), "D" (address), "S" (size), "d" (prot)
244 : "cc", "rcx", "r11");
245 if (result < 0)
246 mprotect_failure(segnum, -result);
247 }
248
249 #elif defined(__ARM_EABI__)
250
251 /*
252 * We must touch r7 behind the compiler's back when compiling for
253 * Thumb. r7 is the Thumb frame pointer and the compiler won't honor
254 * our private use of it without getting confused and clobbering
255 * things. Since a few extra instructions here don't hurt, we just
256 * use the same method for non-Thumb builds too.
257 */
258 #define SYSCALL_ASM \
259 "mov %[scratch], r7\n" \
260 "mov r7, %[nr]\n" \
261 "swi 0\n" \
262 "mov r7, %[scratch]"
263
264 __attribute__((noreturn)) static void my_exit(int status) {
265 int scratch;
266 register int a1 asm ("a1") = status;
267 asm volatile(SYSCALL_ASM
268 : [scratch] "=&r" (scratch)
269 : [nr] "i" (__NR_exit_group), "r" (a1));
270 while (1) *(volatile int *) 0 = 0;
271 }
272
273 static void my_writev(int fd, const struct iovec *iov, int niov) {
274 int scratch;
275 register int a1 asm ("a1") = fd;
276 register int a2 asm ("a2") = (uintptr_t) iov;
277 register int a3 asm ("a3") = niov;
278 asm volatile(SYSCALL_ASM
279 : [scratch] "=&r" (scratch), "=r" (a1)
280 : [nr] "i" (__NR_writev), "r" (a1), "r" (a2), "r" (a3));
281 }
282
283 static int my_open(const char *file, int oflag) {
284 int scratch;
285 register int result asm ("a1") = (uintptr_t) file;
286 register int a2 asm ("a2") = oflag;
287 asm volatile(SYSCALL_ASM
288 : [scratch] "=&r" (scratch), "=r" (result)
289 : [nr] "i" (__NR_open), "r" (result), "r" (a2));
290 if (result < 0)
291 open_failure(-result);
292 return result;
293 }
294
295 static void my_close(int fd) {
296 int scratch;
297 register int result asm ("a1") = fd;
298 asm volatile(SYSCALL_ASM
299 : [scratch] "=&r" (scratch), "=r" (result)
300 : [nr] "i" (__NR_close), "r" (result));
301 }
302
303 static void my_pread(const char *fail_message,
304 int fd, void *buf, size_t bufsz, uintptr_t pos) {
305 int scratch;
306 register int result asm ("a1") = fd;
307 register int a2 asm ("a2") = (uintptr_t) buf;
308 register int a3 asm ("a3") = bufsz;
309 register int a4 asm ("a4") = 0;
310 register int v1 asm ("v1");
311 register int v2 asm ("v2");
312 #if defined(__ARMEL__)
313 v1 = pos;
314 v2 = 0;
315 #else
316 v1 = 0;
317 v2 = pos;
318 #endif
319 asm volatile(SYSCALL_ASM
320 : [scratch] "=&r" (scratch), "=r" (result)
321 : [nr] "i" (__NR_pread64), "r" (result), "r" (a2), "r" (a3),
322 "r" (a4), "r" (v1), "r" (v2));
323 check_pread(fail_message, bufsz, result < 0 ? -result : 0, result);
324 }
325
326 static uintptr_t my_mmap(const char *segment_type, unsigned int segnum,
327 uintptr_t address, size_t size, int prot, int flags,
328 int fd, off_t pos) {
329 int scratch;
330 register uintptr_t result asm ("a1") = address;
331 register int a2 asm ("a2") = size;
332 register int a3 asm ("a3") = prot;
333 register int a4 asm ("a4") = flags;
334 register int v1 asm ("v1") = fd;
335 register int v2 asm ("v2") = pos >> 12;
336 asm volatile(SYSCALL_ASM
337 : [scratch] "=&r" (scratch), "=r" (result)
338 : [nr] "i" (__NR_mmap2), "r" (result), "r" (a2), "r" (a3),
339 "r" (a4), "r" (v1), "r" (v2));
340 if (result > (uintptr_t) -4096)
341 mmap_failure(segment_type, segnum, -result);
342 return result;
343 }
344
345 static void my_mprotect(unsigned int segnum,
346 uintptr_t address, size_t size, int prot) {
347 int scratch;
348 register int result asm ("a1") = address;
349 register int a2 asm ("a2") = size;
350 register int a3 asm ("a3") = prot;
351 asm volatile(SYSCALL_ASM
352 : [scratch] "=&r" (scratch), "=r" (result)
353 : [nr] "i" (__NR_mprotect), "r" (result), "r" (a2), "r" (a3));
354 if (result < 0)
355 mprotect_failure(segnum, -result);
356 }
357
358 #else
359 # error "Need inline syscall implementation for this architecture!"
360 #endif
361
362 /*
363 * We're avoiding libc, so no printf. The only nontrivial thing we need
364 * is rendering numbers, which is, in fact, pretty trivial.
365 */
366 static void itoa(int value, struct iovec *iov, char *buf, size_t bufsz) {
367 char *p = &buf[bufsz];
368 do {
369 *--p = "0123456789"[value % 10];
Brad Chen 2011/08/29 23:04:57 Yikes! I bet this code works just fine but I'm pre
370 value /= 10;
371 } while (value != 0);
372 iov->iov_base = p;
373 iov->iov_len = &buf[bufsz] - p;
374 }
375
376 #define STRING_IOV(string_constant, cond) \
377 { (void *) string_constant, cond ? (sizeof(string_constant) - 1) : 0 }
378
379 __attribute__((noreturn)) static void fail(const char *message,
380 const char *item1, int value1,
381 const char *item2, int value2) {
382 char valbuf1[32], valbuf2[32];
383 struct iovec iov[] = {
384 STRING_IOV("bootstrap_helper", 1),
385 STRING_IOV(DYNAMIC_LINKER, 1),
386 STRING_IOV(": ", 1),
387 { (void *) message, my_strlen(message) },
388 { (void *)item1, item1 == NULL ? 0 : my_strlen(item1) },
389 STRING_IOV("=", item1 != NULL),
390 {},
391 STRING_IOV(", ", item1 != NULL && item2 != NULL),
392 { (void *) item2, item2 == NULL ? 0 : my_strlen(item2) },
393 STRING_IOV("=", item2 != NULL),
394 {},
395 { "\n", 1 },
396 };
397 const int niov = sizeof(iov) / sizeof(iov[0]);
398
399 if (item1 != NULL)
400 itoa(value1, &iov[6], valbuf1, sizeof(valbuf1));
401 if (item2 != NULL)
402 itoa(value2, &iov[10], valbuf2, sizeof(valbuf2));
403
404 my_writev(2, iov, niov);
405 my_exit(2);
406 }
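For reference, everything fail() assembles goes out as a single writev to stderr. With the x86-64 linker path and an assumed ENOENT from my_open, the output would look like this; the errno value is only an example.

/* Assumed example (errno 2 == ENOENT):
 *   bootstrap_helper/lib64/ld-linux-x86-64.so.2: Cannot open dynamic linker! errno=2
 */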
407
408 static int prot_from_phdr(const ElfW(Phdr) *phdr) {
409 int prot = 0;
410 if (phdr->p_flags & PF_R)
411 prot |= PROT_READ;
412 if (phdr->p_flags & PF_W)
413 prot |= PROT_WRITE;
414 if (phdr->p_flags & PF_X)
415 prot |= PROT_EXEC;
416 return prot;
417 }
418
419 /*
420 * Handle the "bss" portion of a segment, where the memory size
421 * exceeds the file size and we zero-fill the difference. For any
422 * whole pages in this region, we over-map anonymous pages. For the
423 * sub-page remainder, we zero-fill bytes directly.
424 */
425 static void handle_bss(unsigned int segnum, const ElfW(Phdr) *ph,
426 ElfW(Addr) load_bias, size_t pagesize) {
427 if (ph->p_memsz > ph->p_filesz) {
428 ElfW(Addr) file_end = ph->p_vaddr + load_bias + ph->p_filesz;
429 ElfW(Addr) file_page_end = (file_end + pagesize - 1) & -pagesize;
430 ElfW(Addr) page_end = (ph->p_vaddr + load_bias +
431 ph->p_memsz + pagesize - 1) & -pagesize;
432 if (page_end > file_page_end)
433 my_mmap("bss segment", segnum,
434 file_page_end, page_end - file_page_end,
435 prot_from_phdr(ph), MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
436 if (file_page_end > file_end && (ph->p_flags & PF_W))
437 my_bzero((void *) file_end, file_page_end - file_end);
438 }
439 }
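A worked example with assumed numbers may make the bss arithmetic above concrete; none of these values come from a real dynamic linker.

/* Assumed: pagesize = 0x1000, load_bias = 0x200000,
 *          p_vaddr = 0x10000, p_filesz = 0x1234, p_memsz = 0x5000.
 *
 *   file_end      = 0x10000 + 0x200000 + 0x1234 = 0x211234
 *   file_page_end = 0x212000   (file_end rounded up to a page)
 *   page_end      = 0x215000   (end of p_memsz rounded up to a page)
 *
 * So pages [0x212000, 0x215000) get an anonymous MAP_FIXED mapping, and
 * the partial page [0x211234, 0x212000) is zeroed in place, provided the
 * segment is writable (PF_W).
 */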
440
441 /*
442 * This is the main loading code. It's called with the address of the
443 * auxiliary vector on the stack, which we need to examine and modify.
444 * It returns the dynamic linker's runtime entry point address, where
445 * we should jump to. This is called by the machine-dependent _start
446 * code (below). On return, it restores the original stack pointer
447 * and jumps to this entry point.
448 */
449 ElfW(Addr) do_load(ElfW(auxv_t) *auxv) {
450 /* Record the auxv entries that are specific to the file loaded.
451 The incoming entries point to our own static executable. */
452 ElfW(auxv_t) *av_entry = NULL;
453 ElfW(auxv_t) *av_phdr = NULL;
454 ElfW(auxv_t) *av_phnum = NULL;
455 size_t pagesize = 0;
456
457 ElfW(auxv_t) *av;
458 for (av = auxv;
459 av_entry == NULL || av_phdr == NULL || av_phnum == NULL || pagesize == 0;
460 ++av)
461 switch (av->a_type) {
462 case AT_NULL:
463 fail("Failed to find AT_ENTRY, AT_PHDR, AT_PHNUM, or AT_PAGESZ!",
464 NULL, 0, NULL, 0);
465 /*NOTREACHED*/
466 break;
467 case AT_ENTRY:
468 av_entry = av;
469 break;
470 case AT_PAGESZ:
471 pagesize = av->a_un.a_val;
472 break;
473 case AT_PHDR:
474 av_phdr = av;
475 break;
476 case AT_PHNUM:
477 av_phnum = av;
478 break;
479 }
480
481 int fd = my_open(DYNAMIC_LINKER, O_RDONLY);
482
483 ElfW(Ehdr) ehdr;
484 my_pread("Failed to read ELF header from dynamic linker! ",
485 fd, &ehdr, sizeof(ehdr), 0);
486
487 if (ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
488 ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
489 ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
490 ehdr.e_ident[EI_MAG3] != ELFMAG3 ||
491 ehdr.e_version != EV_CURRENT ||
492 ehdr.e_ehsize != sizeof(ehdr) ||
493 ehdr.e_phentsize != sizeof(ElfW(Phdr)))
494 fail("Dynamic linker has no valid ELF header!", NULL, 0, NULL, 0);
495
496 switch (ehdr.e_machine) {
497 #if defined(__i386__)
498 case EM_386:
499 #elif defined(__x86_64__)
500 case EM_X86_64:
501 #elif defined(__arm__)
502 case EM_ARM:
503 #else
504 # error "Don't know the e_machine value for this architecture!"
505 #endif
506 break;
507 default:
508 fail("Dynamic linker has wrong architecture! ",
509 "e_machine", ehdr.e_machine, NULL, 0);
510 break;
511 }
512
513 ElfW(Phdr) phdr[12];
Brad Chen 2011/08/29 23:04:57 make this 12 a constant please.
514 if (ehdr.e_phnum > sizeof(phdr) / sizeof(phdr[0]) || ehdr.e_phnum < 1)
515 fail("Dynamic linker has unreasonable ",
516 "e_phnum", ehdr.e_phnum, NULL, 0);
517
518 if (ehdr.e_type != ET_DYN)
519 fail("Dynamic linker not ET_DYN! ",
520 "e_type", ehdr.e_type, NULL, 0);
521
522 my_pread("Failed to read program headers from dynamic linker! ",
523 fd, phdr, sizeof(phdr[0]) * ehdr.e_phnum, ehdr.e_phoff);
524
525 size_t i = 0;
526 while (i < ehdr.e_phnum && phdr[i].p_type != PT_LOAD)
527 ++i;
528 if (i == ehdr.e_phnum)
529 fail("Dynamic linker has no PT_LOAD header!",
530 NULL, 0, NULL, 0);
531
532 const ElfW(Phdr) *first_load = &phdr[i];
533 const ElfW(Phdr) *last_load = &phdr[ehdr.e_phnum - 1];
534 while (last_load > first_load && last_load->p_type != PT_LOAD)
535 --last_load;
536
537 size_t span = last_load->p_vaddr + last_load->p_memsz - first_load->p_vaddr;
538
539 const uintptr_t mapping = my_mmap("segment", first_load - phdr,
540 first_load->p_vaddr & -pagesize, span,
541 prot_from_phdr(first_load), MAP_PRIVATE, fd,
542 first_load->p_offset & -pagesize);
543
544 const ElfW(Addr) load_bias = ((uintptr_t) mapping -
545 (first_load->p_vaddr & -pagesize));
546
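To make the bias arithmetic concrete: the dynamic linker is ET_DYN, so the kernel picks the base address of the first mapping and everything else is placed relative to it. A small sketch with assumed numbers:

/* Assumed: pagesize = 0x1000, first_load->p_vaddr = 0,
 *          last_load->p_vaddr = 0x1f000, last_load->p_memsz = 0x2000.
 *
 *   span      = 0x1f000 + 0x2000 - 0 = 0x21000
 *   mapping   = wherever the kernel placed it, say 0xf7e00000
 *   load_bias = 0xf7e00000 - 0 = 0xf7e00000
 *
 * Every later segment then lands at p_vaddr + load_bias, e.g. the last
 * PT_LOAD starts at 0xf7e1f000.
 */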
547 if (first_load->p_offset > ehdr.e_phoff ||
548 first_load->p_filesz < ehdr.e_phoff + (ehdr.e_phnum * sizeof(ElfW(Phdr))))
549 fail("First load segment of dynamic linker does not contain phdrs!",
550 NULL, 0, NULL, 0);
551
552 /* Point the auxv elements at the dynamic linker's phdrs and entry. */
553 av_phdr->a_un.a_val = (ehdr.e_phoff - first_load->p_offset +
554 first_load->p_vaddr + load_bias);
555 av_phnum->a_un.a_val = ehdr.e_phnum;
556 av_entry->a_un.a_val = ehdr.e_entry + load_bias;
557
558 handle_bss(first_load - phdr, first_load, load_bias, pagesize);
559
560 ElfW(Addr) last_end = first_load->p_vaddr + load_bias + first_load->p_memsz;
561
562 const ElfW(Phdr) *ph;
563 for (ph = first_load + 1; ph <= last_load; ++ph)
564 if (ph->p_type == PT_LOAD) {
565 ElfW(Addr) last_page_end = (last_end + pagesize - 1) & -pagesize;
566
567 last_end = ph->p_vaddr + load_bias + ph->p_memsz;
568 ElfW(Addr) start = (ph->p_vaddr + load_bias) & -pagesize;
569 ElfW(Addr) end = (last_end + pagesize - 1) & -pagesize;
570
571 if (start > last_page_end)
572 my_mprotect(ph - phdr, last_page_end, start - last_page_end, PROT_NONE);
573
574 my_mmap("segment", ph - phdr,
575 start, end - start,
576 prot_from_phdr(ph), MAP_PRIVATE | MAP_FIXED, fd,
577 ph->p_offset & -pagesize);
578
579 handle_bss(ph - phdr, ph, load_bias, pagesize);
580 }
581
582 my_close(fd);
583
584 return ehdr.e_entry + load_bias;
585 }
586
587 /*
588 * We have to define the actual entry point code (_start) in assembly
589 * for each machine. The kernel startup protocol is not compatible
590 * with the normal C function calling convention. Here, we calculate
591 * the address of the auxiliary vector on the stack; call do_load
592 * (above) using the normal C convention as per the ABI; restore the
593 * original starting stack; and finally, jump to the dynamic linker's
594 * entry point address.
Brad Chen 2011/08/29 23:04:57 This is pretty cool code, but again, if we could u
595 */
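The envp scan in each _start variant relies on the standard Linux/ELF process-startup stack layout, sketched here for reference; this is the documented kernel convention, not something defined in this file.

/*
 *   sp ->  argc
 *          argv[0] ... argv[argc-1]
 *          NULL                      <- end of argv
 *          envp[0] ... envp[n-1]
 *          NULL                      <- the scan loops stop here
 *          auxv[0] ...               <- ElfW(auxv_t) {a_type, a_un} pairs
 *          AT_NULL terminator
 */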
596 #if defined(__i386__)
597 asm(".globl _start\n"
598 ".type _start,@function\n"
599 "_start:\n"
600 "xorl %ebp, %ebp\n"
601 "movl %esp, %ebx\n" /* Save starting SP in %ebx. */
602 "andl $-16, %esp\n" /* Align the stack as per ABI. */
603 "movl (%ebx), %eax\n" /* argc */
604 "leal 8(%ebx,%eax,4), %ecx\n" /* envp */
605 /* Find the envp element that is NULL, and auxv is past there. */
606 "0: addl $4, %ecx\n"
607 "cmpl $0, -4(%ecx)\n"
608 "jne 0b\n"
609 "pushl %ecx\n" /* Argument: auxv. */
610 "call do_load\n"
611 "movl %ebx, %esp\n" /* Restore the saved SP. */
612 "jmp *%eax\n"
613 ); /* Jump to the entry point. */
614 #elif defined(__x86_64__)
615 asm(".globl _start\n"
616 ".type _start,@function\n"
617 "_start:\n"
618 "xorq %rbp, %rbp\n"
619 "movq %rsp, %rbx\n" /* Save starting SP in %rbx. */
620 "andq $-16, %rsp\n" /* Align the stack as per ABI. */
621 "movq (%rbx), %rax\n" /* argc */
622 "leaq 16(%rbx,%rax,8), %rdi\n" /* envp */
623 /* Find the envp element that is NULL, and auxv is past there. */
624 "0: addq $8, %rdi\n"
625 "cmpq $0, -8(%rdi)\n"
626 "jne 0b\n"
627 "call do_load\n" /* Argument already in %rdi: auxv */
628 "movq %rbx, %rsp\n" /* Restore the saved SP. */
629 "jmp *%rax\n" /* Jump to the entry point. */
630 );
631 #elif defined(__arm__)
632 asm(".globl _start\n"
633 ".type _start,#function\n"
634 "_start:\n"
635 #if defined(__thumb2__)
636 ".thumb\n"
637 ".syntax unified\n"
638 #endif
639 "mov fp, #0\n"
640 "mov lr, #0\n"
641 "mov r4, sp\n" /* Save starting SP in r4. */
642 "ldr r1, [r4]\n" /* argc */
643 "add r1, r1, #2\n"
644 "add r0, r4, r1, asl #2\n" /* envp */
645 /* Find the envp element that is NULL, and auxv is past there. */
646 "0: ldr r1, [r0], #4\n"
647 "cmp r1, #0\n"
648 "bne 0b\n"
649 "bl do_load\n"
650 "mov sp, r4\n"
651 "blx r0\n"
652 );
653 #else
654 # error "Need stack-preserving _start code for this architecture!"
655 #endif