Chromium Code Reviews

Side by Side Diff: components/nacl/loader/nonsfi/elf_util.cc

Issue 100373005: Initial implementation of Bare Metal Mode for NaCl. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 7 years ago
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
Mark Seaborn 2013/12/06 21:42:37 Nit: "elf_loader.cc" would be a more descriptive name.
hidehiko 2013/12/09 07:43:39 Done.
5 #include "components/nacl/loader/nonsfi/elf_util.h"
6
7 #include <cstring>
8 #include <string>
9 #include <sys/mman.h>
10
11 #include "base/logging.h"
12 #include "base/strings/string_number_conversions.h"
13 #include "native_client/src/include/elf.h"
14 #include "native_client/src/include/portability.h"
15 #include "native_client/src/shared/platform/nacl_host_desc.h"
16 #include "native_client/src/trusted/desc/nacl_desc_base.h"
17 #include "native_client/src/trusted/desc/nacl_desc_effector_trusted_mem.h"
18
19 // Extracted from native_client/src/trusted/service_runtime/nacl_config.h
20 // We only support 32-bit x86. TODO(hidehiko): Add ARM support.
21 #if NACL_ARCH(NACL_BUILD_ARCH) == NACL_x86 && NACL_BUILD_SUBARCH == 32
22 # define NACL_ELF_E_MACHINE EM_386
23 #endif
24
25 // Copied from native_client/src/trusted/service_runtime/include/bits/mman.h
26 #define NACL_ABI_PROT_READ 0x1 // Page can be read.
27 #define NACL_ABI_PROT_WRITE 0x2 // Page can be written.
28 #define NACL_ABI_PROT_EXEC 0x4 // Page can be executed.
29 #define NACL_ABI_PROT_NONE 0x0 // Page cannot be accessed.
30 #define NACL_ABI_MAP_PRIVATE 0x02 // Changes are private.
31 #define NACL_ABI_MAP_FIXED 0x10 // Interpret addr exactly.
32
33 namespace nacl {
34 namespace nonsfi {
35 namespace {
36
37 // Page size for non-SFI Mode.
38 const Elf32_Addr kNonSfiPageSize = 4096;
39 const Elf32_Addr kNonSfiPageMask = kNonSfiPageSize - 1;
40
41 void DumpElfHeader(const Elf32_Ehdr& ehdr) {
42 #define DUMP(member) \
43 #member << " = 0x" << base::HexEncode(&ehdr.member, sizeof(ehdr.member))
44
45 VLOG(2) << "\n" <<
46 "=================================================\n"
47 "Elf header\n"
48 "==================================================\n" <<
49 std::string(
50 reinterpret_cast<const char*>(ehdr.e_ident + 1), 3) << "\n" <<
51 DUMP(e_type) << "\n" <<
52 DUMP(e_machine) << "\n" <<
53 DUMP(e_version) << "\n" <<
54 DUMP(e_entry) << "\n" <<
55 DUMP(e_phoff) << "\n" <<
56 DUMP(e_shoff) << "\n" <<
57 DUMP(e_flags) << "\n" <<
58 DUMP(e_ehsize) << "\n" <<
59 DUMP(e_phentsize) << "\n" <<
60 DUMP(e_phnum) << "\n" <<
61 DUMP(e_shentsize) << "\n" <<
62 DUMP(e_shnum) << "\n" <<
63 DUMP(e_shstrndx) << "\n" <<
64 "sizeof(Elf32_Ehdr) = " << sizeof(Elf32_Ehdr);
65 #undef DUMP
66 }
67
68 void DumpElfProgramHeader(const Elf32_Phdr& phdr) {
69 #define DUMP(member) \
70 #member << " = 0x" << base::HexEncode(&phdr.member, sizeof(phdr.member))
71
72 VLOG(2) <<
73 DUMP(p_type) << "\n" <<
74 DUMP(p_offset) << "\n" <<
75 DUMP(p_vaddr) << "\n" <<
76 DUMP(p_paddr) << "\n" <<
77 DUMP(p_filesz) << "\n" <<
78 DUMP(p_memsz) << "\n" <<
79 DUMP(p_flags) << "\n" <<
80 " (" << ((phdr.p_flags & PF_R) ? "PF_R" : "") << " "
81 << ((phdr.p_flags & PF_W) ? "PF_W" : "") << " "
82 << ((phdr.p_flags & PF_X) ? "PF_X" : "") << ")\n" <<
83 DUMP(p_align) << "\n\n";
84 #undef DUMP
85 }
86
87 NonSfiErrorCode ValidateElfHeader(const Elf32_Ehdr& ehdr) {
88 if (std::memcmp(ehdr.e_ident, ELFMAG, SELFMAG)) {
89 LOG(ERROR) << "Bad elf magic";
90 return LOAD_BAD_ELF_MAGIC;
91 }
92
93 if (ehdr.e_ident[EI_CLASS] != ELFCLASS32) {
94 LOG(ERROR) << "Bad elf class";
95 return LOAD_NOT_32_BIT;
96 }
97
98 if (ehdr.e_type != ET_DYN) {
99 LOG(ERROR) << "Non executable";
100 return LOAD_NOT_EXEC;
101 }
102
103 if (ehdr.e_machine != NACL_ELF_E_MACHINE) {
104 LOG(ERROR) << "Bad machine: "
105 << base::HexEncode(&ehdr.e_machine, sizeof(ehdr.e_machine));
106 return LOAD_BAD_MACHINE;
107 }
108
109 if (ehdr.e_version != EV_CURRENT) {
110 LOG(ERROR) << "Bad elf version: "
111 << base::HexEncode(&ehdr.e_version, sizeof(ehdr.e_version));
112 }
113
114 return LOAD_OK;
115 }
116
117 // Returns the start address of the page containing address 'addr' for non-SFI mode.
118 Elf32_Addr GetPageStart(Elf32_Addr addr) {
Mark Seaborn 2013/12/06 21:42:37 So that we can test in x86-64 builds, all of the Elf32_* types here should use the ElfW() macro instead.
hidehiko 2013/12/09 07:43:39 Replaced by ElfW(Addr) by following the manner you suggested.
119 return addr & ~kNonSfiPageMask;
120 }
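// For reference, the ElfW() pattern suggested above roughly expands as follows
// (an illustrative sketch only; the exact macro provided by NaCl's or glibc's
// headers may differ):
//   #if defined(__LP64__)
//   # define ElfW(type) Elf64_##type  /* e.g. ElfW(Addr) -> Elf64_Addr */
//   #else
//   # define ElfW(type) Elf32_##type  /* e.g. ElfW(Addr) -> Elf32_Addr */
//   #endif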
121
122 // Returns the offset of address 'addr' in its memory page. In other words,
123 // this equals 'addr' - GetPageStart(addr).
124 Elf32_Addr GetPageOffset(Elf32_Addr addr) {
125 return addr & kNonSfiPageMask;
126 }
127
128 // Returns the address of the next page after address 'addr', unless 'addr' is
129 // already at the start of a page. This equals:
130 // addr == GetPageStart(addr) ? addr : GetPageStart(addr) + kNonSfiPageSize
131 Elf32_Addr GetPageEnd(Elf32_Addr addr) {
132 return GetPageStart(addr + kNonSfiPageSize - 1);
133 }
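// Worked example with the 4096-byte (0x1000) page size above:
//   GetPageStart(0x12345)  == 0x12000
//   GetPageOffset(0x12345) == 0x345
//   GetPageEnd(0x12345)    == 0x13000
//   GetPageEnd(0x13000)    == 0x13000  (already page-aligned, so unchanged)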
134
135 // Converts the pflags (in phdr) to mmap's prot flags.
136 int PFlagsToProt(int pflags) {
137 return ((pflags & PF_X) ? PROT_EXEC : 0) |
138 ((pflags & PF_R) ? PROT_READ : 0) |
139 ((pflags & PF_W) ? PROT_WRITE : 0);
140 }
141
142 // Converts the pflags (in phdr) to NaCl ABI's prot flags.
143 int PFlagsToNaClProt(int pflags) {
144 return ((pflags & PF_X) ? NACL_ABI_PROT_EXEC : 0) |
145 ((pflags & PF_R) ? NACL_ABI_PROT_READ : 0) |
146 ((pflags & PF_W) ? NACL_ABI_PROT_WRITE : 0);
147 }
148
149 // Returns the load size for the given phdrs, or 0 on error.
150 Elf32_Addr GetLoadSize(const Elf32_Phdr* phdrs, int phnum) {
Mark Seaborn 2013/12/06 21:42:37 So that this works on x86-64, you can use "ElfW(Phdr)" here.
hidehiko 2013/12/09 07:43:39 Thank you for your navigation. Done.
151 Elf32_Addr begin = 0xFFFFFFFFU;
152 Elf32_Addr end = 0;
153
154 VLOG(4) << "GetLoadSize: phnum=" << phnum;
155 for (int i = 0; i < phnum; ++i) {
156 const Elf32_Phdr& phdr = phdrs[i];
157 if (phdr.p_type != PT_LOAD) {
158 // Do nothing for a non-PT_LOAD header.
159 continue;
160 }
161
162 begin = std::min(begin, phdr.p_vaddr);
163 end = std::max(end, phdr.p_vaddr + phdr.p_memsz);
164 }
165
166 if (begin > end) {
167 // Either no PT_LOAD segment was found, or the end address overflowed.
168 return 0;
169 }
170
171 return GetPageEnd(end) - GetPageStart(begin);
172 }
173
174 // Reserves memory for the given phdrs, and stores the start address of the
175 // reserved region (the load bias) in load_bias.
176 NonSfiErrorCode ReserveMemory(const Elf32_Phdr* phdrs,
177 int phnum,
178 Elf32_Addr* load_bias) {
179 VLOG(4) << "ReserveMemory";
180
181 Elf32_Addr size = GetLoadSize(phdrs, phnum);
182 if (size == 0) {
183 LOG(ERROR) << "ReserveMemory failed to calculate size";
184 return LOAD_UNLOADABLE;
185 }
186 VLOG(4) << "ReserveMemory: size=" << size;
187
188 // Make sure that the given program headers represent a PIE binary.
189 for (int i = 0; i < phnum; ++i) {
190 if (phdrs[i].p_type == PT_LOAD) {
191 // Here, phdrs[i] is the first loadable segment.
192 if (phdrs[i].p_vaddr != 0) {
193 // The binary is not PIE (i.e. it needs to be loaded at a fixed address).
194 // We don't support such a case.
195 LOG(ERROR)
196 << "Reservememory: Non-PIE binary loading is not supported.";
197 return LOAD_UNLOADABLE;
198 }
199 break;
200 }
201 }
202
203 void* start = mmap(0, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
204 if (start == MAP_FAILED) {
205 LOG(ERROR) << "ReserveMemory: failed to mmap.";
206 return LOAD_NO_MEMORY;
207 }
208
209 *load_bias = reinterpret_cast<Elf32_Addr>(start);
210 VLOG(4) << "ReserveMemory: success";
211 return LOAD_OK;
212 }
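// Illustrative example: if the PROT_NONE reservation above lands at, say,
// 0x70000000 and the first PT_LOAD segment has p_vaddr == 0 (as required by
// the PIE check), LoadSegments() below maps each segment at
// p_vaddr + 0x70000000, so the load bias is simply the reservation address.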
213
214 NonSfiErrorCode LoadSegments(
215 const Elf32_Phdr* phdrs, int phnum, Elf32_Addr load_bias,
216 struct NaClDesc* descriptor) {
217 for (int i = 0; i < phnum; ++i) {
218 const Elf32_Phdr& phdr = phdrs[i];
219 if (phdr.p_type != PT_LOAD) {
220 // Not a load target.
221 VLOG(4) << "LoadSegments: [" << i << "] Skipped";
222 continue;
223 }
224
225 VLOG(4) << "LoadSegments: [" << i << "] Loading...";
226
227 // Addresses in memory.
228 Elf32_Addr seg_start = phdr.p_vaddr + load_bias;
229 Elf32_Addr seg_end = seg_start + phdr.p_memsz;
230 Elf32_Addr seg_page_start = GetPageStart(seg_start);
231 Elf32_Addr seg_page_end = GetPageEnd(seg_end);
232 Elf32_Addr seg_file_end = seg_start + phdr.p_filesz;
233
234 // Offsets within the file content.
235 Elf32_Addr file_start = phdr.p_offset;
236 Elf32_Addr file_end = file_start + phdr.p_filesz;
237 Elf32_Addr file_page_start = GetPageStart(file_start);
238
239 uintptr_t seg_addr = (*NACL_VTBL(NaClDesc, descriptor)->Map)(
240 descriptor,
241 NaClDescEffectorTrustedMem(),
242 reinterpret_cast<void *>(seg_page_start),
243 file_end - file_page_start,
244 PFlagsToNaClProt(phdr.p_flags),
245 NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_FIXED,
246 file_page_start);
247 if (NaClPtrIsNegErrno(&seg_addr)) {
248 LOG(ERROR) << "LoadSegments: [" << i << "] mmap failed, " << seg_addr;
249 return LOAD_NO_MEMORY;
250 }
251
252 // Zero-fill between the end of the file-backed data and the next page
253 // boundary if necessary (i.e. if the file data doesn't end on a page boundary).
254 Elf32_Addr seg_file_end_offset = GetPageOffset(seg_file_end);
255 if ((phdr.p_flags & PF_W) && seg_file_end_offset > 0) {
256 memset(reinterpret_cast<void *>(seg_file_end), 0,
257 kNonSfiPageSize - seg_file_end_offset);
258 }
259
260 // After the rounding below, seg_file_end is the first page boundary after the
261 // file content. If seg_page_end is larger, we need to zero anything between
262 // them. This is done by using a private anonymous mmap for all extra pages.
263 seg_file_end = GetPageEnd(seg_file_end);
264 if (seg_page_end > seg_file_end) {
265 void* zeromap = mmap(reinterpret_cast<void *>(seg_file_end),
266 seg_page_end - seg_file_end,
267 PFlagsToProt(phdr.p_flags),
268 MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE,
269 -1, 0);
270 if (zeromap == MAP_FAILED) {
271 LOG(ERROR) << "LoadSegments: [" << i << "] Failed to zeromap.";
272 return LOAD_NO_MEMORY;
273 }
274 }
275 }
276 return LOAD_OK;
277 }
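// Illustrative example of the zero-fill logic above: with a writable PT_LOAD
// segment mapped at 0x20000, p_filesz = 0x1850 and p_memsz = 0x5000, the
// file-backed data occupies 0x20000..0x21850, the memset() clears
// 0x21850..0x22000 (the tail of that page), and the anonymous mmap() supplies
// zero pages for 0x22000..0x25000 (the BSS).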
278
279 } // namespace
280
281 ElfImage::ElfImage() {
282 }
283
284 ElfImage::~ElfImage() {
285 }
286
287 NonSfiErrorCode ElfImage::Read(struct NaClDesc* descriptor) {
288 // Read elf header.
289 ssize_t read_ret = (*NACL_VTBL(NaClDesc, descriptor)->PRead)(
290 descriptor, &ehdr_, sizeof(ehdr_), 0);
291 if (NaClSSizeIsNegErrno(&read_ret) ||
292 static_cast<size_t>(read_ret) != sizeof(ehdr_)) {
293 LOG(ERROR) << "Could not load elf headers.";
294 return LOAD_READ_ERROR;
295 }
296
297 DumpElfHeader(ehdr_);
298 NonSfiErrorCode error_code = ValidateElfHeader(ehdr_);
299 if (error_code != LOAD_OK)
300 return error_code;
301
302 // Read program headers.
303 if (ehdr_.e_phnum > MAX_PROGRAM_HEADERS) {
304 LOG(ERROR) << "Too many program headers";
305 return LOAD_TOO_MANY_PROG_HDRS;
306 }
307
308 if (ehdr_.e_phentsize != sizeof(phdrs_[0])) {
309 LOG(ERROR) << "Bad program headers size\n"
310 << " ehdr_.e_phentsize = " << ehdr_.e_phentsize << "\n"
311 << " sizeof phdrs_[0] = " << sizeof(phdrs_[0]);
312 return LOAD_BAD_PHENTSIZE;
313 }
314
315 size_t read_size = ehdr_.e_phnum * ehdr_.e_phentsize;
316 read_ret = (*NACL_VTBL(NaClDesc, descriptor)->PRead)(
317 descriptor, phdrs_, read_size, ehdr_.e_phoff);
318
319 if (NaClSSizeIsNegErrno(&read_ret) ||
320 static_cast<size_t>(read_ret) != read_size) {
321 LOG(ERROR) << "Cannot load prog headers";
322 return LOAD_READ_ERROR;
323 }
324
325 VLOG(2) << "\n" <<
326 "=================================================\n"
327 "Elf Program headers\n"
328 "==================================================\n";
329 for (int i = 0; i < ehdr_.e_phnum; ++i) {
330 DumpElfProgramHeader(phdrs_[i]);
331 }
332
333 return LOAD_OK;
334 }
335
336 NonSfiErrorCode ElfImage::Load(struct NaClDesc* descriptor) {
337 VLOG(3) << "ElfImage::Load";
338
339 NonSfiErrorCode error = ReserveMemory(phdrs_, ehdr_.e_phnum, &load_bias_);
340 if (error != LOAD_OK) {
341 LOG(ERROR) << "ElfImage::Load: Failed to allocate memory.";
342 return error;
343 }
344 VLOG(3) << "ElfImage::Load: Loader maps the program to 0x"
345 << base::HexEncode(&load_bias_, sizeof(load_bias_));
346
347 error = LoadSegments(phdrs_, ehdr_.e_phnum, load_bias_, descriptor);
348 if (error != LOAD_OK) {
349 LOG(ERROR) << "ElfImage::Load: Failed to load segments";
350 return error;
351 }
352
353 return LOAD_OK;
354 }
355
356 } // namespace nonsfi
357 } // namespace nacl
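For context, a rough sketch of how this class is intended to be driven (a hypothetical call site for illustration only; neither the real caller nor an entry-point accessor is shown in this file):

  nacl::nonsfi::ElfImage image;
  if (image.Read(descriptor) != nacl::nonsfi::LOAD_OK)
    return false;  // Bad or unreadable ELF/program headers.
  if (image.Load(descriptor) != nacl::nonsfi::LOAD_OK)
    return false;  // Could not reserve memory or map the segments.
  // The program entry point is ehdr.e_entry adjusted by the load bias chosen
  // during Load().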
