Chromium Code Reviews

Side by Side Diff: tools/relocation_packer/src/relocation_packer_elf_file.cc

Issue 310483003: Add a host tool to pack R_ARM_RELATIVE relocations in libchrome.<ver>.so. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 6 years, 6 months ago
1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 // TODO(simonb): Extend for 64-bit target libraries.
6 // TODO(simonb): What if we cannot find two empty .dynamic slots?
7
8 #include "relocation_packer_elf_file.h"
9
10 #include <stdlib.h>
11 #include <sys/types.h>
12 #include <unistd.h>
13 #include <string>
14 #include <vector>
15
16 #include "libelf.h"
17 #include "relocation_packer_debug.h"
18 #include "relocation_packer_packer.h"
19
20 namespace relocation_packer {
21
22 // Stub identifier written to 'null out' packed data.
23 const Elf32_Word ElfFile::kStubIdentifier;
24
25 // Out-of-band dynamic tags used to indicate the offset and size of the
26 // .android.rel.dyn section.
27 const Elf32_Sword ElfFile::DT_ANDROID_ARM_REL_OFFSET;
28 const Elf32_Sword ElfFile::DT_ANDROID_ARM_REL_SIZE;
29
30 namespace {
31
32 // Rewrite section data. Allocates new data and makes it the data element's
33 // buffer. Relies on program exit to free allocated data.
rmcilroy 2014/06/02 15:16:35 I'm not sure I like relying on program exit to free the allocated data.
simonb (inactive) 2014/06/04 16:40:35 data->d_buf is allocated by libelf (unless we've …)
34 void RewriteSectionData(Elf_Data* data,
35 const void* section_data,
36 size_t size) {
37 CHECK(size == data->d_size);
38 uint8_t* area = new uint8_t[size];
39 ::memcpy(area, section_data, size);
40 data->d_buf = area;
41 }
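A minimal sketch (not part of the patch) of the ownership-tracking alternative the comments above discuss, assuming C++11 and this file's existing includes; RewriteSectionDataOwned and owned_buffers are hypothetical names:

  #include <memory>

  // Replacement buffers parked here stay alive for as long as libelf needs
  // them, and are all reclaimed when the container is cleared or destroyed.
  static std::vector<std::unique_ptr<uint8_t[]>> owned_buffers;

  void RewriteSectionDataOwned(Elf_Data* data,
                               const void* section_data,
                               size_t size) {
    CHECK(size == data->d_size);
    std::unique_ptr<uint8_t[]> area(new uint8_t[size]);
    ::memcpy(area.get(), section_data, size);
    data->d_buf = area.get();                  // Hand libelf the new buffer.
    owned_buffers.push_back(std::move(area));  // Retain ownership; no leak.
  }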
42
43 // Resize a section. If the new size is larger than the current size, open
44 // up a hole by increasing file offsets that come after the hole. If smaller
45 // than the current size, remove the hole by decreasing those offsets.
46 void ResizeSection(Elf* elf,
rmcilroy 2014/06/02 15:16:35 This function is pretty massive and could benefit from being split up.
simonb (inactive) 2014/06/04 16:40:35 Done.
47 Elf_Scn* section,
48 size_t new_size) {
49 Elf32_Shdr* section_header = elf32_getshdr(section);
50 if (section_header->sh_size == new_size)
51 return;
52
53 // Note if we are resizing the real .rel.dyn. If yes, then we have to
54 // massage d_un.d_val in the dynamic section where d_tag is DT_RELSZ and
55 // DT_RELCOUNT.
56 size_t string_index;
57 elf_getshdrstrndx(elf, &string_index);
58 const std::string section_name =
59 elf_strptr(elf, string_index, section_header->sh_name);
60 const bool is_rel_dyn_resize = section_name == ".rel.dyn";
61
62 // Require that the section has exactly one data entry, so that the section
63 // size and the data size are the same. True in practice for all sections
64 // we resize when packing or unpacking.
65 Elf_Data* data = elf_getdata(section, NULL);
rmcilroy 2014/06/02 15:16:35 nit - section_data
66 CHECK(data && elf_getdata(section, data) == NULL);
rmcilroy 2014/06/02 15:16:35 From the man page it looks like the "&& elf_getdata(section, data) == NULL" check …
simonb (inactive) 2014/06/04 16:40:35 Done.
67 CHECK(data->d_off == 0 && data->d_size == section_header->sh_size);
68
69 const Elf32_Off hole_start = section_header->sh_offset;
70 const int32_t hole_size = new_size - data->d_size;
71
72 VLOG_IF(hole_size > 0, "expand section size = %lu\n", data->d_size);
73 VLOG_IF(hole_size < 0, "shrink section size = %lu\n", data->d_size);
74
75 // Resize the data and the section header.
76 data->d_size += hole_size;
77 section_header->sh_size += hole_size;
78
79 Elf32_Ehdr* elf_header = elf32_getehdr(elf);
80 Elf32_Phdr* elf_program_header = elf32_getphdr(elf);
81
82 // Add the hole size to all offsets in the ELF file that are after the
83 // start of the hole. If the hole size is positive we are expanding the
84 // section to create a new hole; if negative, we are closing up a hole.
85
86 // Start with the main ELF header.
87 if (elf_header->e_phoff > hole_start) {
88 elf_header->e_phoff += hole_size;
89 VLOG("e_phoff adjusted to %u\n", elf_header->e_phoff);
90 }
91 if (elf_header->e_shoff > hole_start) {
92 elf_header->e_shoff += hole_size;
93 VLOG("e_shoff adjusted to %u\n", elf_header->e_shoff);
94 }
95
96 // Note of the DYNAMIC entry, picked up while iterating program headers.
97 const Elf32_Phdr* dynamic_program_header = NULL;
98
99 // Adjust all program headers for the hole.
100 for (size_t i = 0; i < elf_header->e_phnum; ++i) {
101 Elf32_Phdr* program_header = &elf_program_header[i];
102 if (program_header->p_offset > hole_start) {
103 // The hole start is past this segment, so adjust offsets and addrs.
104 program_header->p_offset += hole_size;
105 VLOG("phdr %lu p_offset adjusted to %u\n", i, program_header->p_offset);
106
107 // Only adjust vaddr and paddr if this program header has them.
108 if (program_header->p_vaddr != 0) {
109 program_header->p_vaddr += hole_size;
110 VLOG("phdr %lu p_vaddr adjusted to %u\n", i, program_header->p_vaddr);
111 }
112 if (program_header->p_paddr != 0) {
113 program_header->p_paddr += hole_size;
114 VLOG("phdr %lu p_paddr adjusted to %u\n", i, program_header->p_paddr);
115 }
116 } else if (program_header->p_offset +
117 program_header->p_filesz > hole_start) {
118 // The hole start is within this segment, so adjust file and in-memory
119 // sizes, but leave offsets and addrs unchanged.
120 program_header->p_filesz += hole_size;
121 VLOG("phdr %lu p_filesz adjusted to %u\n", i, program_header->p_filesz);
122 program_header->p_memsz += hole_size;
123 VLOG("phdr %lu p_memsz adjusted to %u\n", i, program_header->p_memsz);
124 }
125
126 // If this is the DYNAMIC program header, note it for later.
127 if (program_header->p_type == PT_DYNAMIC) {
128 dynamic_program_header = program_header;
129 }
130 }
131 CHECK(dynamic_program_header);
132
133 // Notes of some sections requiring special attention, picked up during
134 // section iteration.
135 Elf_Scn* dynamic_section = NULL;
136 Elf_Scn* dynsym_section = NULL;
137 Elf_Scn* relplt_section = NULL;
138 Elf_Scn* symtab_section = NULL;
rmcilroy 2014/06/02 15:16:35 Maybe you could just save these sections away as fields …
simonb (inactive) 2014/06/04 16:40:35 Slightly fiddly. See if you like the refactored code.
rmcilroy 2014/06/07 11:49:06 This looks better, thanks.
139 // Note the offset of .android.rel.dyn also.
140 Elf32_Off android_rel_dyn_offset = 0;
141
142 // Adjust all section headers for the hole.
143 Elf_Scn* scan = NULL;
144 while ((scan = elf_nextscn(elf, scan)) != NULL) {
145 Elf32_Shdr* section_header = elf32_getshdr(scan);
146 std::string name = elf_strptr(elf, string_index, section_header->sh_name);
147 if (section_header->sh_offset > hole_start) {
148 section_header->sh_offset += hole_size;
149 VLOG("section %s sh_offset"
150 " adjusted to %u\n", name.c_str(), section_header->sh_offset);
151 // Only adjust section addr if this section has one.
152 if (section_header->sh_addr != 0) {
153 section_header->sh_addr += hole_size;
154 VLOG("section %s sh_addr"
155 " adjusted to %u\n", name.c_str(), section_header->sh_addr);
156 }
157 }
158
159 // Note the special sections that we are looking for as we go along.
160 if (section_header->sh_offset == dynamic_program_header->p_offset) {
161 dynamic_section = scan;
162 }
163 if (name == ".dynsym") {
164 dynsym_section = scan;
165 }
166 if (name == ".rel.plt") {
167 relplt_section = scan;
168 }
169 if (name == ".symtab") {
170 symtab_section = scan;
171 }
172
173 // Note .android.rel.dyn offset.
174 if (name == ".android.rel.dyn") {
175 android_rel_dyn_offset = section_header->sh_offset;
176 }
177 }
178 CHECK(dynamic_section != NULL);
179 CHECK(dynsym_section != NULL);
180 CHECK(relplt_section != NULL);
181 CHECK(android_rel_dyn_offset != 0);
182
183 // Load the .dynamic section into a local array. Because we have to edit
184 // the current contents of .dynamic we disallow resizing it.
185 CHECK(section != dynamic_section);
rmcilroy 2014/06/02 15:16:35 nit - do this check at the top of the function
simonb (inactive) 2014/06/04 16:40:35 Can't; dynamic_section is assigned in the loop above.
186 data = elf_getdata(dynamic_section, NULL);
rmcilroy 2014/06/02 15:16:35 nit - dynamic_section_data
187 CHECK(data && elf_getdata(dynamic_section, data) == NULL);
rmcilroy 2014/06/02 15:16:35 ditto
188
189 const Elf32_Dyn* dynamic_base = reinterpret_cast<Elf32_Dyn*>(data->d_buf);
190 std::vector<Elf32_Dyn> dynamics(
191 dynamic_base,
192 dynamic_base + data->d_size / sizeof(dynamics[0]));
193
194 for (size_t i = 0; i < dynamics.size(); ++i) {
195 Elf32_Dyn* dynamic = &dynamics[i];
196 const Elf32_Sword tag = dynamic->d_tag;
197 // Any tags that hold offsets are adjustment candidates.
198 const bool is_adjustable = (tag == DT_PLTGOT ||
199 tag == DT_HASH ||
200 tag == DT_STRTAB ||
201 tag == DT_SYMTAB ||
202 tag == DT_RELA ||
203 tag == DT_INIT ||
204 tag == DT_FINI ||
205 tag == DT_REL ||
206 tag == DT_JMPREL ||
207 tag == DT_INIT_ARRAY ||
208 tag == DT_FINI_ARRAY ||
209 tag == ElfFile::DT_ANDROID_ARM_REL_OFFSET);
210 if (is_adjustable && dynamic->d_un.d_ptr > hole_start) {
211 dynamic->d_un.d_ptr += hole_size;
212 VLOG("dynamic[%lu] %u"
213 " d_ptr adjusted to %u\n", i, dynamic->d_tag, dynamic->d_un.d_ptr);
214 }
215
216 // If we are specifically resizing .rel.dyn, we need to make additional
217 // adjustments to tags that indicate the counts of R_ARM_RELATIVE
218 // relocations in the shared object.
219 if (is_rel_dyn_resize) {
220 // DT_RELSZ is the overall size of relocations. Adjust by hole size.
221 if (tag == DT_RELSZ) {
222 dynamic->d_un.d_val += hole_size;
223 VLOG("dynamic[%lu] %u"
224 " d_val adjusted to %u\n", i, dynamic->d_tag, dynamic->d_un.d_val);
225 }
226
227 // The crazy linker does not use DT_RELCOUNT, but we keep it updated
228 // anyway. In practice the section hole is always equal to the size
229 // of R_ARM_RELATIVE relocations, and DT_RELCOUNT is the count of
230 // relative relocations. So closing a hole on packing reduces
231 // DT_RELCOUNT to zero, and opening a hole on unpacking restores it to
232 // its pre-packed value.
233 if (tag == DT_RELCOUNT) {
234 dynamic->d_un.d_val += hole_size / sizeof(Elf32_Rel);
235 VLOG("dynamic[%lu] %u"
236 " d_val adjusted to %u\n", i, dynamic->d_tag, dynamic->d_un.d_val);
237 }
238
239 // DT_RELENT doesn't change, but make sure it is what we expect.
240 if (tag == DT_RELENT) {
241 CHECK(dynamic->d_un.d_val == sizeof(Elf32_Rel));
242 }
243 }
244 }
245
246 void* section_data = &dynamics[0];
rmcilroy 2014/06/02 15:16:35 nit - new_dynamic_section_data
247 size_t bytes = dynamics.size() * sizeof(dynamics[0]);
248 RewriteSectionData(data, section_data, bytes);
249
250 // Load the .dynsym section into a local array. We need to adjust the
251 // values for the symbols represented in it.
252 data = elf_getdata(dynsym_section, NULL);
rmcilroy 2014/06/02 15:16:35 nit - dynsym_section_data (and similar for others).
253 CHECK(data && elf_getdata(dynsym_section, data) == NULL);
254
255 const Elf32_Sym* dynsym_base = reinterpret_cast<Elf32_Sym*>(data->d_buf);
256 std::vector<Elf32_Sym> dynsyms(
257 dynsym_base,
258 dynsym_base + data->d_size / sizeof(dynsyms[0]));
259
260 for (size_t i = 0; i < dynsyms.size(); ++i) {
261 Elf32_Sym* dynsym = &dynsyms[i];
262 const int type = static_cast<int>(ELF32_ST_TYPE(dynsym->st_info));
263 const bool is_adjustable = (type == STT_OBJECT ||
264 type == STT_FUNC ||
265 type == STT_SECTION ||
266 type == STT_FILE ||
267 type == STT_COMMON ||
268 type == STT_TLS);
269 if (is_adjustable && dynsym->st_value > hole_start) {
270 dynsym->st_value += hole_size;
271 VLOG("dynsym[%lu] type=%u"
272 " st_value adjusted to %u\n", i, type, dynsym->st_value);
273 }
274 }
275
276 section_data = &dynsyms[0];
277 bytes = dynsyms.size() * sizeof(dynsyms[0]);
278 RewriteSectionData(data, section_data, bytes);
279
280 // Load the .rel.plt section into a local array. We need to adjust the
281 // offset of every relocation inside it that falls beyond the hole start.
282 data = elf_getdata(relplt_section, NULL);
283 CHECK(data && elf_getdata(relplt_section, data) == NULL);
284
285 const Elf32_Rel* relplt_base = reinterpret_cast<Elf32_Rel*>(data->d_buf);
286 std::vector<Elf32_Rel> relplts(
287 relplt_base,
288 relplt_base + data->d_size / sizeof(relplts[0]));
289
290 for (size_t i = 0; i < relplts.size(); ++i) {
291 Elf32_Rel* relplt = &relplts[i];
292 if (relplt->r_offset > hole_start) {
293 relplt->r_offset += hole_size;
294 VLOG("relplt[%lu] r_offset adjusted to %u\n", i, relplt->r_offset);
295 }
296 }
297
298 section_data = &relplts[0];
299 bytes = relplts.size() * sizeof(relplts[0]);
300 RewriteSectionData(data, section_data, bytes);
301
302 // .symtab may be absent if the shared library was stripped.
303 if (symtab_section) {
304 // Load the .symtab section into a local array. We need to adjust the
305 // offset of every relocation inside it that falls beyond the hole start.
306 data = elf_getdata(symtab_section, NULL);
307 CHECK(data && elf_getdata(symtab_section, data) == NULL);
308
309 const Elf32_Sym* symtab_base = reinterpret_cast<Elf32_Sym*>(data->d_buf);
310 std::vector<Elf32_Sym> symtab(
311 symtab_base,
312 symtab_base + data->d_size / sizeof(symtab[0]));
313
314 for (size_t i = 0; i < symtab.size(); ++i) {
315 Elf32_Sym* sym = &symtab[i];
316 if (sym->st_value > hole_start) {
317 sym->st_value += hole_size;
318 VLOG("symtab[%lu] value adjusted to %u\n", i, sym->st_value);
319 }
320 }
321
322 section_data = &symtab[0];
323 bytes = symtab.size() * sizeof(symtab[0]);
324 RewriteSectionData(data, section_data, bytes);
325 }
326 }
327
328 } // namespace
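A worked illustration (hypothetical numbers) of the adjustment rule ResizeSection() applies throughout: a packing pass that closes a 16 KiB hole starting at file offset 0x1000 uses hole_size = -0x4000, and then:

  //   field      before      after      rule applied
  //   e_shoff    0x30000  -> 0x2c000    past hole_start, so shifted down
  //   p_offset   0x00800  -> 0x00800    before hole_start, so unchanged
  //   p_filesz   0x05000  -> 0x01000    segment straddles the hole, so resized
  //   DT_RELSZ   0x04800  -> 0x00800    .rel.dyn resize also adjusts DT_RELSZ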
329
330 // Load the complete ELF file into a memory image in libelf, and identify
331 // the .rel.dyn, .dynamic, and .android.rel.dyn sections.
332 bool ElfFile::Load(int fd) {
333 elf_ = elf_begin(fd, ELF_C_RDWR, NULL);
334 CHECK(elf_);
335
336 if (elf_kind(elf_) != ELF_K_ELF) {
337 LOG("File not in ELF format\n");
338 return false;
339 }
340
341 Elf32_Ehdr* elf_header = elf32_getehdr(elf_);
342 if (!elf_header) {
343 LOG("Failed to load ELF header\n");
344 return false;
345 }
346 if (elf_header->e_machine != EM_ARM) {
347 LOG("File is not an arm32 ELF file\n");
348 return false;
349 }
350
351 // Require that our endianness matches that of the target, and that both
352 // are little-endian. Safe for all current build/target combinations.
353 const int endian = static_cast<int>(elf_header->e_ident[EI_DATA]);
354 CHECK(endian == ELFDATA2LSB);
355 CHECK(__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__);
356
357 VLOG("endian = %u\n", endian);
358
359 VLOG("e_phoff = %u\n", elf_header->e_phoff);
360 VLOG("e_shoff = %u\n", elf_header->e_shoff);
361 VLOG("e_ehsize = %u\n", elf_header->e_ehsize);
362 VLOG("e_phentsize = %u\n", elf_header->e_phentsize);
363 VLOG("e_phnum = %u\n", elf_header->e_phnum);
364 VLOG("e_shnum = %u\n", elf_header->e_shnum);
365 VLOG("e_shstrndx = %u\n", elf_header->e_shstrndx);
366
367 const Elf32_Phdr* elf_program_header = elf32_getphdr(elf_);
368 CHECK(elf_program_header);
369
370 const Elf32_Phdr* dynamic_program_header = NULL;
371 for (size_t i = 0; i < elf_header->e_phnum; ++i) {
372 const Elf32_Phdr* program_header = &elf_program_header[i];
373 std::string type;
374 switch (program_header->p_type) {
375 case 0: type = "NULL"; break;
rmcilroy 2014/06/02 15:16:35 nit - you use PT_DYNAMIC below, could you use PT_* constants here too?
simonb (inactive) 2014/06/04 16:40:35 Done.
376 case 1: type = "LOAD"; break;
377 case 2: type = "DYNAMIC"; break;
378 case 3: type = "INTERP"; break;
379 case 4: type = "NOTE"; break;
380 case 5: type = "SHLIB"; break;
381 case 6: type = "PHDR"; break;
382 case 7: type = "TLS"; break;
383 default: type = "(OTHER)"; break;
384 }
385 VLOG("phdr %lu : %s\n", i, type.c_str());
386 VLOG(" p_offset = %u\n", program_header->p_offset);
387 VLOG(" p_vaddr = %u\n", program_header->p_vaddr);
388 VLOG(" p_paddr = %u\n", program_header->p_paddr);
389 VLOG(" p_filesz = %u\n", program_header->p_filesz);
390 VLOG(" p_memsz = %u\n", program_header->p_memsz);
rmcilroy 2014/06/02 15:16:35 Please extract the debugging code (from line 373 to …) into a separate function.
simonb (inactive) 2014/06/04 16:40:35 Done.
391 if (program_header->p_type == PT_DYNAMIC) {
392 CHECK(dynamic_program_header == NULL);
393 dynamic_program_header = program_header;
394 }
395 }
396 CHECK(dynamic_program_header != NULL);
397
398 size_t string_index;
399 elf_getshdrstrndx(elf_, &string_index);
400
401 // Notes of the .rel.dyn, .android.rel.dyn, and .dynamic sections. Found
402 // while iterating sections, and later stored in class attributes.
403 Elf_Scn* found_rel_dyn_section = NULL;
404 Elf_Scn* found_android_rel_dyn_section = NULL;
405 Elf_Scn* found_dynamic_section = NULL;
406
407 // Flag set if we encounter any .debug* section. We do not adjust any
408 // offsets or addresses of any debug data, so if we find one of these then
409 // the resulting output shared object should still run, but might not be
410 // usable for debugging, disassembly, and so on. A warning is logged if
411 // this occurs.
412 bool is_debug = false;
rmcilroy 2014/06/02 15:16:35 nit - has_debug_section
simonb (inactive) 2014/06/04 16:40:35 Done.
413
414 Elf_Scn* section = NULL;
415 while ((section = elf_nextscn(elf_, section)) != NULL) {
416 const Elf32_Shdr* section_header = elf32_getshdr(section);
417 std::string name = elf_strptr(elf_, string_index, section_header->sh_name);
418 VLOG("section %s\n", name.c_str());
419 VLOG(" sh_addr = %u\n", section_header->sh_addr);
420 VLOG(" sh_offset = %u\n", section_header->sh_offset);
421 VLOG(" sh_size = %u\n", section_header->sh_size);
422
423 // Note special sections as we encounter them.
424 if (name == ".rel.dyn") {
425 found_rel_dyn_section = section;
426 }
427 if (name == ".android.rel.dyn") {
428 found_android_rel_dyn_section = section;
429 }
430 if (section_header->sh_offset == dynamic_program_header->p_offset) {
431 found_dynamic_section = section;
432 }
433
434 // If we find a section named .debug*, set the debug warning flag.
435 if (name.find(".debug") == 0) {
436 is_debug = true;
437 }
438
439 Elf_Data* data = NULL;
440 while ((data = elf_getdata(section, data)) != NULL) {
441 VLOG(" data\n");
442 VLOG(" d_buf = %p\n", data->d_buf);
443 VLOG(" d_off = %lu\n", data->d_off);
444 VLOG(" d_size = %lu\n", data->d_size);
445 }
rmcilroy 2014/06/02 15:16:35 nit - extract verbose debugging code (lines 418-42…)
simonb (inactive) 2014/06/04 16:40:35 Done.
446 }
447
448 // Loading failed if we did not find the required special sections.
449 if (!found_rel_dyn_section) {
450 LOG("Missing .rel.dyn section\n");
451 return false;
452 }
453 if (!found_dynamic_section) {
454 LOG("Missing .dynamic section\n");
455 return false;
456 }
457 if (!found_android_rel_dyn_section) {
458 LOG("Missing .android.rel.dyn section (not split/packed?)\n");
459 return false;
460 }
461
462 if (is_debug) {
463 LOG("WARNING: found .debug section(s), and ignored them\n");
464 }
465
466 fd_ = fd;
467 rel_dyn_section_ = found_rel_dyn_section;
468 dynamic_section_ = found_dynamic_section;
469 android_rel_dyn_section_ = found_android_rel_dyn_section;
470 return true;
471 }
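For context, a hedged sketch of the setup a caller needs before Load(): libelf requires the elf_version() handshake before elf_begin(), and ELF_C_RDWR needs a file descriptor opened read-write. Only the libelf calls below are real API; the driver itself, and the assumption that ElfFile is default-constructible, are illustrative:

  #include <fcntl.h>
  #include "libelf.h"

  int main(int argc, char** argv) {
    if (argc != 2)
      return 1;
    if (elf_version(EV_CURRENT) == EV_NONE)  // Mandatory libelf handshake.
      return 1;
    const int fd = open(argv[1], O_RDWR);    // Writable, for ELF_C_RDWR.
    if (fd == -1)
      return 1;
    relocation_packer::ElfFile elf_file;
    if (!elf_file.Load(fd))
      return 1;
    // ... pack or unpack here, then Flush(); see the sketch after Flush().
    return 0;
  }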
472
473 namespace {
rmcilroy 2014/06/02 15:16:35 nit - I would prefer a single anonymous namespace
simonb (inactive) 2014/06/04 16:40:35 I prefer helper functions to be defined close to their use.
rmcilroy 2014/06/07 11:49:06 In that case, please move ResizeSection (and all associated helpers) …
simonb (inactive) 2014/06/09 14:39:19 Good point. Done. Also moved AddDynamicEntry and RemoveDynamicEntry.
474
475 // Replace the first free (unused) slot in a dynamics vector with the given
476 // value. The vector always ends with a free (unused) element, so the slot
477 // found cannot be the last one in the vector.
rmcilroy 2014/06/02 15:16:35 As I commented in the Readme, this seems fragile, …
simonb (inactive) 2014/06/04 16:40:35 gold --spare-dynamic-tags. Code updated.
478 void AddDynamicEntry(Elf32_Dyn dyn,
479 std::vector<Elf32_Dyn>* dynamics) {
480 for (size_t i = 0; i < dynamics->size() - 1; ++i) {
rmcilroy 2014/06/02 15:16:35 nit - add a comment: // The vector should always end with a free (unused) element.
simonb (inactive) 2014/06/04 16:40:35 Done.
481 Elf32_Dyn& slot = dynamics->at(i);
482 if (slot.d_tag == DT_NULL) {
483 slot = dyn;
484 VLOG("dynamic[%lu] overwritten with %u\n", i, dyn.d_tag);
485 return;
486 }
487 }
488
489 // TODO(simonb): Are sufficient free slots always available?
490 // No free dynamics vector slot was found.
491 NOTREACHED();
492 }
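The free slots this relies on are the spare DT_NULL entries the linker leaves at the end of .dynamic (see the --spare-dynamic-tags note above; gold reserves a few by default). A hedged helper for checking that enough slots exist, with a hypothetical name:

  // Count free (DT_NULL) slots usable by AddDynamicEntry(), including the
  // terminating DT_NULL entry that must remain in place.
  size_t CountSpareDynamicSlots(const std::vector<Elf32_Dyn>& dynamics) {
    size_t spare = 0;
    for (size_t i = 0; i < dynamics.size(); ++i) {
      if (dynamics[i].d_tag == DT_NULL)
        ++spare;
    }
    return spare;
  }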
493
494 // Apply R_ARM_RELATIVE relocations to the file data to which they refer.
495 // This relocates data into the area it will occupy after the hole in
496 // .rel.dyn is added or removed.
497 void ApplyRelocations(Elf* elf,
rmcilroy 2014/06/02 15:16:35 Should this maybe be called AdjustRelocationTargets?
simonb (inactive) 2014/06/04 16:40:35 Done.
498 Elf32_Off hole_start,
499 size_t hole_size,
500 const std::vector<Elf32_Rel>& relative_relocations) {
501 Elf_Scn* section = NULL;
502 while ((section = elf_nextscn(elf, section)) != NULL) {
503 const Elf32_Shdr* section_header = elf32_getshdr(section);
504
505 // Identify this section's start and end addresses.
506 const Elf32_Addr section_start = section_header->sh_addr;
507 const Elf32_Addr section_end = section_start + section_header->sh_size;
508
509 Elf_Data* data = elf_getdata(section, NULL);
510 CHECK(data && elf_getdata(section, data) == NULL);
511
512 // Ignore sections with no effective data.
513 if (data->d_buf == NULL)
514 continue;
515
516 // Create a copy-on-write pointer to the section's data.
517 uint8_t* area = reinterpret_cast<uint8_t*>(data->d_buf);
518
519 for (size_t i = 0; i < relative_relocations.size(); ++i) {
520 const Elf32_Rel* relocation = &relative_relocations[i];
521 // See if this relocation points into the current section.
522 const bool is_encompassed = relocation->r_offset >= section_start &&
523 relocation->r_offset < section_end;
524 if (is_encompassed) {
rmcilroy 2014/06/02 15:16:35 nit - just inline the is_encompassed calculation into the if.
simonb (inactive) 2014/06/04 16:40:35 Done.
525 Elf32_Addr byte_offset = relocation->r_offset - section_start;
526 Elf32_Off* target = reinterpret_cast<Elf32_Off*>(area + byte_offset);
527
528 // See if the relocation's target is after the hole's start.
529 if (*target > hole_start) {
530 if (area == data->d_buf) {
531 // We are about to write but we have not yet copied the buffer.
532 // Copy now, and recompute target to point into the newly
533 // allocated copy-on-write buffer.
534 area = new uint8_t[data->d_size];
535 ::memcpy(area, data->d_buf, data->d_size);
536 target = reinterpret_cast<Elf32_Off*>(area + byte_offset);
537 }
538 *target += hole_size;
539 VLOG("relocation[%lu] target adjusted to %u\n", i, *target);
540 }
541 }
542 }
543
544 // If we applied any relocation to this section, write it back.
545 if (area != data->d_buf) {
546 RewriteSectionData(data, area, data->d_size);
547 delete [] area;
548 }
549 }
550 }
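For reference, the loader-side behavior being preserved: an R_ARM_RELATIVE entry (REL format, so the addend is implicit in the file data) makes the dynamic loader add the load bias to the word already stored at r_offset. A minimal sketch, not part of the patch:

  // Conceptually what the loader does per R_ARM_RELATIVE entry: 'image' is
  // the library mapped at some base, 'load_bias' the relocation displacement.
  // Because the addend lives in the section data itself, moving that data
  // (opening or closing the hole) requires the pre-adjustment made above.
  inline void ApplyArmRelative(uint8_t* image,
                               const Elf32_Rel& rel,
                               Elf32_Addr load_bias) {
    Elf32_Addr* target = reinterpret_cast<Elf32_Addr*>(image + rel.r_offset);
    *target += load_bias;  // B(S) + A, with A held in place.
  }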
551
552 // Adjust relocations so that the offset that they indicate will be correct
553 // after the hole in .rel.dyn is added or removed (in effect, relocate the
554 // relocations).
555 void AdjustRelocations(Elf32_Off hole_start,
556 size_t hole_size,
557 std::vector<Elf32_Rel>* relocations) {
558 for (size_t i = 0; i < relocations->size(); ++i) {
559 Elf32_Rel* relocation = &relocations->at(i);
560 if (relocation->r_offset > hole_start) {
561 relocation->r_offset += hole_size;
562 VLOG("relocation[%lu] offset adjusted to %u\n", i, relocation->r_offset);
563 }
564 }
565 }
566
567 } // namespace
568
569 // Remove R_ARM_RELATIVE entries from .rel.dyn and write as packed data
570 // into .android.rel.dyn.
571 bool ElfFile::PackRelocations() {
572 // Retrieve the current .rel.dyn section data.
573 Elf_Data* data = elf_getdata(rel_dyn_section_, NULL);
574 CHECK(data && elf_getdata(rel_dyn_section_, data) == NULL);
575
576 // Convert data to a vector of Elf32 relocations.
577 const Elf32_Rel* relocations_base = reinterpret_cast<Elf32_Rel*>(data->d_buf);
578 std::vector<Elf32_Rel> relocations(
579 relocations_base,
580 relocations_base + data->d_size / sizeof(relocations[0]));
581
582 std::vector<Elf32_Rel> relative_relocations;
583 std::vector<Elf32_Rel> other_relocations;
584
585 // Filter relocations into those that are R_ARM_RELATIVE and others.
586 for (size_t i = 0; i < relocations.size(); ++i) {
587 const Elf32_Rel& relocation = relocations[i];
588 if (ELF32_R_TYPE(relocation.r_info) == R_ARM_RELATIVE) {
589 CHECK(ELF32_R_SYM(relocation.r_info) == 0);
590 relative_relocations.push_back(relocation);
591 } else {
592 other_relocations.push_back(relocation);
593 }
594 }
595 VLOG("R_ARM_RELATIVE: %lu entries\n", relative_relocations.size());
596 VLOG("Other : %lu entries\n", other_relocations.size());
597 VLOG("Total : %lu entries\n", relocations.size());
598
599 // If there are no relative relocations then we have nothing packable.
600 // Perhaps the shared object has already been packed?
601 if (relative_relocations.empty()) {
602 LOG("No R_ARM_RELATIVE relocations found (already packed?)\n");
603 return false;
604 }
605
606 // Pre-calculate the size of the hole we will close up when we rewrite
607 // .rel.dyn. We have to adjust all relocation addresses to account for this.
608 Elf32_Shdr* section_header = elf32_getshdr(rel_dyn_section_);
609 const Elf32_Off hole_start = section_header->sh_offset;
610 const size_t hole_size =
611 relative_relocations.size() * sizeof(relative_relocations[0]);
612
613 // Unless padding, pre-apply R_ARM_RELATIVE relocations to account for the
614 // hole, and pre-adjust all relocation offsets accordingly.
615 if (!is_padding_rel_dyn_) {
616 // Apply relocations to all R_ARM_RELATIVE data to relocate it into the
617 // area it will occupy once the hole in .rel.dyn is removed.
618 ApplyRelocations(elf_, hole_start, -hole_size, relative_relocations);
619 // Relocate the relocations.
620 AdjustRelocations(hole_start, -hole_size, &relative_relocations);
621 AdjustRelocations(hole_start, -hole_size, &other_relocations);
622 }
623
624 // Pack R_ARM_RELATIVE relocations.
625 const size_t initial_bytes =
626 relative_relocations.size() * sizeof(relative_relocations[0]);
627 LOG("Unpacked R_ARM_RELATIVE: %lu bytes\n", initial_bytes);
628 std::vector<uint8_t> packed;
629 RelocationPacker packer;
630 packer.PackRelativeRelocations(relative_relocations, &packed);
631 const void* packed_data = &packed[0];
632 const size_t packed_bytes = packed.size() * sizeof(packed[0]);
633 LOG("Packed R_ARM_RELATIVE: %lu bytes\n", packed_bytes);
634
635 // If we have insufficient R_ARM_RELATIVE relocations to form a run then
636 // packing fails.
637 if (packed.empty()) {
638 LOG("Too few R_ARM_RELATIVE relocations to pack\n");
639 return false;
640 }
641
642 // Run a loopback self-test as a check that packing is lossless.
643 std::vector<Elf32_Rel> unpacked;
644 packer.UnpackRelativeRelocations(packed, &unpacked);
645 CHECK(unpacked.size() == relative_relocations.size());
646 for (size_t i = 0; i < unpacked.size(); ++i) {
647 CHECK(unpacked[i].r_offset == relative_relocations[i].r_offset);
648 CHECK(unpacked[i].r_info == relative_relocations[i].r_info);
649 }
650
651 // Make sure packing saved some space.
652 if (packed_bytes >= initial_bytes) {
653 LOG("Packing R_ARM_RELATIVE relocations saves no space\n");
654 return false;
655 }
656
657 // If padding, add R_ARM_NONE relocations to other_relocations to make it
658 // the same size as the original relocations we read in. This makes
659 // the ResizeSection() below a no-op.
660 if (is_padding_rel_dyn_) {
661 const Elf32_Rel r_arm_none = {0, R_ARM_NONE};  // {r_offset, r_info}.
662 const size_t required = relocations.size() - other_relocations.size();
663 std::vector<Elf32_Rel> padding(required, r_arm_none);
664 other_relocations.insert(
665 other_relocations.end(), padding.begin(), padding.end());
666 }
667
668 // Rewrite the current .rel.dyn section to be only the non-R_ARM_RELATIVE
669 // relocations, then shrink it to size.
670 const void* section_data = &other_relocations[0];
rmcilroy 2014/06/02 15:16:35 nit - new_section_data (or new_data)
671 const size_t bytes = other_relocations.size() * sizeof(other_relocations[0]);
672 ResizeSection(elf_, rel_dyn_section_, bytes);
673 RewriteSectionData(data, section_data, bytes);
674
675 // Rewrite the current .android.rel.dyn section to hold the packed
676 // R_ARM_RELATIVE relocations.
677 data = elf_getdata(android_rel_dyn_section_, NULL);
678 CHECK(data && elf_getdata(android_rel_dyn_section_, data) == NULL);
rmcilroy 2014/06/02 15:16:35 You do this CHECK a lot, maybe have a wrapper function?
simonb (inactive) 2014/06/04 16:40:35 Done.
679 // Ensure the current section is not zero-length (that is, has allocated
680 // data that we can validly expand).
681 CHECK(data->d_size > 0 && data->d_buf);
rmcilroy 2014/06/02 15:16:35 nit - do this check in ResizeSection?
simonb (inactive) 2014/06/04 16:40:35 Done.
682 ResizeSection(elf_, android_rel_dyn_section_, packed_bytes);
683 RewriteSectionData(data, packed_data, packed_bytes);
684
685 // Rewrite .dynamic to include two new tags describing .android.rel.dyn.
686 data = elf_getdata(dynamic_section_, NULL);
687 CHECK(data && elf_getdata(dynamic_section_, data) == NULL);
688 const Elf32_Dyn* dynamic_base = reinterpret_cast<Elf32_Dyn*>(data->d_buf);
689 std::vector<Elf32_Dyn> dynamics(
690 dynamic_base,
691 dynamic_base + data->d_size / sizeof(dynamics[0]));
692 section_header = elf32_getshdr(android_rel_dyn_section_);
693 // Steal two vacant slots to describe the .android.rel.dyn section.
694 const Elf32_Dyn offset_dyn =
695 {DT_ANDROID_ARM_REL_OFFSET, {section_header->sh_offset}};
696 AddDynamicEntry(offset_dyn, &dynamics);
697 const Elf32_Dyn size_dyn =
698 {DT_ANDROID_ARM_REL_SIZE, {section_header->sh_size}};
699 AddDynamicEntry(size_dyn, &dynamics);
700 const void* dynamics_data = &dynamics[0];
701 const size_t dynamics_bytes = dynamics.size() * sizeof(dynamics[0]);
702 RewriteSectionData(data, dynamics_data, dynamics_bytes);
rmcilroy 2014/06/02 15:16:35 nit - lines 686-702 seem like they could be a separate function.
simonb (inactive) 2014/06/04 16:40:35 Left as is for now.
703
704 return true;
705 }
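A hedged sketch of the wrapper suggested above for the repeated elf_getdata-plus-CHECK pattern (the reply says one was added; the name GetSectionData and its exact shape here are illustrative):

  // Return the single Elf_Data descriptor for a section, CHECK-failing if
  // the section does not have exactly one data entry.
  Elf_Data* GetSectionData(Elf_Scn* section) {
    Elf_Data* data = elf_getdata(section, NULL);
    CHECK(data && elf_getdata(section, data) == NULL);
    return data;
  }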
706
707 namespace {
708
709 // Replace elements in the dynamics vector that match the given tag with
710 // unused (DT_NULL) slot data. The first unused slot effectively terminates
711 // the vector, so only items at its end can validly be removed.
712 void RemoveDynamicEntry(Elf32_Sword tag,
713 std::vector<Elf32_Dyn>* dynamics) {
714 const Elf32_Dyn null_dyn = {DT_NULL, {0}};
715
716 for (size_t i = 0; i < dynamics->size() - 1; ++i) {
rmcilroy 2014/06/02 15:16:35 If you can only remove items from the end of the vector, …
simonb (inactive) 2014/06/04 16:40:35 Rewritten to remove the matching entry and shuffle the remaining entries down.
717 Elf32_Dyn& slot = dynamics->at(i);
718 if (slot.d_tag == tag) {
719 slot = null_dyn;
720 VLOG("dynamic[%lu] overwritten with DT_NULL\n", i);
721 }
722 }
723 }
724
725 } // namespace
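A hedged sketch of the rewritten form mentioned in the reply above: erase the matching entry so later entries shuffle down, then append a DT_NULL so the vector keeps its size and its unused-slot tail. The name is hypothetical:

  void RemoveDynamicEntryCompacting(Elf32_Sword tag,
                                    std::vector<Elf32_Dyn>* dynamics) {
    for (size_t i = 0; i < dynamics->size(); ++i) {
      if (dynamics->at(i).d_tag == tag) {
        dynamics->erase(dynamics->begin() + i);  // Later entries move down.
        const Elf32_Dyn null_dyn = {DT_NULL, {0}};
        dynamics->push_back(null_dyn);           // Preserve the overall size.
        return;
      }
    }
  }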
726
727 // Find packed R_ARM_RELATIVE relocations in .android.rel.dyn, unpack them,
728 // and rewrite the .rel.dyn section in so_file to contain unpacked data.
729 bool ElfFile::UnpackRelocations() {
730 // Retrieve the current .android.rel.dyn section data.
731 Elf_Data* data = elf_getdata(android_rel_dyn_section_, NULL);
732 CHECK(data && elf_getdata(android_rel_dyn_section_, data) == NULL);
733
734 // Convert data to a vector of bytes.
735 const uint8_t* packed_base = reinterpret_cast<uint8_t*>(data->d_buf);
736 std::vector<uint8_t> packed(
737 packed_base,
738 packed_base + data->d_size / sizeof(packed[0]));
739
740 // Properly packed data must begin with "APR1".
741 if (packed.size() < 4 ||
742 packed[0] != 'A' || packed[1] != 'P' ||
743 packed[2] != 'R' || packed[3] != '1') {
744 LOG("Packed R_ARM_RELATIVE relocations not found (not packed?)\n");
745 return false;
746 }
747
748 // Unpack the data to re-materialize the R_ARM_RELATIVE relocations.
749 const size_t packed_bytes = packed.size() * sizeof(packed[0]);
750 LOG("Packed R_ARM_RELATIVE: %lu bytes\n", packed_bytes);
751 std::vector<Elf32_Rel> relative_relocations;
752 RelocationPacker packer;
753 packer.UnpackRelativeRelocations(packed, &relative_relocations);
754 const size_t unpacked_bytes =
755 relative_relocations.size() * sizeof(relative_relocations[0]);
756 LOG("Unpacked R_ARM_RELATIVE: %lu bytes\n", unpacked_bytes);
757
758 // Retrieve the current .rel.dyn section data.
759 data = elf_getdata(rel_dyn_section_, NULL);
760 CHECK(data && elf_getdata(rel_dyn_section_, data) == NULL);
761
762 // Interpret data as Elf32 relocations.
763 const Elf32_Rel* relocations_base = reinterpret_cast<Elf32_Rel*>(data->d_buf);
764 std::vector<Elf32_Rel> relocations(
765 relocations_base,
766 relocations_base + data->d_size / sizeof(relocations[0]));
767
768 std::vector<Elf32_Rel> other_relocations;
769 size_t padding = 0;
770
771 // Filter relocations to locate any that are R_ARM_NONE. These will occur
772 // if padding was turned on for packing.
773 for (size_t i = 0; i < relocations.size(); ++i) {
774 const Elf32_Rel& relocation = relocations[i];
775 if (ELF32_R_TYPE(relocation.r_info) != R_ARM_NONE) {
776 other_relocations.push_back(relocation);
777 } else {
778 ++padding;
779 }
780 }
781 LOG("R_ARM_RELATIVE: %lu entries\n", relative_relocations.size());
782 LOG("Other : %lu entries\n", other_relocations.size());
783
784 // If we found the same number of R_ARM_NONE entries in .rel.dyn as we
785 // hold as unpacked relative relocations, then this is a padded file.
786 const bool is_padded = padding == relative_relocations.size();
787
788 // Pre-calculate the size of the hole we will open up when we rewrite
790 // .rel.dyn. We have to adjust all relocation addresses to account for this.
790 Elf32_Shdr* section_header = elf32_getshdr(rel_dyn_section_);
791 const Elf32_Off hole_start = section_header->sh_offset;
792 const size_t hole_size =
793 relative_relocations.size() * sizeof(relative_relocations[0]);
794
795 // Unless padded, pre-apply R_ARM_RELATIVE relocations to account for the
796 // hole, and pre-adjust all relocation offsets accordingly.
797 if (!is_padded) {
798 // Apply relocations to all R_ARM_RELATIVE data to relocate it into the
799 // area it will occupy once the hole in .rel.dyn is opened.
800 ApplyRelocations(elf_, hole_start, hole_size, relative_relocations);
801 // Relocate the relocations.
802 AdjustRelocations(hole_start, hole_size, &relative_relocations);
803 AdjustRelocations(hole_start, hole_size, &other_relocations);
804 }
805
806 // Rewrite the current .rel.dyn section to be the R_ARM_RELATIVE relocations
807 // followed by other relocations. This is the usual order in which we find
808 // them after linking, so this action will normally put the entire .rel.dyn
809 // section back to its pre-split-and-packed state.
810 relocations.assign(relative_relocations.begin(), relative_relocations.end());
811 relocations.insert(relocations.end(),
812 other_relocations.begin(), other_relocations.end());
813 const void* section_data = &relocations[0];
814 const size_t bytes = relocations.size() * sizeof(relocations[0]);
815 LOG("Total : %lu entries\n", relocations.size());
816 ResizeSection(elf_, rel_dyn_section_, bytes);
817 RewriteSectionData(data, section_data, bytes);
818
819 // Nearly empty the current .android.rel.dyn section. Leaves a four-byte
820 // stub so that some data remains allocated to the section. This is a
821 // convenience which allows us to re-pack this file again without
822 // having to remove the section and then add a new small one with objcopy.
823 // The way we resize sections relies on there being some data in a section.
824 data = elf_getdata(android_rel_dyn_section_, NULL);
825 CHECK(data && elf_getdata(android_rel_dyn_section_, data) == NULL);
826 ResizeSection(elf_, android_rel_dyn_section_, sizeof(kStubIdentifier));
827 RewriteSectionData(data, &kStubIdentifier, sizeof(kStubIdentifier));
828
829 // Rewrite .dynamic to remove two tags describing .android.rel.dyn.
830 data = elf_getdata(dynamic_section_, NULL);
831 CHECK(data && elf_getdata(dynamic_section_, data) == NULL);
832 const Elf32_Dyn* dynamic_base = reinterpret_cast<Elf32_Dyn*>(data->d_buf);
833 std::vector<Elf32_Dyn> dynamics(
834 dynamic_base,
835 dynamic_base + data->d_size / sizeof(dynamics[0]));
836 RemoveDynamicEntry(DT_ANDROID_ARM_REL_SIZE, &dynamics);
837 RemoveDynamicEntry(DT_ANDROID_ARM_REL_OFFSET, &dynamics);
838 const void* dynamics_data = &dynamics[0];
839 const size_t dynamics_bytes = dynamics.size() * sizeof(dynamics[0]);
840 RewriteSectionData(data, dynamics_data, dynamics_bytes);
841
842 return true;
843 }
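Packing is intended to be lossless, which PackRelocations() verifies inline with its loopback self-test. The same property as a standalone sketch, using the RelocationPacker calls shown above (RoundTripIsLossless is a hypothetical name):

  bool RoundTripIsLossless(const std::vector<Elf32_Rel>& rels) {
    RelocationPacker packer;
    std::vector<uint8_t> packed;
    packer.PackRelativeRelocations(rels, &packed);
    if (packed.empty())
      return false;  // Too few relocations to form a run; nothing packed.
    std::vector<Elf32_Rel> unpacked;
    packer.UnpackRelativeRelocations(packed, &unpacked);
    if (unpacked.size() != rels.size())
      return false;
    for (size_t i = 0; i < rels.size(); ++i) {
      if (unpacked[i].r_offset != rels[i].r_offset ||
          unpacked[i].r_info != rels[i].r_info)
        return false;
    }
    return true;
  }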
844
845 // Flush rewritten shared object file data.
846 void ElfFile::Flush() {
847 // Flag all ELF data held in memory as needing to be written back to the
848 // file, and tell libelf that we have controlled the file layout.
849 elf_flagelf(elf_, ELF_C_SET, ELF_F_DIRTY);
850 elf_flagelf(elf_, ELF_C_SET, ELF_F_LAYOUT);
851
852 // Write ELF data back to disk.
853 const off_t file_bytes = elf_update(elf_, ELF_C_WRITE);
854 CHECK(file_bytes > 0);
855 VLOG("elf_update returned: %lu\n", file_bytes);
856
857 // Clean up libelf, and truncate the output file to the number of bytes
858 // written by elf_update().
859 elf_end(elf_);
860 const int truncate = ftruncate(fd_, file_bytes);
861 CHECK(truncate == 0);
862 }
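Putting it together, an illustrative call sequence only (assuming the driver setup sketched after Load()):

  //   relocation_packer::ElfFile elf_file;
  //   CHECK(elf_file.Load(fd));         // fd from open(path, O_RDWR).
  //   if (elf_file.PackRelocations())   // Or UnpackRelocations() to undo.
  //     elf_file.Flush();               // elf_update() writes, ftruncate()
  //                                     // trims bytes past the new end.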
863
864 } // namespace relocation_packer