Chromium Code Reviews

File: tools/relocation_packer/src/elf_file.cc

Issue 310483003: Add a host tool to pack R_ARM_RELATIVE relocations in libchrome.<ver>.so. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Small readability and test data changes. Created 6 years, 6 months ago
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// TODO(simonb): Extend for 64-bit target libraries.

#include "elf_file.h"

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <string>
#include <vector>

#include "debug.h"
#include "libelf.h"
#include "packer.h"

namespace relocation_packer {

// Stub identifier written to 'null out' packed data; its four bytes read
// as "NULL" in little-endian ASCII.
static const Elf32_Word kStubIdentifier = 0x4c4c554eu;

// Out-of-band dynamic tags used to indicate the offset and size of the
// .android.rel.dyn section.
static const Elf32_Sword DT_ANDROID_ARM_REL_OFFSET = DT_LOPROC;
static const Elf32_Sword DT_ANDROID_ARM_REL_SIZE = DT_LOPROC + 1;

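These two tags are taken from the processor-specific range that begins at DT_LOPROC, so standard loaders skip them. As an illustration of the consumer side only (this sketch is not part of the tool, and the function name is hypothetical), a loader aware of the scheme could recover the packed section from the dynamic table like this:

// Illustrative sketch: scan a PT_DYNAMIC table for the out-of-band tags
// written by this tool. Returns false if the library is not packed.
static bool FindPackedArmRelocations(const Elf32_Dyn* dynamic_table,
                                     Elf32_Off* packed_offset,
                                     Elf32_Word* packed_size) {
  bool found_offset = false;
  bool found_size = false;
  for (const Elf32_Dyn* dyn = dynamic_table; dyn->d_tag != DT_NULL; ++dyn) {
    if (dyn->d_tag == DT_ANDROID_ARM_REL_OFFSET) {
      *packed_offset = dyn->d_un.d_ptr;
      found_offset = true;
    } else if (dyn->d_tag == DT_ANDROID_ARM_REL_SIZE) {
      *packed_size = dyn->d_un.d_val;
      found_size = true;
    }
  }
  return found_offset && found_size;
}
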
namespace {

// Get section data. Checks that the section has exactly one data entry,
// so that the section size and the data size are the same. True in
// practice for all sections we resize when packing or unpacking. Done
// by ensuring that a call to elf_getdata(section, data) returns NULL as
// the next data entry.
Elf_Data* GetSectionData(Elf_Scn* section) {
  Elf_Data* data = elf_getdata(section, NULL);
  CHECK(data && elf_getdata(section, data) == NULL);
  return data;
}

// Rewrite section data. Allocates new data and makes it the data element's
// buffer. Relies on program exit to free allocated data.
void RewriteSectionData(Elf_Data* data,
                        const void* section_data,
                        size_t size) {
  CHECK(size == data->d_size);
  uint8_t* area = new uint8_t[size];
  memcpy(area, section_data, size);
  data->d_buf = area;
}

// Helper for ResizeSection(). Adjust the main ELF header for the hole.
void AdjustElfHeaderForHole(Elf32_Ehdr* elf_header,
                            Elf32_Off hole_start,
                            int32_t hole_size) {
  if (elf_header->e_phoff > hole_start) {
    elf_header->e_phoff += hole_size;
    VLOG("e_phoff adjusted to %u\n", elf_header->e_phoff);
  }
  if (elf_header->e_shoff > hole_start) {
    elf_header->e_shoff += hole_size;
    VLOG("e_shoff adjusted to %u\n", elf_header->e_shoff);
  }
}
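
To make the sign convention concrete, a worked example with made-up numbers:

// Example (illustrative numbers only). Packing shrinks .rel.dyn, located
// at file offset 0x10000, by 0x2000 bytes, so hole_start = 0x10000 and
// hole_size = -0x2000. A section header table at e_shoff = 0x30000 moves
// to 0x2e000; the program header table at e_phoff = 0x34 sits before the
// hole and is untouched. Unpacking later passes hole_size = +0x2000,
// restoring the original offsets.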

// Helper for ResizeSection(). Adjust all program headers for the hole.
void AdjustProgramHeadersForHole(Elf32_Phdr* elf_program_header,
                                 size_t program_header_count,
                                 Elf32_Off hole_start,
                                 int32_t hole_size) {
  for (size_t i = 0; i < program_header_count; ++i) {
    Elf32_Phdr* program_header = &elf_program_header[i];

    if (program_header->p_offset > hole_start) {
      // This segment starts beyond the hole start, so adjust its offset
      // and addresses.
      program_header->p_offset += hole_size;
      VLOG("phdr %lu p_offset adjusted to %u\n", i, program_header->p_offset);

      // Only adjust vaddr and paddr if this program header has them.
      if (program_header->p_vaddr != 0) {
        program_header->p_vaddr += hole_size;
        VLOG("phdr %lu p_vaddr adjusted to %u\n", i, program_header->p_vaddr);
      }
      if (program_header->p_paddr != 0) {
        program_header->p_paddr += hole_size;
        VLOG("phdr %lu p_paddr adjusted to %u\n", i, program_header->p_paddr);
      }
    } else if (program_header->p_offset +
               program_header->p_filesz > hole_start) {
      // The hole start is within this segment, so adjust file and in-memory
      // sizes, but leave offsets and addrs unchanged.
      program_header->p_filesz += hole_size;
      VLOG("phdr %lu p_filesz adjusted to %u\n", i, program_header->p_filesz);
      program_header->p_memsz += hole_size;
      VLOG("phdr %lu p_memsz adjusted to %u\n", i, program_header->p_memsz);
    }
  }
}

// Helper for ResizeSection(). Adjust all section headers for the hole.
void AdjustSectionHeadersForHole(Elf* elf,
                                 Elf32_Off hole_start,
                                 int32_t hole_size) {
  size_t string_index;
  elf_getshdrstrndx(elf, &string_index);

  Elf_Scn* section = NULL;
  while ((section = elf_nextscn(elf, section)) != NULL) {
    Elf32_Shdr* section_header = elf32_getshdr(section);
    std::string name = elf_strptr(elf, string_index, section_header->sh_name);

    if (section_header->sh_offset > hole_start) {
      section_header->sh_offset += hole_size;
      VLOG("section %s sh_offset adjusted to %u\n",
           name.c_str(), section_header->sh_offset);
      // Only adjust section addr if this section has one.
      if (section_header->sh_addr != 0) {
        section_header->sh_addr += hole_size;
        VLOG("section %s sh_addr adjusted to %u\n",
             name.c_str(), section_header->sh_addr);
      }
    }
  }
}

// Helper for ResizeSection(). Adjust the .dynamic section for the hole.
void AdjustDynamicSectionForHole(Elf_Scn* dynamic_section,
                                 bool is_rel_dyn_resize,
                                 Elf32_Off hole_start,
                                 int32_t hole_size) {
  Elf_Data* data = GetSectionData(dynamic_section);

  const Elf32_Dyn* dynamic_base = reinterpret_cast<Elf32_Dyn*>(data->d_buf);
  std::vector<Elf32_Dyn> dynamics(
      dynamic_base,
      dynamic_base + data->d_size / sizeof(dynamics[0]));

  for (size_t i = 0; i < dynamics.size(); ++i) {
    Elf32_Dyn* dynamic = &dynamics[i];
    const Elf32_Sword tag = dynamic->d_tag;
    // Any tags that hold offsets are adjustment candidates.
    const bool is_adjustable = (tag == DT_PLTGOT ||
                                tag == DT_HASH ||
                                tag == DT_STRTAB ||
                                tag == DT_SYMTAB ||
                                tag == DT_RELA ||
                                tag == DT_INIT ||
                                tag == DT_FINI ||
                                tag == DT_REL ||
                                tag == DT_JMPREL ||
                                tag == DT_INIT_ARRAY ||
                                tag == DT_FINI_ARRAY ||
                                tag == DT_ANDROID_ARM_REL_OFFSET);
    if (is_adjustable && dynamic->d_un.d_ptr > hole_start) {
      dynamic->d_un.d_ptr += hole_size;
      VLOG("dynamic[%lu] %u d_ptr adjusted to %u\n",
           i, dynamic->d_tag, dynamic->d_un.d_ptr);
    }

    // If we are specifically resizing .rel.dyn, we need to make additional
    // adjustments to tags that indicate the counts of R_ARM_RELATIVE
    // relocations in the shared object.
    if (is_rel_dyn_resize) {
      // DT_RELSZ is the overall size of relocations. Adjust by hole size.
      if (tag == DT_RELSZ) {
        dynamic->d_un.d_val += hole_size;
        VLOG("dynamic[%lu] %u d_val adjusted to %u\n",
             i, dynamic->d_tag, dynamic->d_un.d_val);
      }

      // The crazy linker does not use DT_RELCOUNT, but we keep it updated
      // anyway. In practice the section hole is always equal to the size
      // of R_ARM_RELATIVE relocations, and DT_RELCOUNT is the count of
      // relative relocations. So closing a hole on packing reduces
      // DT_RELCOUNT to zero, and opening a hole on unpacking restores it to
      // its pre-packed value. Cast sizeof() to keep the division signed.
      if (tag == DT_RELCOUNT) {
        dynamic->d_un.d_val += hole_size / static_cast<int32_t>(sizeof(Elf32_Rel));
        VLOG("dynamic[%lu] %u d_val adjusted to %u\n",
             i, dynamic->d_tag, dynamic->d_un.d_val);
      }

      // DT_RELENT doesn't change, but make sure it is what we expect.
      if (tag == DT_RELENT) {
        CHECK(dynamic->d_un.d_val == sizeof(Elf32_Rel));
      }
    }
  }

  void* section_data = &dynamics[0];
  size_t bytes = dynamics.size() * sizeof(dynamics[0]);
  RewriteSectionData(data, section_data, bytes);
}
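
The DT_RELSZ and DT_RELCOUNT arithmetic follows from the fixed 8-byte Elf32_Rel entry size. With illustrative numbers:

// Example (made-up numbers). Packing a library that has 10000
// R_ARM_RELATIVE relocations removes 10000 * sizeof(Elf32_Rel) = 80000
// bytes from .rel.dyn, so hole_size = -80000 and:
//   DT_RELSZ    += -80000      // overall relocation bytes
//   DT_RELCOUNT += -80000 / 8  // relative relocation count, drops to 0
// Unpacking applies the same deltas with the opposite sign.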

// Helper for ResizeSection(). Adjust the .dynsym section for the hole.
// We need to adjust the values for the symbols represented in it.
void AdjustDynSymSectionForHole(Elf_Scn* dynsym_section,
                                Elf32_Off hole_start,
                                int32_t hole_size) {
  Elf_Data* data = GetSectionData(dynsym_section);

  const Elf32_Sym* dynsym_base = reinterpret_cast<Elf32_Sym*>(data->d_buf);
  std::vector<Elf32_Sym> dynsyms(
      dynsym_base,
      dynsym_base + data->d_size / sizeof(dynsyms[0]));

  for (size_t i = 0; i < dynsyms.size(); ++i) {
    Elf32_Sym* dynsym = &dynsyms[i];
    const int type = static_cast<int>(ELF32_ST_TYPE(dynsym->st_info));
    const bool is_adjustable = (type == STT_OBJECT ||
                                type == STT_FUNC ||
                                type == STT_SECTION ||
                                type == STT_FILE ||
                                type == STT_COMMON ||
                                type == STT_TLS);
    if (is_adjustable && dynsym->st_value > hole_start) {
      dynsym->st_value += hole_size;
      VLOG("dynsym[%lu] type=%u st_value adjusted to %u\n",
           i, type, dynsym->st_value);
    }
  }

  void* section_data = &dynsyms[0];
  size_t bytes = dynsyms.size() * sizeof(dynsyms[0]);
  RewriteSectionData(data, section_data, bytes);
}

// Helper for ResizeSection(). Adjust the .rel.plt section for the hole.
// We need to adjust the offset of every relocation inside it that falls
// beyond the hole start.
void AdjustRelPltSectionForHole(Elf_Scn* relplt_section,
                                Elf32_Off hole_start,
                                int32_t hole_size) {
  Elf_Data* data = GetSectionData(relplt_section);

  const Elf32_Rel* relplt_base = reinterpret_cast<Elf32_Rel*>(data->d_buf);
  std::vector<Elf32_Rel> relplts(
      relplt_base,
      relplt_base + data->d_size / sizeof(relplts[0]));

  for (size_t i = 0; i < relplts.size(); ++i) {
    Elf32_Rel* relplt = &relplts[i];
    if (relplt->r_offset > hole_start) {
      relplt->r_offset += hole_size;
      VLOG("relplt[%lu] r_offset adjusted to %u\n", i, relplt->r_offset);
    }
  }

  void* section_data = &relplts[0];
  size_t bytes = relplts.size() * sizeof(relplts[0]);
  RewriteSectionData(data, section_data, bytes);
}

// Helper for ResizeSection(). Adjust the .symtab section for the hole.
// We need to adjust the value of every symbol in it that falls beyond
// the hole start.
void AdjustSymTabSectionForHole(Elf_Scn* symtab_section,
                                Elf32_Off hole_start,
                                int32_t hole_size) {
  Elf_Data* data = GetSectionData(symtab_section);

  const Elf32_Sym* symtab_base = reinterpret_cast<Elf32_Sym*>(data->d_buf);
  std::vector<Elf32_Sym> symtab(
      symtab_base,
      symtab_base + data->d_size / sizeof(symtab[0]));

  for (size_t i = 0; i < symtab.size(); ++i) {
    Elf32_Sym* sym = &symtab[i];
    if (sym->st_value > hole_start) {
      sym->st_value += hole_size;
      VLOG("symtab[%lu] value adjusted to %u\n", i, sym->st_value);
    }
  }

  void* section_data = &symtab[0];
  size_t bytes = symtab.size() * sizeof(symtab[0]);
  RewriteSectionData(data, section_data, bytes);
}
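
The fix-up helpers above all share one load-adjust-rewrite shape, differing only in entry type and the field adjusted. A generic sketch of that shared pattern, for exposition only (the tool deliberately keeps the helpers separate and concrete):

// Hypothetical generalization of the Adjust*ForHole helpers: load a
// section's entries into a vector, apply a per-entry adjustment, and
// write the result back through RewriteSectionData().
template <typename Entry, typename AdjustFn>
void AdjustSectionEntries(Elf_Scn* section, AdjustFn adjust) {
  Elf_Data* data = GetSectionData(section);
  const Entry* base = reinterpret_cast<const Entry*>(data->d_buf);
  std::vector<Entry> entries(base, base + data->d_size / sizeof(Entry));
  for (size_t i = 0; i < entries.size(); ++i)
    adjust(&entries[i]);  // e.g. bump st_value or r_offset past the hole
  RewriteSectionData(data, &entries[0], entries.size() * sizeof(Entry));
}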

// Resize a section. If the new size is larger than the current size, open
// up a hole by increasing file offsets that come after the hole. If smaller
// than the current size, remove the hole by decreasing those offsets.
void ResizeSection(Elf* elf, Elf_Scn* section, size_t new_size) {
  Elf32_Shdr* section_header = elf32_getshdr(section);
  if (section_header->sh_size == new_size)
    return;

  // Note if we are resizing the real .rel.dyn. If yes, then we have to
  // massage d_un.d_val in the dynamic section where d_tag is DT_RELSZ and
  // DT_RELCOUNT.
  size_t string_index;
  elf_getshdrstrndx(elf, &string_index);
  const std::string section_name =
      elf_strptr(elf, string_index, section_header->sh_name);
  const bool is_rel_dyn_resize = section_name == ".rel.dyn";

  // Require that the section size and the data size are the same. True
  // in practice for all sections we resize when packing or unpacking.
  Elf_Data* data = GetSectionData(section);
  CHECK(data->d_off == 0 && data->d_size == section_header->sh_size);

  // Require that the section is not zero-length (that is, has allocated
  // data that we can validly expand).
  CHECK(data->d_size && data->d_buf);

  const Elf32_Off hole_start = section_header->sh_offset;
  const int32_t hole_size = new_size - data->d_size;

  VLOG_IF(hole_size > 0, "expand section size = %lu\n", data->d_size);
  VLOG_IF(hole_size < 0, "shrink section size = %lu\n", data->d_size);

  // Resize the data and the section header.
  data->d_size += hole_size;
  section_header->sh_size += hole_size;

  Elf32_Ehdr* elf_header = elf32_getehdr(elf);
  Elf32_Phdr* elf_program_header = elf32_getphdr(elf);

  // Add the hole size to all offsets in the ELF file that are after the
  // start of the hole. If the hole size is positive we are expanding the
  // section to create a new hole; if negative, we are closing up a hole.

  // Start with the main ELF header.
  AdjustElfHeaderForHole(elf_header, hole_start, hole_size);

  // Adjust all program headers.
  AdjustProgramHeadersForHole(elf_program_header,
                              elf_header->e_phnum,
                              hole_start,
                              hole_size);

  // Adjust all section headers.
  AdjustSectionHeadersForHole(elf, hole_start, hole_size);

  // We use the dynamic program header entry to locate the dynamic section.
  const Elf32_Phdr* dynamic_program_header = NULL;

  // Find the dynamic program header entry.
  for (size_t i = 0; i < elf_header->e_phnum; ++i) {
    Elf32_Phdr* program_header = &elf_program_header[i];

    if (program_header->p_type == PT_DYNAMIC) {
      dynamic_program_header = program_header;
    }
  }
  CHECK(dynamic_program_header);

  // Sections requiring special attention, and the .android.rel.dyn offset.
  Elf_Scn* dynamic_section = NULL;
  Elf_Scn* dynsym_section = NULL;
  Elf_Scn* relplt_section = NULL;
  Elf_Scn* symtab_section = NULL;
  Elf32_Off android_rel_dyn_offset = 0;

  // Find these sections and the .android.rel.dyn offset. Iterate with a
  // separate variable so that the section argument stays intact; it is
  // compared against dynamic_section below.
  Elf_Scn* scan = NULL;
  while ((scan = elf_nextscn(elf, scan)) != NULL) {
    Elf32_Shdr* header = elf32_getshdr(scan);
    std::string name = elf_strptr(elf, string_index, header->sh_name);

    if (header->sh_offset == dynamic_program_header->p_offset) {
      dynamic_section = scan;
    }
    if (name == ".dynsym") {
      dynsym_section = scan;
    }
    if (name == ".rel.plt") {
      relplt_section = scan;
    }
    if (name == ".symtab") {
      symtab_section = scan;
    }

    // Note .android.rel.dyn offset.
    if (name == ".android.rel.dyn") {
      android_rel_dyn_offset = header->sh_offset;
    }
  }
  CHECK(dynamic_section != NULL);
  CHECK(dynsym_section != NULL);
  CHECK(relplt_section != NULL);
  CHECK(android_rel_dyn_offset != 0);

  // Adjust the .dynamic section for the hole. Because we have to edit the
  // current contents of .dynamic we disallow resizing it. (Reusing the
  // section argument as the loop variable above would leave it NULL here
  // and make this CHECK vacuous.)
  CHECK(section != dynamic_section);
  AdjustDynamicSectionForHole(dynamic_section,
                              is_rel_dyn_resize,
                              hole_start,
                              hole_size);

  // Adjust the .dynsym section for the hole.
  AdjustDynSymSectionForHole(dynsym_section, hole_start, hole_size);

  // Adjust the .rel.plt section for the hole.
  AdjustRelPltSectionForHole(relplt_section, hole_start, hole_size);

  // Adjust the .symtab section for the hole. .symtab may be absent if
  // the shared library was stripped.
  if (symtab_section) {
rmcilroy 2014/06/07 11:49:07 could you pull this section out into an AdjustSymTabSectionForHole helper, like the others above?
simonb (inactive) 2014/06/09 14:39:19 Doh. Already pulled out, just forgot to call it in here. Done.
    AdjustSymTabSectionForHole(symtab_section, hole_start, hole_size);
  }
}
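
Callers always pair ResizeSection() with RewriteSectionData(), in that order, because RewriteSectionData() CHECKs that the replacement buffer size matches the already-resized d_size. A minimal sketch of the calling pattern (new_bytes and new_contents are placeholders; the real calls appear in PackRelocations() below):

//   Elf_Data* data = GetSectionData(rel_dyn_section_);  // d_size == old size
//   ResizeSection(elf_, rel_dyn_section_, new_bytes);   // d_size becomes new_bytes
//   RewriteSectionData(data, new_contents, new_bytes);  // sizes agree again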

// Verbose ELF header logging.
void VerboseLogElfHeader(const Elf32_Ehdr* elf_header) {
  VLOG("e_phoff = %u\n", elf_header->e_phoff);
  VLOG("e_shoff = %u\n", elf_header->e_shoff);
  VLOG("e_ehsize = %u\n", elf_header->e_ehsize);
  VLOG("e_phentsize = %u\n", elf_header->e_phentsize);
  VLOG("e_phnum = %u\n", elf_header->e_phnum);
  VLOG("e_shnum = %u\n", elf_header->e_shnum);
  VLOG("e_shstrndx = %u\n", elf_header->e_shstrndx);
}

// Verbose ELF program header logging.
void VerboseLogProgramHeader(size_t program_header_index,
                             const Elf32_Phdr* program_header) {
  std::string type;
  switch (program_header->p_type) {
    case PT_NULL: type = "NULL"; break;
    case PT_LOAD: type = "LOAD"; break;
    case PT_DYNAMIC: type = "DYNAMIC"; break;
    case PT_INTERP: type = "INTERP"; break;
    case PT_NOTE: type = "NOTE"; break;
    case PT_SHLIB: type = "SHLIB"; break;
    case PT_PHDR: type = "PHDR"; break;
    case PT_TLS: type = "TLS"; break;
    default: type = "(OTHER)"; break;
  }
  VLOG("phdr %lu : %s\n", program_header_index, type.c_str());
  VLOG("  p_offset = %u\n", program_header->p_offset);
  VLOG("  p_vaddr = %u\n", program_header->p_vaddr);
  VLOG("  p_paddr = %u\n", program_header->p_paddr);
  VLOG("  p_filesz = %u\n", program_header->p_filesz);
  VLOG("  p_memsz = %u\n", program_header->p_memsz);
}

// Verbose ELF section header logging.
void VerboseLogSectionHeader(const std::string& section_name,
                             const Elf32_Shdr* section_header) {
  VLOG("section %s\n", section_name.c_str());
  VLOG("  sh_addr = %u\n", section_header->sh_addr);
  VLOG("  sh_offset = %u\n", section_header->sh_offset);
  VLOG("  sh_size = %u\n", section_header->sh_size);
}

// Verbose ELF section data logging.
void VerboseLogSectionData(const Elf_Data* data) {
  VLOG("  data\n");
  VLOG("    d_buf = %p\n", data->d_buf);
  VLOG("    d_off = %lu\n", data->d_off);
  VLOG("    d_size = %lu\n", data->d_size);
}

}  // namespace

// Load the complete ELF file into a memory image in libelf, and identify
// the .rel.dyn, .dynamic, and .android.rel.dyn sections. No-op if the
// ELF file has already been loaded.
bool ElfFile::Load() {
  if (elf_)
    return true;

  elf_ = elf_begin(fd_, ELF_C_RDWR, NULL);
  CHECK(elf_);

  if (elf_kind(elf_) != ELF_K_ELF) {
    LOG("File not in ELF format\n");
    return false;
  }

  Elf32_Ehdr* elf_header = elf32_getehdr(elf_);
  if (!elf_header) {
    LOG("Failed to load ELF header\n");
    return false;
  }
  if (elf_header->e_machine != EM_ARM) {
    LOG("File is not an arm32 ELF file\n");
    return false;
  }

  // Require that our endianness matches that of the target, and that both
  // are little-endian. Safe for all current build/target combinations.
  const int endian = static_cast<int>(elf_header->e_ident[EI_DATA]);
  CHECK(endian == ELFDATA2LSB);
  CHECK(__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__);

  VLOG("endian = %u\n", endian);
  VerboseLogElfHeader(elf_header);

  const Elf32_Phdr* elf_program_header = elf32_getphdr(elf_);
  CHECK(elf_program_header);

  const Elf32_Phdr* dynamic_program_header = NULL;
  for (size_t i = 0; i < elf_header->e_phnum; ++i) {
    const Elf32_Phdr* program_header = &elf_program_header[i];
    VerboseLogProgramHeader(i, program_header);

    if (program_header->p_type == PT_DYNAMIC) {
      CHECK(dynamic_program_header == NULL);
      dynamic_program_header = program_header;
    }
  }
  CHECK(dynamic_program_header != NULL);

  size_t string_index;
  elf_getshdrstrndx(elf_, &string_index);

  // Pointers to the .rel.dyn, .android.rel.dyn, and .dynamic sections,
  // found while iterating sections and later stored in class attributes.
  Elf_Scn* found_rel_dyn_section = NULL;
  Elf_Scn* found_android_rel_dyn_section = NULL;
  Elf_Scn* found_dynamic_section = NULL;

  // Flag set if we encounter any .debug* section. We do not adjust any
  // offsets or addresses of any debug data, so if we find one of these then
  // the resulting output shared object should still run, but might not be
  // usable for debugging, disassembly, and so on. Warn if this occurs.
  bool has_debug_section = false;

  Elf_Scn* section = NULL;
  while ((section = elf_nextscn(elf_, section)) != NULL) {
    const Elf32_Shdr* section_header = elf32_getshdr(section);
    std::string name = elf_strptr(elf_, string_index, section_header->sh_name);
    VerboseLogSectionHeader(name, section_header);

    // Note special sections as we encounter them.
    if (name == ".rel.dyn") {
      found_rel_dyn_section = section;
    }
    if (name == ".android.rel.dyn") {
      found_android_rel_dyn_section = section;
    }
    if (section_header->sh_offset == dynamic_program_header->p_offset) {
      found_dynamic_section = section;
    }

    // If we find a section named .debug*, set the debug warning flag.
    if (name.find(".debug") == 0) {
      has_debug_section = true;
    }

    Elf_Data* data = NULL;
    while ((data = elf_getdata(section, data)) != NULL) {
      VerboseLogSectionData(data);
    }
  }

  // Loading failed if we did not find the required special sections.
  if (!found_rel_dyn_section) {
    LOG("Missing .rel.dyn section\n");
    return false;
  }
  if (!found_dynamic_section) {
    LOG("Missing .dynamic section\n");
    return false;
  }
  if (!found_android_rel_dyn_section) {
    LOG("Missing .android.rel.dyn section (not split/packed?)\n");
rmcilroy 2014/06/07 11:49:07 ERROR: Missing required .android.rel.dyn section.
simonb (inactive) 2014/06/09 14:39:19 Done. ('relocation_packer --help' prints full instructions.)
    return false;
  }

  if (has_debug_section) {
    LOG("WARNING: found .debug section(s), and ignored them\n");
  }

  rel_dyn_section_ = found_rel_dyn_section;
  dynamic_section_ = found_dynamic_section;
  android_rel_dyn_section_ = found_android_rel_dyn_section;
  return true;
}

namespace {

// Replace the first free (unused) slot in a dynamics vector with the given
// value. The vector always ends with a free (unused) element, so the slot
// found cannot be the last one in the vector.
void AddDynamicEntry(Elf32_Dyn dyn,
                     std::vector<Elf32_Dyn>* dynamics) {
  // Loop until the penultimate entry. We cannot replace the end sentinel.
  for (size_t i = 0; i < dynamics->size() - 1; ++i) {
    Elf32_Dyn& slot = dynamics->at(i);
    if (slot.d_tag == DT_NULL) {
      slot = dyn;
      VLOG("dynamic[%lu] overwritten with %u\n", i, dyn.d_tag);
      return;
    }
  }

  // No free dynamics vector slot was found. Report unconditionally, since
  // we are about to abort.
  LOG("No spare dynamic vector slots found "
      "(to fix, increase gold's --spare-dynamic-tags value)\n");
  NOTREACHED();
}
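
In-place insertion works because gold leaves spare DT_NULL entries at the end of .dynamic (the --spare-dynamic-tags option mentioned above controls how many are reserved). Schematically:

// .dynamic before: [DT_NEEDED][...][DT_NULL][DT_NULL][DT_NULL] <- sentinel
// after two adds:  [DT_NEEDED][...][DT_ANDROID_ARM_REL_OFFSET]
//                  [DT_ANDROID_ARM_REL_SIZE][DT_NULL]          <- sentinel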

// Apply R_ARM_RELATIVE relocations to the file data to which they refer.
// This relocates data into the area it will occupy after the hole in
// .rel.dyn is added or removed. hole_size is signed: negative when
// packing (hole closed), positive when unpacking (hole opened).
void AdjustRelocationTargets(Elf* elf,
                             Elf32_Off hole_start,
                             ssize_t hole_size,
                             const std::vector<Elf32_Rel>& relocations) {
  Elf_Scn* section = NULL;
  while ((section = elf_nextscn(elf, section)) != NULL) {
    const Elf32_Shdr* section_header = elf32_getshdr(section);

    // Identify this section's start and end addresses.
    const Elf32_Addr section_start = section_header->sh_addr;
    const Elf32_Addr section_end = section_start + section_header->sh_size;

    Elf_Data* data = GetSectionData(section);

    // Ignore sections with no effective data.
    if (data->d_buf == NULL)
      continue;

    // Create a copy-on-write pointer to the section's data.
    uint8_t* area = reinterpret_cast<uint8_t*>(data->d_buf);

    for (size_t i = 0; i < relocations.size(); ++i) {
      const Elf32_Rel* relocation = &relocations[i];
      CHECK(ELF32_R_TYPE(relocation->r_info) == R_ARM_RELATIVE);

      // See if this relocation points into the current section.
      if (relocation->r_offset >= section_start &&
          relocation->r_offset < section_end) {
        Elf32_Addr byte_offset = relocation->r_offset - section_start;
        Elf32_Off* target = reinterpret_cast<Elf32_Off*>(area + byte_offset);

        // Is the relocation's target after the hole's start?
        if (*target > hole_start) {
          // Copy on first write. Recompute target to point into the newly
          // allocated buffer.
          if (area == data->d_buf) {
            area = new uint8_t[data->d_size];
            memcpy(area, data->d_buf, data->d_size);
            target = reinterpret_cast<Elf32_Off*>(area + byte_offset);
          }

          *target += hole_size;
          VLOG("relocation[%lu] target adjusted to %u\n", i, *target);
        }
      }
    }

    // If we applied any relocation to this section, write it back.
    if (area != data->d_buf) {
      RewriteSectionData(data, area, data->d_size);
      delete [] area;
    }
  }
}
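
For background: an R_ARM_RELATIVE entry directs the loader to add the library's load bias to the 32-bit word stored at r_offset, and that stored word is itself an address inside the image. So when the file layout shifts, both the entry's r_offset (handled by AdjustRelocations() below) and the word it points at (handled here) must move. At load time the relocation reduces to:

// Loader-side view of one R_ARM_RELATIVE entry (sketch; load_base is the
// address at which the library was mapped):
//   *reinterpret_cast<Elf32_Addr*>(load_base + rel.r_offset) += load_base;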

// Adjust relocations so that the offset that they indicate will be correct
// after the hole in .rel.dyn is added or removed (in effect, relocate the
// relocations).
void AdjustRelocations(Elf32_Off hole_start,
                       ssize_t hole_size,
                       std::vector<Elf32_Rel>* relocations) {
  for (size_t i = 0; i < relocations->size(); ++i) {
    Elf32_Rel* relocation = &relocations->at(i);
    if (relocation->r_offset > hole_start) {
      relocation->r_offset += hole_size;
      VLOG("relocation[%lu] offset adjusted to %u\n", i, relocation->r_offset);
    }
  }
}

}  // namespace

// Remove R_ARM_RELATIVE entries from .rel.dyn and write as packed data
// into .android.rel.dyn.
bool ElfFile::PackRelocations() {
  // Load the ELF file into libelf.
  if (!Load()) {
    LOG("Failed to load as ELF (elf_error=%d)\n", elf_errno());
    return false;
  }

  // Retrieve the current .rel.dyn section data.
  Elf_Data* data = GetSectionData(rel_dyn_section_);

  // Convert data to a vector of Elf32 relocations.
  const Elf32_Rel* relocations_base = reinterpret_cast<Elf32_Rel*>(data->d_buf);
  std::vector<Elf32_Rel> relocations(
      relocations_base,
      relocations_base + data->d_size / sizeof(relocations[0]));

  std::vector<Elf32_Rel> relative_relocations;
  std::vector<Elf32_Rel> other_relocations;

  // Filter relocations into those that are R_ARM_RELATIVE and others.
  for (size_t i = 0; i < relocations.size(); ++i) {
    const Elf32_Rel& relocation = relocations[i];
    if (ELF32_R_TYPE(relocation.r_info) == R_ARM_RELATIVE) {
      CHECK(ELF32_R_SYM(relocation.r_info) == 0);
      relative_relocations.push_back(relocation);
    } else {
      other_relocations.push_back(relocation);
    }
  }
  VLOG("R_ARM_RELATIVE: %lu entries\n", relative_relocations.size());
  VLOG("Other         : %lu entries\n", other_relocations.size());
  VLOG("Total         : %lu entries\n", relocations.size());

  // If no relative relocations then we have nothing packable. Perhaps
  // the shared object has already been packed?
  if (relative_relocations.empty()) {
    LOG("No R_ARM_RELATIVE relocations found (already packed?)\n");
    return false;
  }

  // Pre-calculate the size of the hole we will close up when we rewrite
  // .rel.dyn. We have to adjust all relocation addresses to account for
  // this.
  Elf32_Shdr* section_header = elf32_getshdr(rel_dyn_section_);
  const Elf32_Off hole_start = section_header->sh_offset;
  const ssize_t hole_size =
      relative_relocations.size() * sizeof(relative_relocations[0]);

  // Unless padding, pre-apply R_ARM_RELATIVE relocations to account for the
  // hole, and pre-adjust all relocation offsets accordingly.
  if (!is_padding_rel_dyn_) {
    // Apply relocations to all R_ARM_RELATIVE data to relocate it into the
    // area it will occupy once the hole in .rel.dyn is removed.
    AdjustRelocationTargets(elf_, hole_start, -hole_size, relative_relocations);
    // Relocate the relocations.
    AdjustRelocations(hole_start, -hole_size, &relative_relocations);
    AdjustRelocations(hole_start, -hole_size, &other_relocations);
  }

  // Pack R_ARM_RELATIVE relocations.
  const size_t initial_bytes =
      relative_relocations.size() * sizeof(relative_relocations[0]);
  LOG("Unpacked R_ARM_RELATIVE: %lu bytes\n", initial_bytes);
  std::vector<uint8_t> packed;
  RelocationPacker packer;
  packer.PackRelativeRelocations(relative_relocations, &packed);
rmcilroy 2014/06/07 11:49:07 How about making PackRelativeRelocations return a bool, so callers don't have to infer failure from empty packed data?
simonb (inactive) 2014/06/09 14:39:19 Insufficient relocations to form a run is a pathological case rather than a real failure, so the empty-data check below seems sufficient.
rmcilroy 2014/06/09 14:47:23 I figured this was the case - the suggestion wasn't a strong one.
  // If we have insufficient R_ARM_RELATIVE relocations to form a run then
  // packing fails. (Check before taking &packed[0] below, which is not
  // valid on an empty vector.)
  if (packed.empty()) {
    LOG("Too few R_ARM_RELATIVE relocations to pack\n");
    return false;
  }

  const void* packed_data = &packed[0];
  const size_t packed_bytes = packed.size() * sizeof(packed[0]);
  LOG("Packed R_ARM_RELATIVE: %lu bytes\n", packed_bytes);

  // Run a loopback self-test as a check that packing is lossless.
  std::vector<Elf32_Rel> unpacked;
  packer.UnpackRelativeRelocations(packed, &unpacked);
  CHECK(unpacked.size() == relative_relocations.size());
  for (size_t i = 0; i < unpacked.size(); ++i) {
    CHECK(unpacked[i].r_offset == relative_relocations[i].r_offset);
    CHECK(unpacked[i].r_info == relative_relocations[i].r_info);
  }

  // Make sure packing saved some space.
  if (packed_bytes >= initial_bytes) {
    LOG("Packing R_ARM_RELATIVE relocations saves no space\n");
    return false;
  }

  // If padding, add R_ARM_NONE relocations to other_relocations to make it
  // the same size as the original relocations we read in. This makes
  // the ResizeSection() below a no-op.
  if (is_padding_rel_dyn_) {
    const Elf32_Rel r_arm_none = {0, ELF32_R_INFO(0, R_ARM_NONE)};
    const size_t required = relocations.size() - other_relocations.size();
    std::vector<Elf32_Rel> padding(required, r_arm_none);
    other_relocations.insert(
        other_relocations.end(), padding.begin(), padding.end());
  }

  // Rewrite the current .rel.dyn section to be only the non-R_ARM_RELATIVE
  // relocations, then shrink it to size.
  const void* section_data = &other_relocations[0];
  const size_t bytes = other_relocations.size() * sizeof(other_relocations[0]);
  ResizeSection(elf_, rel_dyn_section_, bytes);
  RewriteSectionData(data, section_data, bytes);

  // Rewrite the current .android.rel.dyn section to hold the packed
  // R_ARM_RELATIVE relocations.
  data = GetSectionData(android_rel_dyn_section_);
  ResizeSection(elf_, android_rel_dyn_section_, packed_bytes);
  RewriteSectionData(data, packed_data, packed_bytes);

  // Rewrite .dynamic to include two new tags describing .android.rel.dyn.
  data = GetSectionData(dynamic_section_);
  const Elf32_Dyn* dynamic_base = reinterpret_cast<Elf32_Dyn*>(data->d_buf);
  std::vector<Elf32_Dyn> dynamics(
      dynamic_base,
      dynamic_base + data->d_size / sizeof(dynamics[0]));
  section_header = elf32_getshdr(android_rel_dyn_section_);
  // Use two of the spare slots to describe the .android.rel.dyn section.
  const Elf32_Dyn offset_dyn =
      {DT_ANDROID_ARM_REL_OFFSET, {section_header->sh_offset}};
  AddDynamicEntry(offset_dyn, &dynamics);
  const Elf32_Dyn size_dyn =
      {DT_ANDROID_ARM_REL_SIZE, {section_header->sh_size}};
  AddDynamicEntry(size_dyn, &dynamics);
  const void* dynamics_data = &dynamics[0];
  const size_t dynamics_bytes = dynamics.size() * sizeof(dynamics[0]);
  RewriteSectionData(data, dynamics_data, dynamics_bytes);

  Flush();
  return true;
}
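
For context, a hedged sketch of how this class might be driven from the tool's main(). The ElfFile(fd) constructor and the PackOne helper name are assumptions here, but the elf_version() handshake is a genuine libelf requirement before any elf_begin():

#include <fcntl.h>
#include <unistd.h>

// Hypothetical driver: open a shared object read-write and pack it.
int PackOne(const char* path) {
  int fd = open(path, O_RDWR);
  CHECK(fd >= 0);
  CHECK(elf_version(EV_CURRENT) != EV_NONE);  // mandatory libelf handshake
  relocation_packer::ElfFile elf_file(fd);    // assumed constructor signature
  const int status = elf_file.PackRelocations() ? 0 : 1;
  close(fd);
  return status;
}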

namespace {

// Remove the element in the dynamics vector that matches the given tag.
// Shuffle the following elements up one slot, so that the removed slot
// becomes an extra unused (DT_NULL) entry, and check that the last element
// is still the null sentinel.
void RemoveDynamicEntry(Elf32_Sword tag,
                        std::vector<Elf32_Dyn>* dynamics) {
  // Loop until the penultimate entry, and never match the end sentinel.
  for (size_t i = 0; i < dynamics->size() - 1; ++i) {
    Elf32_Dyn& slot = dynamics->at(i);
    if (slot.d_tag == tag) {
      for ( ; i < dynamics->size() - 1; ++i) {
        dynamics->at(i) = dynamics->at(i + 1);
        VLOG("dynamic[%lu] overwritten with dynamic[%lu]\n", i, i + 1);
      }
      CHECK(dynamics->at(i).d_tag == DT_NULL);
      return;
    }
  }

  // No matching dynamics vector entry was found.
  NOTREACHED();
}

}  // namespace

// Find packed R_ARM_RELATIVE relocations in .android.rel.dyn, unpack them,
// and rewrite the .rel.dyn section in so_file to contain unpacked data.
bool ElfFile::UnpackRelocations() {
  // Load the ELF file into libelf.
  if (!Load()) {
    LOG("Failed to load as ELF (elf_error=%d)\n", elf_errno());
    return false;
  }

  // Retrieve the current .android.rel.dyn section data.
  Elf_Data* data = GetSectionData(android_rel_dyn_section_);

  // Convert data to a vector of bytes.
  const uint8_t* packed_base = reinterpret_cast<uint8_t*>(data->d_buf);
  std::vector<uint8_t> packed(
      packed_base,
      packed_base + data->d_size / sizeof(packed[0]));

  // Properly packed data must begin with the four-byte magic "APR1".
  // (Check the size first so the indexing below is always in bounds.)
  if (packed.size() < 4 ||
      packed[0] != 'A' || packed[1] != 'P' ||
      packed[2] != 'R' || packed[3] != '1') {
    LOG("Packed R_ARM_RELATIVE relocations not found (not packed?)\n");
    return false;
  }
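
The magic mirrors what the packer emits: packed data always opens with the four bytes 'APR1', while a previously unpacked file instead carries the four-byte "NULL" stub (kStubIdentifier) that UnpackRelocations() leaves behind below. As a byte-level picture:

// Packed .android.rel.dyn:   'A' 'P' 'R' '1' <packed run data ...>
// Unpacked stub section:     'N' 'U' 'L' 'L'  (kStubIdentifier, see above)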

  // Unpack the data to re-materialize the R_ARM_RELATIVE relocations.
  const size_t packed_bytes = packed.size() * sizeof(packed[0]);
  LOG("Packed R_ARM_RELATIVE: %lu bytes\n", packed_bytes);
  std::vector<Elf32_Rel> relative_relocations;
  RelocationPacker packer;
  packer.UnpackRelativeRelocations(packed, &relative_relocations);
  const size_t unpacked_bytes =
      relative_relocations.size() * sizeof(relative_relocations[0]);
  LOG("Unpacked R_ARM_RELATIVE: %lu bytes\n", unpacked_bytes);

  // Retrieve the current .rel.dyn section data.
  data = GetSectionData(rel_dyn_section_);

  // Interpret data as Elf32 relocations.
  const Elf32_Rel* relocations_base = reinterpret_cast<Elf32_Rel*>(data->d_buf);
  std::vector<Elf32_Rel> relocations(
      relocations_base,
      relocations_base + data->d_size / sizeof(relocations[0]));

  std::vector<Elf32_Rel> other_relocations;
  size_t padding = 0;

  // Filter relocations to locate any that are R_ARM_NONE. These will occur
  // if padding was turned on for packing.
  for (size_t i = 0; i < relocations.size(); ++i) {
    const Elf32_Rel& relocation = relocations[i];
    if (ELF32_R_TYPE(relocation.r_info) != R_ARM_NONE) {
      other_relocations.push_back(relocation);
    } else {
      ++padding;
    }
  }
  LOG("R_ARM_RELATIVE: %lu entries\n", relative_relocations.size());
  LOG("Other         : %lu entries\n", other_relocations.size());

  // If we found the same number of R_ARM_NONE entries in .rel.dyn as we
  // hold as unpacked relative relocations, then this is a padded file.
  const bool is_padded = padding == relative_relocations.size();

  // Pre-calculate the size of the hole we will open up when we rewrite
  // .rel.dyn. We have to adjust all relocation addresses to account for
  // this.
  Elf32_Shdr* section_header = elf32_getshdr(rel_dyn_section_);
  const Elf32_Off hole_start = section_header->sh_offset;
  const ssize_t hole_size =
      relative_relocations.size() * sizeof(relative_relocations[0]);

  // Unless padded, pre-apply R_ARM_RELATIVE relocations to account for the
  // hole, and pre-adjust all relocation offsets accordingly.
  if (!is_padded) {
    // Apply relocations to all R_ARM_RELATIVE data to relocate it into the
    // area it will occupy once the hole in .rel.dyn is opened.
    AdjustRelocationTargets(elf_, hole_start, hole_size, relative_relocations);
    // Relocate the relocations.
    AdjustRelocations(hole_start, hole_size, &relative_relocations);
    AdjustRelocations(hole_start, hole_size, &other_relocations);
  }

  // Rewrite the current .rel.dyn section to be the R_ARM_RELATIVE relocations
  // followed by other relocations. This is the usual order in which we find
  // them after linking, so this action will normally put the entire .rel.dyn
  // section back to its pre-split-and-packed state.
  relocations.assign(relative_relocations.begin(), relative_relocations.end());
  relocations.insert(relocations.end(),
                     other_relocations.begin(), other_relocations.end());
  const void* section_data = &relocations[0];
  const size_t bytes = relocations.size() * sizeof(relocations[0]);
  LOG("Total         : %lu entries\n", relocations.size());
  ResizeSection(elf_, rel_dyn_section_, bytes);
  RewriteSectionData(data, section_data, bytes);

  // Nearly empty the current .android.rel.dyn section. Leaves a four-byte
  // stub so that some data remains allocated to the section. This is a
  // convenience which allows us to re-pack this file again without
  // having to remove the section and then add a new small one with objcopy.
  // The way we resize sections relies on there being some data in a section.
  data = GetSectionData(android_rel_dyn_section_);
  ResizeSection(elf_, android_rel_dyn_section_, sizeof(kStubIdentifier));
  RewriteSectionData(data, &kStubIdentifier, sizeof(kStubIdentifier));

  // Rewrite .dynamic to remove two tags describing .android.rel.dyn.
  data = GetSectionData(dynamic_section_);
  const Elf32_Dyn* dynamic_base = reinterpret_cast<Elf32_Dyn*>(data->d_buf);
  std::vector<Elf32_Dyn> dynamics(
      dynamic_base,
      dynamic_base + data->d_size / sizeof(dynamics[0]));
  RemoveDynamicEntry(DT_ANDROID_ARM_REL_SIZE, &dynamics);
  RemoveDynamicEntry(DT_ANDROID_ARM_REL_OFFSET, &dynamics);
  const void* dynamics_data = &dynamics[0];
  const size_t dynamics_bytes = dynamics.size() * sizeof(dynamics[0]);
  RewriteSectionData(data, dynamics_data, dynamics_bytes);

  Flush();
  return true;
}

// Flush rewritten shared object file data.
void ElfFile::Flush() {
  // Flag all ELF data held in memory as needing to be written back to the
  // file, and tell libelf that we have controlled the file layout.
  elf_flagelf(elf_, ELF_C_SET, ELF_F_DIRTY);
  elf_flagelf(elf_, ELF_C_SET, ELF_F_LAYOUT);

  // Write ELF data back to disk.
  const off_t file_bytes = elf_update(elf_, ELF_C_WRITE);
  CHECK(file_bytes > 0);
  VLOG("elf_update returned: %lu\n", file_bytes);

  // Clean up libelf, and truncate the output file to the number of bytes
  // written by elf_update(). elf_update() does not shrink the underlying
  // file, so without this a packed (smaller) image would leave stale bytes
  // from the original file at its end.
  elf_end(elf_);
  elf_ = NULL;
  const int truncate = ftruncate(fd_, file_bytes);
  CHECK(truncate == 0);
}

}  // namespace relocation_packer