OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // TODO(simonb): Extend for 64-bit target libraries. | 5 // TODO(simonb): Extend for 64-bit target libraries. |
6 | 6 |
7 #include "elf_file.h" | 7 #include "elf_file.h" |
8 | 8 |
9 #include <stdlib.h> | 9 #include <stdlib.h> |
10 #include <sys/types.h> | 10 #include <sys/types.h> |
(...skipping 38 matching lines...)
49 const void* section_data, | 49 const void* section_data, |
50 size_t size) { | 50 size_t size) { |
51 CHECK(size == data->d_size); | 51 CHECK(size == data->d_size); |
52 uint8_t* area = new uint8_t[size]; | 52 uint8_t* area = new uint8_t[size]; |
53 memcpy(area, section_data, size); | 53 memcpy(area, section_data, size); |
54 data->d_buf = area; | 54 data->d_buf = area; |
55 } | 55 } |
56 | 56 |
57 // Verbose ELF header logging. | 57 // Verbose ELF header logging. |
58 void VerboseLogElfHeader(const Elf32_Ehdr* elf_header) { | 58 void VerboseLogElfHeader(const Elf32_Ehdr* elf_header) { |
59 VLOG("e_phoff = %u\n", elf_header->e_phoff); | 59 VLOG(1) << "e_phoff = " << elf_header->e_phoff; |
60 VLOG("e_shoff = %u\n", elf_header->e_shoff); | 60 VLOG(1) << "e_shoff = " << elf_header->e_shoff; |
61 VLOG("e_ehsize = %u\n", elf_header->e_ehsize); | 61 VLOG(1) << "e_ehsize = " << elf_header->e_ehsize; |
62 VLOG("e_phentsize = %u\n", elf_header->e_phentsize); | 62 VLOG(1) << "e_phentsize = " << elf_header->e_phentsize; |
63 VLOG("e_phnum = %u\n", elf_header->e_phnum); | 63 VLOG(1) << "e_phnum = " << elf_header->e_phnum; |
64 VLOG("e_shnum = %u\n", elf_header->e_shnum); | 64 VLOG(1) << "e_shnum = " << elf_header->e_shnum; |
65 VLOG("e_shstrndx = %u\n", elf_header->e_shstrndx); | 65 VLOG(1) << "e_shstrndx = " << elf_header->e_shstrndx; |
66 } | 66 } |
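The substance of this change is a migration from printf-style LOG/VLOG macros to glog-style streaming ones. The macros themselves live in the packer's debug header, outside this diff; below is a minimal, self-contained sketch of the streaming interface the NEW column assumes. The real implementation may differ (for one thing, production macros guard against the dangling-else hazard, which this sketch omits for brevity).

```cpp
#include <cstdlib>
#include <iostream>
#include <sstream>
#include <string>

namespace {

int g_verbosity = 1;  // Hypothetical verbosity knob; the real flag lives elsewhere.

class Logger {
 public:
  Logger(const std::string& tag, bool is_fatal) : is_fatal_(is_fatal) {
    stream_ << tag << ": ";
  }
  ~Logger() {
    std::cerr << stream_.str() << std::endl;
    if (is_fatal_)
      std::abort();  // LOG(FATAL) never returns, so a following NOTREACHED() is redundant.
  }
  template <typename T>
  Logger& operator<<(const T& value) {
    stream_ << value;
    return *this;
  }

 private:
  std::ostringstream stream_;
  bool is_fatal_;
};

}  // namespace

// LOG(INFO) << ...;  LOG(FATAL) << ... logs, then aborts.
#define LOG(severity) Logger(#severity, std::string(#severity) == "FATAL")

// VLOG(level) << ... logs only when verbosity is at least |level|.
#define VLOG(level) if ((level) <= g_verbosity) Logger("VERBOSE", false)

// VLOG_IF(level, cond) << ... adds a caller-supplied condition.
#define VLOG_IF(level, cond) if (cond) VLOG(level)

// CHECK(cond) is fatal when the condition fails.
#define CHECK(cond) if (!(cond)) LOG(FATAL) << "CHECK failed: " #cond
```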
67 | 67 |
68 // Verbose ELF program header logging. | 68 // Verbose ELF program header logging. |
69 void VerboseLogProgramHeader(size_t program_header_index, | 69 void VerboseLogProgramHeader(size_t program_header_index, |
70 const Elf32_Phdr* program_header) { | 70 const Elf32_Phdr* program_header) { |
71 std::string type; | 71 std::string type; |
72 switch (program_header->p_type) { | 72 switch (program_header->p_type) { |
73 case PT_NULL: type = "NULL"; break; | 73 case PT_NULL: type = "NULL"; break; |
74 case PT_LOAD: type = "LOAD"; break; | 74 case PT_LOAD: type = "LOAD"; break; |
75 case PT_DYNAMIC: type = "DYNAMIC"; break; | 75 case PT_DYNAMIC: type = "DYNAMIC"; break; |
76 case PT_INTERP: type = "INTERP"; break; | 76 case PT_INTERP: type = "INTERP"; break; |
77 case PT_NOTE: type = "NOTE"; break; | 77 case PT_NOTE: type = "NOTE"; break; |
78 case PT_SHLIB: type = "SHLIB"; break; | 78 case PT_SHLIB: type = "SHLIB"; break; |
79 case PT_PHDR: type = "PHDR"; break; | 79 case PT_PHDR: type = "PHDR"; break; |
80 case PT_TLS: type = "TLS"; break; | 80 case PT_TLS: type = "TLS"; break; |
81 default: type = "(OTHER)"; break; | 81 default: type = "(OTHER)"; break; |
82 } | 82 } |
83 VLOG("phdr %lu : %s\n", program_header_index, type.c_str()); | 83 VLOG(1) << "phdr " << program_header_index << " : " << type; |
84 VLOG(" p_offset = %u\n", program_header->p_offset); | 84 VLOG(1) << " p_offset = " << program_header->p_offset; |
85 VLOG(" p_vaddr = %u\n", program_header->p_vaddr); | 85 VLOG(1) << " p_vaddr = " << program_header->p_vaddr; |
86 VLOG(" p_paddr = %u\n", program_header->p_paddr); | 86 VLOG(1) << " p_paddr = " << program_header->p_paddr; |
87 VLOG(" p_filesz = %u\n", program_header->p_filesz); | 87 VLOG(1) << " p_filesz = " << program_header->p_filesz; |
88 VLOG(" p_memsz = %u\n", program_header->p_memsz); | 88 VLOG(1) << " p_memsz = " << program_header->p_memsz; |
89 } | 89 } |
90 | 90 |
91 // Verbose ELF section header logging. | 91 // Verbose ELF section header logging. |
92 void VerboseLogSectionHeader(const std::string& section_name, | 92 void VerboseLogSectionHeader(const std::string& section_name, |
93 const Elf32_Shdr* section_header) { | 93 const Elf32_Shdr* section_header) { |
94 VLOG("section %s\n", section_name.c_str()); | 94 VLOG(1) << "section " << section_name; |
95 VLOG(" sh_addr = %u\n", section_header->sh_addr); | 95 VLOG(1) << " sh_addr = " << section_header->sh_addr; |
96 VLOG(" sh_offset = %u\n", section_header->sh_offset); | 96 VLOG(1) << " sh_offset = " << section_header->sh_offset; |
97 VLOG(" sh_size = %u\n", section_header->sh_size); | 97 VLOG(1) << " sh_size = " << section_header->sh_size; |
98 VLOG(" sh_addralign = %u\n", section_header->sh_addralign); | 98 VLOG(1) << " sh_addralign = " << section_header->sh_addralign; |
99 } | 99 } |
100 | 100 |
101 // Verbose ELF section data logging. | 101 // Verbose ELF section data logging. |
102 void VerboseLogSectionData(const Elf_Data* data) { | 102 void VerboseLogSectionData(const Elf_Data* data) { |
103 VLOG(" data\n"); | 103 VLOG(1) << " data"; |
104 VLOG(" d_buf = %p\n", data->d_buf); | 104 VLOG(1) << " d_buf = " << data->d_buf; |
105 VLOG(" d_off = %lu\n", data->d_off); | 105 VLOG(1) << " d_off = " << data->d_off; |
106 VLOG(" d_size = %lu\n", data->d_size); | 106 VLOG(1) << " d_size = " << data->d_size; |
107 VLOG(" d_align = %lu\n", data->d_align); | 107 VLOG(1) << " d_align = " << data->d_align; |
108 } | 108 } |
109 | 109 |
110 } // namespace | 110 } // namespace |
111 | 111 |
112 // Load the complete ELF file into a memory image in libelf, and identify | 112 // Load the complete ELF file into a memory image in libelf, and identify |
113 // the .rel.dyn, .dynamic, and .android.rel.dyn sections. No-op if the | 113 // the .rel.dyn, .dynamic, and .android.rel.dyn sections. No-op if the |
114 // ELF file has already been loaded. | 114 // ELF file has already been loaded. |
115 bool ElfFile::Load() { | 115 bool ElfFile::Load() { |
116 if (elf_) | 116 if (elf_) |
117 return true; | 117 return true; |
118 | 118 |
119 elf_ = elf_begin(fd_, ELF_C_RDWR, NULL); | 119 elf_ = elf_begin(fd_, ELF_C_RDWR, NULL); |
120 CHECK(elf_); | 120 CHECK(elf_); |
121 | 121 |
122 if (elf_kind(elf_) != ELF_K_ELF) { | 122 if (elf_kind(elf_) != ELF_K_ELF) { |
123 LOG("ERROR: File not in ELF format\n"); | 123 LOG(ERROR) << "File not in ELF format"; |
124 return false; | 124 return false; |
125 } | 125 } |
126 | 126 |
127 Elf32_Ehdr* elf_header = elf32_getehdr(elf_); | 127 Elf32_Ehdr* elf_header = elf32_getehdr(elf_); |
128 if (!elf_header) { | 128 if (!elf_header) { |
129 LOG("ERROR: Failed to load ELF header\n"); | 129 LOG(ERROR) << "Failed to load ELF header"; |
130 return false; | 130 return false; |
131 } | 131 } |
132 if (elf_header->e_machine != EM_ARM) { | 132 if (elf_header->e_machine != EM_ARM) { |
133 LOG("ERROR: File is not an arm32 ELF file\n"); | 133 LOG(ERROR) << "File is not an arm32 ELF file"; |
134 return false; | 134 return false; |
135 } | 135 } |
136 | 136 |
137 // Require that our endianness matches that of the target, and that both | 137 // Require that our endianness matches that of the target, and that both |
138 // are little-endian. Safe for all current build/target combinations. | 138 // are little-endian. Safe for all current build/target combinations. |
139 const int endian = static_cast<int>(elf_header->e_ident[5]); | 139 const int endian = static_cast<int>(elf_header->e_ident[5]); |
140 CHECK(endian == ELFDATA2LSB); | 140 CHECK(endian == ELFDATA2LSB); |
141 CHECK(__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__); | 141 CHECK(__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__); |
142 | 142 |
143 VLOG("endian = %u\n", endian); | 143 VLOG(1) << "endian = " << endian; |
144 VerboseLogElfHeader(elf_header); | 144 VerboseLogElfHeader(elf_header); |
145 | 145 |
146 const Elf32_Phdr* elf_program_header = elf32_getphdr(elf_); | 146 const Elf32_Phdr* elf_program_header = elf32_getphdr(elf_); |
147 CHECK(elf_program_header); | 147 CHECK(elf_program_header); |
148 | 148 |
149 const Elf32_Phdr* dynamic_program_header = NULL; | 149 const Elf32_Phdr* dynamic_program_header = NULL; |
150 for (size_t i = 0; i < elf_header->e_phnum; ++i) { | 150 for (size_t i = 0; i < elf_header->e_phnum; ++i) { |
151 const Elf32_Phdr* program_header = &elf_program_header[i]; | 151 const Elf32_Phdr* program_header = &elf_program_header[i]; |
152 VerboseLogProgramHeader(i, program_header); | 152 VerboseLogProgramHeader(i, program_header); |
153 | 153 |
(...skipping 47 matching lines...)
201 | 201 |
202 Elf_Data* data = NULL; | 202 Elf_Data* data = NULL; |
203 while ((data = elf_getdata(section, data)) != NULL) { | 203 while ((data = elf_getdata(section, data)) != NULL) { |
204 CHECK(data->d_align <= kPreserveAlignment); | 204 CHECK(data->d_align <= kPreserveAlignment); |
205 VerboseLogSectionData(data); | 205 VerboseLogSectionData(data); |
206 } | 206 } |
207 } | 207 } |
208 | 208 |
209 // Loading failed if we did not find the required special sections. | 209 // Loading failed if we did not find the required special sections. |
210 if (!found_rel_dyn_section) { | 210 if (!found_rel_dyn_section) { |
211 LOG("ERROR: Missing .rel.dyn section\n"); | 211 LOG(ERROR) << "Missing .rel.dyn section"; |
212 return false; | 212 return false; |
213 } | 213 } |
214 if (!found_dynamic_section) { | 214 if (!found_dynamic_section) { |
215 LOG("ERROR: Missing .dynamic section\n"); | 215 LOG(ERROR) << "Missing .dynamic section"; |
216 return false; | 216 return false; |
217 } | 217 } |
218 if (!found_android_rel_dyn_section) { | 218 if (!found_android_rel_dyn_section) { |
219 LOG("ERROR: Missing .android.rel.dyn section " | 219 LOG(ERROR) << "Missing .android.rel.dyn section " |
220 "(to fix, run with --help and follow the pre-packing instructions)\n"); | 220 << "(to fix, run with --help and follow the pre-packing " |
| 221 << "instructions)"; |
221 return false; | 222 return false; |
222 } | 223 } |
223 | 224 |
224 if (has_debug_section) { | 225 if (has_debug_section) { |
225 LOG("WARNING: found .debug section(s), and ignored them\n"); | 226 LOG(WARNING) << "Found .debug section(s), and ignored them"; |
226 } | 227 } |
227 | 228 |
228 rel_dyn_section_ = found_rel_dyn_section; | 229 rel_dyn_section_ = found_rel_dyn_section; |
229 dynamic_section_ = found_dynamic_section; | 230 dynamic_section_ = found_dynamic_section; |
230 android_rel_dyn_section_ = found_android_rel_dyn_section; | 231 android_rel_dyn_section_ = found_android_rel_dyn_section; |
231 return true; | 232 return true; |
232 } | 233 } |
233 | 234 |
234 namespace { | 235 namespace { |
235 | 236 |
236 // Helper for ResizeSection(). Adjust the main ELF header for the hole. | 237 // Helper for ResizeSection(). Adjust the main ELF header for the hole. |
237 void AdjustElfHeaderForHole(Elf32_Ehdr* elf_header, | 238 void AdjustElfHeaderForHole(Elf32_Ehdr* elf_header, |
238 Elf32_Off hole_start, | 239 Elf32_Off hole_start, |
239 int32_t hole_size) { | 240 int32_t hole_size) { |
240 if (elf_header->e_phoff > hole_start) { | 241 if (elf_header->e_phoff > hole_start) { |
241 elf_header->e_phoff += hole_size; | 242 elf_header->e_phoff += hole_size; |
242 VLOG("e_phoff adjusted to %u\n", elf_header->e_phoff); | 243 VLOG(1) << "e_phoff adjusted to " << elf_header->e_phoff; |
243 } | 244 } |
244 if (elf_header->e_shoff > hole_start) { | 245 if (elf_header->e_shoff > hole_start) { |
245 elf_header->e_shoff += hole_size; | 246 elf_header->e_shoff += hole_size; |
246 VLOG("e_shoff adjusted to %u\n", elf_header->e_shoff); | 247 VLOG(1) << "e_shoff adjusted to " << elf_header->e_shoff; |
247 } | 248 } |
248 } | 249 } |
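Every Adjust...ForHole helper in this block applies the same rule: hole_size is signed, positive when a hole is being opened (unpacking) and negative when one is being closed (packing), and any file offset strictly greater than hole_start shifts by it while earlier offsets stay put. A standalone illustration with made-up numbers:

```cpp
#include <cassert>
#include <cstdint>

// Illustration only: the shared adjustment rule used by the
// Adjust...ForHole helpers. Offsets past the hole start move by the
// signed hole size; offsets at or before it are untouched.
uint32_t AdjustForHole(uint32_t offset, uint32_t hole_start,
                       int32_t hole_size) {
  return offset > hole_start ? offset + hole_size : offset;
}

int main() {
  // Closing a 96-byte hole at offset 0x1000 (packing): later offsets
  // move down, earlier offsets are unchanged.
  assert(AdjustForHole(0x2000, 0x1000, -96) == 0x2000 - 96);
  assert(AdjustForHole(0x0800, 0x1000, -96) == 0x0800);
  // Opening the same hole again (unpacking) moves them back up.
  assert(AdjustForHole(0x2000 - 96, 0x1000, 96) == 0x2000);
  return 0;
}
```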
249 | 250 |
250 // Helper for ResizeSection(). Adjust all program headers for the hole. | 251 // Helper for ResizeSection(). Adjust all program headers for the hole. |
251 void AdjustProgramHeadersForHole(Elf32_Phdr* elf_program_header, | 252 void AdjustProgramHeadersForHole(Elf32_Phdr* elf_program_header, |
252 size_t program_header_count, | 253 size_t program_header_count, |
253 Elf32_Off hole_start, | 254 Elf32_Off hole_start, |
254 int32_t hole_size) { | 255 int32_t hole_size) { |
255 for (size_t i = 0; i < program_header_count; ++i) { | 256 for (size_t i = 0; i < program_header_count; ++i) { |
256 Elf32_Phdr* program_header = &elf_program_header[i]; | 257 Elf32_Phdr* program_header = &elf_program_header[i]; |
257 | 258 |
258 if (program_header->p_offset > hole_start) { | 259 if (program_header->p_offset > hole_start) { |
259 // The hole start is past this segment, so adjust offsets and addrs. | 260 // The hole start is past this segment, so adjust offsets and addrs. |
260 program_header->p_offset += hole_size; | 261 program_header->p_offset += hole_size; |
261 VLOG("phdr %lu p_offset adjusted to %u\n", i, program_header->p_offset); | 262 VLOG(1) << "phdr " << i |
| 263 << " p_offset adjusted to " << program_header->p_offset; |
262 | 264 |
263 // Only adjust vaddr and paddr if this program header has them. | 265 // Only adjust vaddr and paddr if this program header has them. |
264 if (program_header->p_vaddr != 0) { | 266 if (program_header->p_vaddr != 0) { |
265 program_header->p_vaddr += hole_size; | 267 program_header->p_vaddr += hole_size; |
266 VLOG("phdr %lu p_vaddr adjusted to %u\n", i, program_header->p_vaddr); | 268 VLOG(1) << "phdr " << i |
| 269 << " p_vaddr adjusted to " << program_header->p_vaddr; |
267 } | 270 } |
268 if (program_header->p_paddr != 0) { | 271 if (program_header->p_paddr != 0) { |
269 program_header->p_paddr += hole_size; | 272 program_header->p_paddr += hole_size; |
270 VLOG("phdr %lu p_paddr adjusted to %u\n", i, program_header->p_paddr); | 273 VLOG(1) << "phdr " << i |
| 274 << " p_paddr adjusted to " << program_header->p_paddr; |
271 } | 275 } |
272 } else if (program_header->p_offset + | 276 } else if (program_header->p_offset + |
273 program_header->p_filesz > hole_start) { | 277 program_header->p_filesz > hole_start) { |
274 // The hole start is within this segment, so adjust file and in-memory | 278 // The hole start is within this segment, so adjust file and in-memory |
275 // sizes, but leave offsets and addrs unchanged. | 279 // sizes, but leave offsets and addrs unchanged. |
276 program_header->p_filesz += hole_size; | 280 program_header->p_filesz += hole_size; |
277 VLOG("phdr %lu p_filesz adjusted to %u\n", i, program_header->p_filesz); | 281 VLOG(1) << "phdr " << i |
| 282 << " p_filesz adjusted to " << program_header->p_filesz; |
278 program_header->p_memsz += hole_size; | 283 program_header->p_memsz += hole_size; |
279 VLOG("phdr %lu p_memsz adjusted to %u\n", i, program_header->p_memsz); | 284 VLOG(1) << "phdr " << i |
| 285 << " p_memsz adjusted to " << program_header->p_memsz; |
280 } | 286 } |
281 } | 287 } |
282 } | 288 } |
283 | 289 |
284 // Helper for ResizeSection(). Adjust all section headers for the hole. | 290 // Helper for ResizeSection(). Adjust all section headers for the hole. |
285 void AdjustSectionHeadersForHole(Elf* elf, | 291 void AdjustSectionHeadersForHole(Elf* elf, |
286 Elf32_Off hole_start, | 292 Elf32_Off hole_start, |
287 int32_t hole_size) { | 293 int32_t hole_size) { |
288 size_t string_index; | 294 size_t string_index; |
289 elf_getshdrstrndx(elf, &string_index); | 295 elf_getshdrstrndx(elf, &string_index); |
290 | 296 |
291 Elf_Scn* section = NULL; | 297 Elf_Scn* section = NULL; |
292 while ((section = elf_nextscn(elf, section)) != NULL) { | 298 while ((section = elf_nextscn(elf, section)) != NULL) { |
293 Elf32_Shdr* section_header = elf32_getshdr(section); | 299 Elf32_Shdr* section_header = elf32_getshdr(section); |
294 std::string name = elf_strptr(elf, string_index, section_header->sh_name); | 300 std::string name = elf_strptr(elf, string_index, section_header->sh_name); |
295 | 301 |
296 if (section_header->sh_offset > hole_start) { | 302 if (section_header->sh_offset > hole_start) { |
297 section_header->sh_offset += hole_size; | 303 section_header->sh_offset += hole_size; |
298 VLOG("section %s sh_offset" | 304 VLOG(1) << "section " << name |
299 " adjusted to %u\n", name.c_str(), section_header->sh_offset); | 305 << " sh_offset adjusted to " << section_header->sh_offset; |
300 // Only adjust section addr if this section has one. | 306 // Only adjust section addr if this section has one. |
301 if (section_header->sh_addr != 0) { | 307 if (section_header->sh_addr != 0) { |
302 section_header->sh_addr += hole_size; | 308 section_header->sh_addr += hole_size; |
303 VLOG("section %s sh_addr" | 309 VLOG(1) << "section " << name |
304 " adjusted to %u\n", name.c_str(), section_header->sh_addr); | 310 << " sh_addr adjusted to " << section_header->sh_addr; |
305 } | 311 } |
306 } | 312 } |
307 } | 313 } |
308 } | 314 } |
309 | 315 |
310 // Helper for ResizeSection(). Adjust the .dynamic section for the hole. | 316 // Helper for ResizeSection(). Adjust the .dynamic section for the hole. |
311 void AdjustDynamicSectionForHole(Elf_Scn* dynamic_section, | 317 void AdjustDynamicSectionForHole(Elf_Scn* dynamic_section, |
312 bool is_rel_dyn_resize, | 318 bool is_rel_dyn_resize, |
313 Elf32_Off hole_start, | 319 Elf32_Off hole_start, |
314 int32_t hole_size) { | 320 int32_t hole_size) { |
(...skipping 15 matching lines...)
330 tag == DT_RELA || | 336 tag == DT_RELA || |
331 tag == DT_INIT || | 337 tag == DT_INIT || |
332 tag == DT_FINI || | 338 tag == DT_FINI || |
333 tag == DT_REL || | 339 tag == DT_REL || |
334 tag == DT_JMPREL || | 340 tag == DT_JMPREL || |
335 tag == DT_INIT_ARRAY || | 341 tag == DT_INIT_ARRAY || |
336 tag == DT_FINI_ARRAY || | 342 tag == DT_FINI_ARRAY || |
337 tag == DT_ANDROID_ARM_REL_OFFSET); | 343 tag == DT_ANDROID_ARM_REL_OFFSET); |
338 if (is_adjustable && dynamic->d_un.d_ptr > hole_start) { | 344 if (is_adjustable && dynamic->d_un.d_ptr > hole_start) { |
339 dynamic->d_un.d_ptr += hole_size; | 345 dynamic->d_un.d_ptr += hole_size; |
340 VLOG("dynamic[%lu] %u" | 346 VLOG(1) << "dynamic[" << i << "] " << dynamic->d_tag |
341 " d_ptr adjusted to %u\n", i, dynamic->d_tag, dynamic->d_un.d_ptr); | 347 << " d_ptr adjusted to " << dynamic->d_un.d_ptr; |
342 } | 348 } |
343 | 349 |
344 // If we are specifically resizing .rel.dyn, we need to make some added | 350 // If we are specifically resizing .rel.dyn, we need to make some added |
345 // adjustments to tags that indicate the counts of R_ARM_RELATIVE | 351 // adjustments to tags that indicate the counts of R_ARM_RELATIVE |
346 // relocations in the shared object. | 352 // relocations in the shared object. |
347 if (is_rel_dyn_resize) { | 353 if (is_rel_dyn_resize) { |
348 // DT_RELSZ is the overall size of relocations. Adjust by hole size. | 354 // DT_RELSZ is the overall size of relocations. Adjust by hole size. |
349 if (tag == DT_RELSZ) { | 355 if (tag == DT_RELSZ) { |
350 dynamic->d_un.d_val += hole_size; | 356 dynamic->d_un.d_val += hole_size; |
351 VLOG("dynamic[%lu] %u" | 357 VLOG(1) << "dynamic[" << i << "] " << dynamic->d_tag |
352 " d_val adjusted to %u\n", i, dynamic->d_tag, dynamic->d_un.d_val); | 358 << " d_val adjusted to " << dynamic->d_un.d_val; |
353 } | 359 } |
354 | 360 |
355 // The crazy linker does not use DT_RELCOUNT, but we keep it updated | 361 // DT_RELCOUNT is the count of relative relocations. Packing reduces it |
356 // anyway. In practice the section hole is always equal to the size | 362 // to the alignment padding, if any; unpacking restores it to its former |
357 // of R_ARM_RELATIVE relocations, and DT_RELCOUNT is the count of | 363 // value. The crazy linker does not use it, but we update it anyway. |
358 // relative relocations. So closing a hole on packing reduces | |
359 // DT_RELCOUNT to zero, and opening a hole on unpacking restores it to | |
360 // its pre-packed value. | |
361 if (tag == DT_RELCOUNT) { | 364 if (tag == DT_RELCOUNT) { |
362 dynamic->d_un.d_val += hole_size / sizeof(Elf32_Rel); | 365 // Cast sizeof to a signed type so that the negative hole_size is |
363 VLOG("dynamic[%lu] %u" | 366 // not converted to unsigned before the division. |
364 " d_val adjusted to %u\n", i, dynamic->d_tag, dynamic->d_un.d_val); | 367 const ssize_t sizeof_rel = static_cast<ssize_t>(sizeof(Elf32_Rel)); |
| 368 dynamic->d_un.d_val += hole_size / sizeof_rel; |
| 369 VLOG(1) << "dynamic[" << i << "] " << dynamic->d_tag |
| 370 << " d_val adjusted to " << dynamic->d_un.d_val; |
365 } | 371 } |
366 | 372 |
367 // DT_RELENT doesn't change, but make sure it is what we expect. | 373 // DT_RELENT doesn't change, but make sure it is what we expect. |
368 if (tag == DT_RELENT) { | 374 if (tag == DT_RELENT) { |
369 CHECK(dynamic->d_un.d_val == sizeof(Elf32_Rel)); | 375 CHECK(dynamic->d_un.d_val == sizeof(Elf32_Rel)); |
370 } | 376 } |
371 } | 377 } |
372 } | 378 } |
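The signed cast added in the DT_RELCOUNT branch above is load-bearing: hole_size is negative when packing, and without the cast C++'s usual arithmetic conversions would turn it into a huge unsigned value before the division. A self-contained demonstration of the pitfall (Rel merely mirrors the 8-byte Elf32_Rel layout):

```cpp
#include <sys/types.h>

#include <cstdint>
#include <cstdio>

// Mirrors the 8-byte Elf32_Rel layout, for illustration only.
struct Rel {
  uint32_t r_offset;
  uint32_t r_info;
};

int main() {
  const int32_t hole_size = -96;  // Closing a 96-byte hole while packing.

  // Without a cast, hole_size is converted to the unsigned type of
  // sizeof(Rel) before dividing, producing a huge bogus quotient.
  const size_t bad = hole_size / sizeof(Rel);

  // With the cast, this is ordinary signed division: -12, as intended.
  const ssize_t good = hole_size / static_cast<ssize_t>(sizeof(Rel));

  std::printf("bad = %zu, good = %zd\n", bad, good);
  return 0;
}
```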
373 | 379 |
374 void* section_data = &dynamics[0]; | 380 void* section_data = &dynamics[0]; |
(...skipping 17 matching lines...)
392 Elf32_Sym* dynsym = &dynsyms[i]; | 398 Elf32_Sym* dynsym = &dynsyms[i]; |
393 const int type = static_cast<int>(ELF32_ST_TYPE(dynsym->st_info)); | 399 const int type = static_cast<int>(ELF32_ST_TYPE(dynsym->st_info)); |
394 const bool is_adjustable = (type == STT_OBJECT || | 400 const bool is_adjustable = (type == STT_OBJECT || |
395 type == STT_FUNC || | 401 type == STT_FUNC || |
396 type == STT_SECTION || | 402 type == STT_SECTION || |
397 type == STT_FILE || | 403 type == STT_FILE || |
398 type == STT_COMMON || | 404 type == STT_COMMON || |
399 type == STT_TLS); | 405 type == STT_TLS); |
400 if (is_adjustable && dynsym->st_value > hole_start) { | 406 if (is_adjustable && dynsym->st_value > hole_start) { |
401 dynsym->st_value += hole_size; | 407 dynsym->st_value += hole_size; |
402 VLOG("dynsym[%lu] type=%u" | 408 VLOG(1) << "dynsym[" << i << "] type=" << type |
403 " st_value adjusted to %u\n", i, type, dynsym->st_value); | 409 << " st_value adjusted to " << dynsym->st_value; |
404 } | 410 } |
405 } | 411 } |
406 | 412 |
407 void* section_data = &dynsyms[0]; | 413 void* section_data = &dynsyms[0]; |
408 size_t bytes = dynsyms.size() * sizeof(dynsyms[0]); | 414 size_t bytes = dynsyms.size() * sizeof(dynsyms[0]); |
409 RewriteSectionData(data, section_data, bytes); | 415 RewriteSectionData(data, section_data, bytes); |
410 } | 416 } |
411 | 417 |
412 // Helper for ResizeSection(). Adjust the .rel.plt section for the hole. | 418 // Helper for ResizeSection(). Adjust the .rel.plt section for the hole. |
413 // We need to adjust the offset of every relocation inside it that falls | 419 // We need to adjust the offset of every relocation inside it that falls |
414 // beyond the hole start. | 420 // beyond the hole start. |
415 void AdjustRelPltSectionForHole(Elf_Scn* relplt_section, | 421 void AdjustRelPltSectionForHole(Elf_Scn* relplt_section, |
416 Elf32_Off hole_start, | 422 Elf32_Off hole_start, |
417 int32_t hole_size) { | 423 int32_t hole_size) { |
418 Elf_Data* data = GetSectionData(relplt_section); | 424 Elf_Data* data = GetSectionData(relplt_section); |
419 | 425 |
420 const Elf32_Rel* relplt_base = reinterpret_cast<Elf32_Rel*>(data->d_buf); | 426 const Elf32_Rel* relplt_base = reinterpret_cast<Elf32_Rel*>(data->d_buf); |
421 std::vector<Elf32_Rel> relplts( | 427 std::vector<Elf32_Rel> relplts( |
422 relplt_base, | 428 relplt_base, |
423 relplt_base + data->d_size / sizeof(relplts[0])); | 429 relplt_base + data->d_size / sizeof(relplts[0])); |
424 | 430 |
425 for (size_t i = 0; i < relplts.size(); ++i) { | 431 for (size_t i = 0; i < relplts.size(); ++i) { |
426 Elf32_Rel* relplt = &relplts[i]; | 432 Elf32_Rel* relplt = &relplts[i]; |
427 if (relplt->r_offset > hole_start) { | 433 if (relplt->r_offset > hole_start) { |
428 relplt->r_offset += hole_size; | 434 relplt->r_offset += hole_size; |
429 VLOG("relplt[%lu] r_offset adjusted to %u\n", i, relplt->r_offset); | 435 VLOG(1) << "relplt[" << i |
| 436 << "] r_offset adjusted to " << relplt->r_offset; |
430 } | 437 } |
431 } | 438 } |
432 | 439 |
433 void* section_data = &relplts[0]; | 440 void* section_data = &relplts[0]; |
434 size_t bytes = relplts.size() * sizeof(relplts[0]); | 441 size_t bytes = relplts.size() * sizeof(relplts[0]); |
435 RewriteSectionData(data, section_data, bytes); | 442 RewriteSectionData(data, section_data, bytes); |
436 } | 443 } |
437 | 444 |
438 // Helper for ResizeSection(). Adjust the .symtab section for the hole. | 445 // Helper for ResizeSection(). Adjust the .symtab section for the hole. |
439 // We want to adjust the value of every symbol in it that falls beyond | 446 // We want to adjust the value of every symbol in it that falls beyond |
440 // the hole start. | 447 // the hole start. |
441 void AdjustSymTabSectionForHole(Elf_Scn* symtab_section, | 448 void AdjustSymTabSectionForHole(Elf_Scn* symtab_section, |
442 Elf32_Off hole_start, | 449 Elf32_Off hole_start, |
443 int32_t hole_size) { | 450 int32_t hole_size) { |
444 Elf_Data* data = GetSectionData(symtab_section); | 451 Elf_Data* data = GetSectionData(symtab_section); |
445 | 452 |
446 const Elf32_Sym* symtab_base = reinterpret_cast<Elf32_Sym*>(data->d_buf); | 453 const Elf32_Sym* symtab_base = reinterpret_cast<Elf32_Sym*>(data->d_buf); |
447 std::vector<Elf32_Sym> symtab( | 454 std::vector<Elf32_Sym> symtab( |
448 symtab_base, | 455 symtab_base, |
449 symtab_base + data->d_size / sizeof(symtab[0])); | 456 symtab_base + data->d_size / sizeof(symtab[0])); |
450 | 457 |
451 for (size_t i = 0; i < symtab.size(); ++i) { | 458 for (size_t i = 0; i < symtab.size(); ++i) { |
452 Elf32_Sym* sym = &symtab[i]; | 459 Elf32_Sym* sym = &symtab[i]; |
453 if (sym->st_value > hole_start) { | 460 if (sym->st_value > hole_start) { |
454 sym->st_value += hole_size; | 461 sym->st_value += hole_size; |
455 VLOG("symtab[%lu] value adjusted to %u\n", i, sym->st_value); | 462 VLOG(1) << "symtab[" << i << "] value adjusted to " << sym->st_value; |
456 } | 463 } |
457 } | 464 } |
458 | 465 |
459 void* section_data = &symtab[0]; | 466 void* section_data = &symtab[0]; |
460 size_t bytes = symtab.size() * sizeof(symtab[0]); | 467 size_t bytes = symtab.size() * sizeof(symtab[0]); |
461 RewriteSectionData(data, section_data, bytes); | 468 RewriteSectionData(data, section_data, bytes); |
462 } | 469 } |
463 | 470 |
464 // Resize a section. If the new size is larger than the current size, open | 471 // Resize a section. If the new size is larger than the current size, open |
465 // up a hole by increasing file offsets that come after the hole. If smaller | 472 // up a hole by increasing file offsets that come after the hole. If smaller |
(...skipping 17 matching lines...)
483 Elf_Data* data = GetSectionData(section); | 490 Elf_Data* data = GetSectionData(section); |
484 CHECK(data->d_off == 0 && data->d_size == section_header->sh_size); | 491 CHECK(data->d_off == 0 && data->d_size == section_header->sh_size); |
485 | 492 |
486 // Require that the section is not zero-length (that is, has allocated | 493 // Require that the section is not zero-length (that is, has allocated |
487 // data that we can validly expand). | 494 // data that we can validly expand). |
488 CHECK(data->d_size && data->d_buf); | 495 CHECK(data->d_size && data->d_buf); |
489 | 496 |
490 const Elf32_Off hole_start = section_header->sh_offset; | 497 const Elf32_Off hole_start = section_header->sh_offset; |
491 const int32_t hole_size = new_size - data->d_size; | 498 const int32_t hole_size = new_size - data->d_size; |
492 | 499 |
493 VLOG_IF(hole_size > 0, "expand section size = %lu\n", data->d_size); | 500 VLOG_IF(1, (hole_size > 0)) << "expand section size = " << data->d_size; |
494 VLOG_IF(hole_size < 0, "shrink section size = %lu\n", data->d_size); | 501 VLOG_IF(1, (hole_size < 0)) << "shrink section size = " << data->d_size; |
495 | 502 |
496 // Resize the data and the section header. | 503 // Resize the data and the section header. |
497 data->d_size += hole_size; | 504 data->d_size += hole_size; |
498 section_header->sh_size += hole_size; | 505 section_header->sh_size += hole_size; |
499 | 506 |
500 Elf32_Ehdr* elf_header = elf32_getehdr(elf); | 507 Elf32_Ehdr* elf_header = elf32_getehdr(elf); |
501 Elf32_Phdr* elf_program_header = elf32_getphdr(elf); | 508 Elf32_Phdr* elf_program_header = elf32_getphdr(elf); |
502 | 509 |
503 // Add the hole size to all offsets in the ELF file that are after the | 510 // Add the hole size to all offsets in the ELF file that are after the |
504 // start of the hole. If the hole size is positive we are expanding the | 511 // start of the hole. If the hole size is positive we are expanding the |
(...skipping 83 matching lines...)
588 // Replace the first free (unused) slot in a dynamics vector with the given | 595 // Replace the first free (unused) slot in a dynamics vector with the given |
589 // value. The vector always ends with a free (unused) element, so the slot | 596 // value. The vector always ends with a free (unused) element, so the slot |
590 // found cannot be the last one in the vector. | 597 // found cannot be the last one in the vector. |
591 void AddDynamicEntry(Elf32_Dyn dyn, | 598 void AddDynamicEntry(Elf32_Dyn dyn, |
592 std::vector<Elf32_Dyn>* dynamics) { | 599 std::vector<Elf32_Dyn>* dynamics) { |
593 // Loop until the penultimate entry. We cannot replace the end sentinel. | 600 // Loop until the penultimate entry. We cannot replace the end sentinel. |
594 for (size_t i = 0; i < dynamics->size() - 1; ++i) { | 601 for (size_t i = 0; i < dynamics->size() - 1; ++i) { |
595 Elf32_Dyn &slot = dynamics->at(i); | 602 Elf32_Dyn &slot = dynamics->at(i); |
596 if (slot.d_tag == DT_NULL) { | 603 if (slot.d_tag == DT_NULL) { |
597 slot = dyn; | 604 slot = dyn; |
598 VLOG("dynamic[%lu] overwritten with %u\n", i, dyn.d_tag); | 605 VLOG(1) << "dynamic[" << i << "] overwritten with " << dyn.d_tag; |
599 return; | 606 return; |
600 } | 607 } |
601 } | 608 } |
602 | 609 |
603 // No free dynamics vector slot was found. | 610 // No free dynamics vector slot was found. |
604 LOG("FATAL: No spare dynamic vector slots found " | 611 LOG(FATAL) << "No spare dynamic vector slots found " |
605 "(to fix, increase gold's --spare-dynamic-tags value)\n"); | 612 << "(to fix, increase gold's --spare-dynamic-tags value)"; |
606 NOTREACHED(); | |
607 } | 613 } |
608 | 614 |
609 // Remove the element in the dynamics vector that matches the given tag with | 615 // Remove the element in the dynamics vector that matches the given tag with |
610 // unused slot data. Shuffle the following elements up, and ensure that the | 616 // unused slot data. Shuffle the following elements up, and ensure that the |
611 // last is the null sentinel. | 617 // last is the null sentinel. |
612 void RemoveDynamicEntry(Elf32_Sword tag, | 618 void RemoveDynamicEntry(Elf32_Sword tag, |
613 std::vector<Elf32_Dyn>* dynamics) { | 619 std::vector<Elf32_Dyn>* dynamics) { |
614 // Loop until the penultimate entry, and never match the end sentinel. | 620 // Loop until the penultimate entry, and never match the end sentinel. |
615 for (size_t i = 0; i < dynamics->size() - 1; ++i) { | 621 for (size_t i = 0; i < dynamics->size() - 1; ++i) { |
616 Elf32_Dyn &slot = dynamics->at(i); | 622 Elf32_Dyn &slot = dynamics->at(i); |
617 if (slot.d_tag == tag) { | 623 if (slot.d_tag == tag) { |
618 for ( ; i < dynamics->size() - 1; ++i) { | 624 for ( ; i < dynamics->size() - 1; ++i) { |
619 dynamics->at(i) = dynamics->at(i + 1); | 625 dynamics->at(i) = dynamics->at(i + 1); |
620 VLOG("dynamic[%lu] overwritten with dynamic[%lu]\n", i, i + 1); | 626 VLOG(1) << "dynamic[" << i |
| 627 << "] overwritten with dynamic[" << i + 1 << "]"; |
621 } | 628 } |
622 CHECK(dynamics->at(i).d_tag == DT_NULL); | 629 CHECK(dynamics->at(i).d_tag == DT_NULL); |
623 return; | 630 return; |
624 } | 631 } |
625 } | 632 } |
626 | 633 |
627 // No matching dynamics vector entry was found. | 634 // No matching dynamics vector entry was found. |
628 NOTREACHED(); | 635 NOTREACHED(); |
629 } | 636 } |
630 | 637 |
(...skipping 35 matching lines...)
666 if (*target > hole_start) { | 673 if (*target > hole_start) { |
667 // Copy on first write. Recompute target to point into the newly | 674 // Copy on first write. Recompute target to point into the newly |
668 // allocated buffer. | 675 // allocated buffer. |
669 if (area == data->d_buf) { | 676 if (area == data->d_buf) { |
670 area = new uint8_t[data->d_size]; | 677 area = new uint8_t[data->d_size]; |
671 memcpy(area, data->d_buf, data->d_size); | 678 memcpy(area, data->d_buf, data->d_size); |
672 target = reinterpret_cast<Elf32_Off*>(area + byte_offset); | 679 target = reinterpret_cast<Elf32_Off*>(area + byte_offset); |
673 } | 680 } |
674 | 681 |
675 *target += hole_size; | 682 *target += hole_size; |
676 VLOG("relocation[%lu] target adjusted to %u\n", i, *target); | 683 VLOG(1) << "relocation[" << i << "] target adjusted to " << *target; |
677 } | 684 } |
678 } | 685 } |
679 } | 686 } |
680 | 687 |
681 // If we applied any relocation to this section, write it back. | 688 // If we applied any relocation to this section, write it back. |
682 if (area != data->d_buf) { | 689 if (area != data->d_buf) { |
683 RewriteSectionData(data, area, data->d_size); | 690 RewriteSectionData(data, area, data->d_size); |
684 delete [] area; | 691 delete [] area; |
685 } | 692 } |
686 } | 693 } |
(...skipping 10 matching lines...)
697 // Adjust relocations so that the offset that they indicate will be correct | 704 // Adjust relocations so that the offset that they indicate will be correct |
698 // after the hole in .rel.dyn is added or removed (in effect, relocate the | 705 // after the hole in .rel.dyn is added or removed (in effect, relocate the |
699 // relocations). | 706 // relocations). |
700 void AdjustRelocations(Elf32_Off hole_start, | 707 void AdjustRelocations(Elf32_Off hole_start, |
701 size_t hole_size, | 708 size_t hole_size, |
702 std::vector<Elf32_Rel>* relocations) { | 709 std::vector<Elf32_Rel>* relocations) { |
703 for (size_t i = 0; i < relocations->size(); ++i) { | 710 for (size_t i = 0; i < relocations->size(); ++i) { |
704 Elf32_Rel* relocation = &relocations->at(i); | 711 Elf32_Rel* relocation = &relocations->at(i); |
705 if (relocation->r_offset > hole_start) { | 712 if (relocation->r_offset > hole_start) { |
706 relocation->r_offset += hole_size; | 713 relocation->r_offset += hole_size; |
707 VLOG("relocation[%lu] offset adjusted to %u\n", i, relocation->r_offset); | 714 VLOG(1) << "relocation[" << i |
| 715 << "] offset adjusted to " << relocation->r_offset; |
708 } | 716 } |
709 } | 717 } |
710 } | 718 } |
711 | 719 |
712 } // namespace | 720 } // namespace |
713 | 721 |
714 // Remove R_ARM_RELATIVE entries from .rel.dyn and write as packed data | 722 // Remove R_ARM_RELATIVE entries from .rel.dyn and write as packed data |
715 // into .android.rel.dyn. | 723 // into .android.rel.dyn. |
716 bool ElfFile::PackRelocations() { | 724 bool ElfFile::PackRelocations() { |
717 // Load the ELF file into libelf. | 725 // Load the ELF file into libelf. |
718 if (!Load()) { | 726 if (!Load()) { |
719 LOG("ERROR: Failed to load as ELF (elf_error=%d)\n", elf_errno()); | 727 LOG(ERROR) << "Failed to load as ELF (elf_error=" << elf_errno() << ")"; |
720 return false; | 728 return false; |
721 } | 729 } |
722 | 730 |
723 // Retrieve the current .rel.dyn section data. | 731 // Retrieve the current .rel.dyn section data. |
724 Elf_Data* data = GetSectionData(rel_dyn_section_); | 732 Elf_Data* data = GetSectionData(rel_dyn_section_); |
725 | 733 |
726 // Convert data to a vector of Elf32 relocations. | 734 // Convert data to a vector of Elf32 relocations. |
727 const Elf32_Rel* relocations_base = reinterpret_cast<Elf32_Rel*>(data->d_buf); | 735 const Elf32_Rel* relocations_base = reinterpret_cast<Elf32_Rel*>(data->d_buf); |
728 std::vector<Elf32_Rel> relocations( | 736 std::vector<Elf32_Rel> relocations( |
729 relocations_base, | 737 relocations_base, |
730 relocations_base + data->d_size / sizeof(relocations[0])); | 738 relocations_base + data->d_size / sizeof(relocations[0])); |
731 | 739 |
732 std::vector<Elf32_Rel> relative_relocations; | 740 std::vector<Elf32_Rel> relative_relocations; |
733 std::vector<Elf32_Rel> other_relocations; | 741 std::vector<Elf32_Rel> other_relocations; |
734 | 742 |
735 // Filter relocations into those that are R_ARM_RELATIVE and others. | 743 // Filter relocations into those that are R_ARM_RELATIVE and others. |
736 for (size_t i = 0; i < relocations.size(); ++i) { | 744 for (size_t i = 0; i < relocations.size(); ++i) { |
737 const Elf32_Rel& relocation = relocations[i]; | 745 const Elf32_Rel& relocation = relocations[i]; |
738 if (ELF32_R_TYPE(relocation.r_info) == R_ARM_RELATIVE) { | 746 if (ELF32_R_TYPE(relocation.r_info) == R_ARM_RELATIVE) { |
739 CHECK(ELF32_R_SYM(relocation.r_info) == 0); | 747 CHECK(ELF32_R_SYM(relocation.r_info) == 0); |
740 relative_relocations.push_back(relocation); | 748 relative_relocations.push_back(relocation); |
741 } else { | 749 } else { |
742 other_relocations.push_back(relocation); | 750 other_relocations.push_back(relocation); |
743 } | 751 } |
744 } | 752 } |
745 LOG("R_ARM_RELATIVE: %lu entries\n", relative_relocations.size()); | 753 LOG(INFO) << "R_ARM_RELATIVE: " << relative_relocations.size() << " entries"; |
746 LOG("Other : %lu entries\n", other_relocations.size()); | 754 LOG(INFO) << "Other : " << other_relocations.size() << " entries"; |
747 LOG("Total : %lu entries\n", relocations.size()); | 755 LOG(INFO) << "Total : " << relocations.size() << " entries"; |
748 | 756 |
749 // If no relative relocations then we have nothing packable. Perhaps | 757 // If no relative relocations then we have nothing packable. Perhaps |
750 // the shared object has already been packed? | 758 // the shared object has already been packed? |
751 if (relative_relocations.empty()) { | 759 if (relative_relocations.empty()) { |
752 LOG("ERROR: No R_ARM_RELATIVE relocations found (already packed?)\n"); | 760 LOG(ERROR) << "No R_ARM_RELATIVE relocations found (already packed?)"; |
753 return false; | 761 return false; |
754 } | 762 } |
755 | 763 |
756 // Unless padding, pre-apply R_ARM_RELATIVE relocations to account for the | 764 // Unless padding, pre-apply R_ARM_RELATIVE relocations to account for the |
757 // hole, and pre-adjust all relocation offsets accordingly. | 765 // hole, and pre-adjust all relocation offsets accordingly. |
758 if (!is_padding_rel_dyn_) { | 766 if (!is_padding_rel_dyn_) { |
759 // Pre-calculate the size of the hole we will close up when we rewrite | 767 // Pre-calculate the size of the hole we will close up when we rewrite |
760 // .rel.dyn. We have to adjust relocation addresses to account for this. | 768 // .rel.dyn. We have to adjust relocation addresses to account for this. |
761 Elf32_Shdr* section_header = elf32_getshdr(rel_dyn_section_); | 769 Elf32_Shdr* section_header = elf32_getshdr(rel_dyn_section_); |
762 const Elf32_Off hole_start = section_header->sh_offset; | 770 const Elf32_Off hole_start = section_header->sh_offset; |
763 size_t hole_size = | 771 size_t hole_size = |
764 relative_relocations.size() * sizeof(relative_relocations[0]); | 772 relative_relocations.size() * sizeof(relative_relocations[0]); |
765 const size_t unaligned_hole_size = hole_size; | 773 const size_t unaligned_hole_size = hole_size; |
766 | 774 |
767 // Adjust the actual hole size to preserve alignment. | 775 // Adjust the actual hole size to preserve alignment. |
768 hole_size -= hole_size % kPreserveAlignment; | 776 hole_size -= hole_size % kPreserveAlignment; |
769 LOG("Compaction : %lu bytes\n", hole_size); | 777 LOG(INFO) << "Compaction : " << hole_size << " bytes"; |
770 | 778 |
771 // Adjusting for alignment may have removed any packing benefit. | 779 // Adjusting for alignment may have removed any packing benefit. |
772 if (hole_size == 0) { | 780 if (hole_size == 0) { |
773 LOG("Too few R_ARM_RELATIVE relocations to pack after alignment\n"); | 781 LOG(INFO) << "Too few R_ARM_RELATIVE relocations to pack after alignment"; |
774 return false; | 782 return false; |
775 } | 783 } |
776 | 784 |
777 // Add R_ARM_NONE relocations to other_relocations to preserve alignment. | 785 // Add R_ARM_NONE relocations to other_relocations to preserve alignment. |
778 const size_t padding_bytes = unaligned_hole_size - hole_size; | 786 const size_t padding_bytes = unaligned_hole_size - hole_size; |
779 CHECK(padding_bytes % sizeof(other_relocations[0]) == 0); | 787 CHECK(padding_bytes % sizeof(other_relocations[0]) == 0); |
780 const size_t required = padding_bytes / sizeof(other_relocations[0]); | 788 const size_t required = padding_bytes / sizeof(other_relocations[0]); |
781 PadRelocations(required, &other_relocations); | 789 PadRelocations(required, &other_relocations); |
782 LOG("Alignment pad : %lu relocations\n", required); | 790 LOG(INFO) << "Alignment pad : " << required << " relocations"; |
783 | 791 |
784 // Apply relocations to all R_ARM_RELATIVE data to relocate it into the | 792 // Apply relocations to all R_ARM_RELATIVE data to relocate it into the |
785 // area it will occupy once the hole in .rel.dyn is removed. | 793 // area it will occupy once the hole in .rel.dyn is removed. |
786 AdjustRelocationTargets(elf_, hole_start, -hole_size, relative_relocations); | 794 AdjustRelocationTargets(elf_, hole_start, -hole_size, relative_relocations); |
787 // Relocate the relocations. | 795 // Relocate the relocations. |
788 AdjustRelocations(hole_start, -hole_size, &relative_relocations); | 796 AdjustRelocations(hole_start, -hole_size, &relative_relocations); |
789 AdjustRelocations(hole_start, -hole_size, &other_relocations); | 797 AdjustRelocations(hole_start, -hole_size, &other_relocations); |
790 } else { | 798 } else { |
791 // If padding, add R_ARM_NONE relocations to other_relocations to make it | 799 // If padding, add R_ARM_NONE relocations to other_relocations to make it |
792 // the same size as the original relocations we read in. This makes | 800 // the same size as the original relocations we read in. This makes |
793 // the ResizeSection() below a no-op. | 801 // the ResizeSection() below a no-op. |
794 const size_t required = relocations.size() - other_relocations.size(); | 802 const size_t required = relocations.size() - other_relocations.size(); |
795 PadRelocations(required, &other_relocations); | 803 PadRelocations(required, &other_relocations); |
796 } | 804 } |
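A worked example of the alignment arithmetic in the packing branch above, assuming kPreserveAlignment is 4096 (its actual value is defined elsewhere in the tool) and a .rel.dyn holding 1000 R_ARM_RELATIVE entries of 8 bytes each:

```cpp
#include <cassert>
#include <cstddef>

int main() {
  const size_t kPreserveAlignment = 4096;  // Assumed for this sketch.
  const size_t sizeof_rel = 8;             // sizeof(Elf32_Rel)

  // 1000 relative relocations would leave an 8000-byte hole.
  size_t hole_size = 1000 * sizeof_rel;
  const size_t unaligned_hole_size = hole_size;

  // Shrink the hole to a multiple of the preserved alignment.
  hole_size -= hole_size % kPreserveAlignment;
  assert(hole_size == 4096);  // Compaction : 4096 bytes

  // The remainder is refilled with R_ARM_NONE padding relocations.
  const size_t padding_bytes = unaligned_hole_size - hole_size;
  assert(padding_bytes % sizeof_rel == 0);
  const size_t required = padding_bytes / sizeof_rel;
  assert(required == 488);    // Alignment pad : 488 relocations
  return 0;
}
```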
797 | 805 |
798 | 806 |
799 // Pack R_ARM_RELATIVE relocations. | 807 // Pack R_ARM_RELATIVE relocations. |
800 const size_t initial_bytes = | 808 const size_t initial_bytes = |
801 relative_relocations.size() * sizeof(relative_relocations[0]); | 809 relative_relocations.size() * sizeof(relative_relocations[0]); |
802 LOG("Unpacked R_ARM_RELATIVE: %lu bytes\n", initial_bytes); | 810 LOG(INFO) << "Unpacked R_ARM_RELATIVE: " << initial_bytes << " bytes"; |
803 std::vector<uint8_t> packed; | 811 std::vector<uint8_t> packed; |
804 RelocationPacker packer; | 812 RelocationPacker packer; |
805 packer.PackRelativeRelocations(relative_relocations, &packed); | 813 packer.PackRelativeRelocations(relative_relocations, &packed); |
806 const void* packed_data = &packed[0]; | 814 const void* packed_data = &packed[0]; |
807 const size_t packed_bytes = packed.size() * sizeof(packed[0]); | 815 const size_t packed_bytes = packed.size() * sizeof(packed[0]); |
808 LOG("Packed R_ARM_RELATIVE: %lu bytes\n", packed_bytes); | 816 LOG(INFO) << "Packed R_ARM_RELATIVE: " << packed_bytes << " bytes"; |
809 | 817 |
810 // If we have insufficient R_ARM_RELATIVE relocations to form a run then | 818 // If we have insufficient R_ARM_RELATIVE relocations to form a run then |
811 // packing fails. | 819 // packing fails. |
812 if (packed.empty()) { | 820 if (packed.empty()) { |
813 LOG("Too few R_ARM_RELATIVE relocations to pack\n"); | 821 LOG(INFO) << "Too few R_ARM_RELATIVE relocations to pack"; |
814 return false; | 822 return false; |
815 } | 823 } |
816 | 824 |
817 // Run a loopback self-test as a check that packing is lossless. | 825 // Run a loopback self-test as a check that packing is lossless. |
818 std::vector<Elf32_Rel> unpacked; | 826 std::vector<Elf32_Rel> unpacked; |
819 packer.UnpackRelativeRelocations(packed, &unpacked); | 827 packer.UnpackRelativeRelocations(packed, &unpacked); |
820 CHECK(unpacked.size() == relative_relocations.size()); | 828 CHECK(unpacked.size() == relative_relocations.size()); |
821 for (size_t i = 0; i < unpacked.size(); ++i) { | 829 for (size_t i = 0; i < unpacked.size(); ++i) { |
822 CHECK(unpacked[i].r_offset == relative_relocations[i].r_offset); | 830 CHECK(unpacked[i].r_offset == relative_relocations[i].r_offset); |
823 CHECK(unpacked[i].r_info == relative_relocations[i].r_info); | 831 CHECK(unpacked[i].r_info == relative_relocations[i].r_info); |
824 } | 832 } |
825 | 833 |
826 // Make sure packing saved some space. | 834 // Make sure packing saved some space. |
827 if (packed_bytes >= initial_bytes) { | 835 if (packed_bytes >= initial_bytes) { |
828 LOG("Packing R_ARM_RELATIVE relocations saves no space\n"); | 836 LOG(INFO) << "Packing R_ARM_RELATIVE relocations saves no space"; |
829 return false; | 837 return false; |
830 } | 838 } |
831 | 839 |
832 // Rewrite the current .rel.dyn section to be only the non-R_ARM_RELATIVE | 840 // Rewrite the current .rel.dyn section to be only the non-R_ARM_RELATIVE |
833 // relocations, then shrink it to size. | 841 // relocations, then shrink it to size. |
834 const void* section_data = &other_relocations[0]; | 842 const void* section_data = &other_relocations[0]; |
835 const size_t bytes = other_relocations.size() * sizeof(other_relocations[0]); | 843 const size_t bytes = other_relocations.size() * sizeof(other_relocations[0]); |
836 ResizeSection(elf_, rel_dyn_section_, bytes); | 844 ResizeSection(elf_, rel_dyn_section_, bytes); |
837 RewriteSectionData(data, section_data, bytes); | 845 RewriteSectionData(data, section_data, bytes); |
838 | 846 |
(...skipping 23 matching lines...)
862 | 870 |
863 Flush(); | 871 Flush(); |
864 return true; | 872 return true; |
865 } | 873 } |
866 | 874 |
867 // Find packed R_ARM_RELATIVE relocations in .android.rel.dyn, unpack them, | 875 // Find packed R_ARM_RELATIVE relocations in .android.rel.dyn, unpack them, |
868 // and rewrite the .rel.dyn section in so_file to contain unpacked data. | 876 // and rewrite the .rel.dyn section in so_file to contain unpacked data. |
869 bool ElfFile::UnpackRelocations() { | 877 bool ElfFile::UnpackRelocations() { |
870 // Load the ELF file into libelf. | 878 // Load the ELF file into libelf. |
871 if (!Load()) { | 879 if (!Load()) { |
872 LOG("ERROR: Failed to load as ELF (elf_error=%d)\n", elf_errno()); | 880 LOG(ERROR) << "Failed to load as ELF (elf_error=" << elf_errno() << ")"; |
873 return false; | 881 return false; |
874 } | 882 } |
875 | 883 |
876 // Retrieve the current .android.rel.dyn section data. | 884 // Retrieve the current .android.rel.dyn section data. |
877 Elf_Data* data = GetSectionData(android_rel_dyn_section_); | 885 Elf_Data* data = GetSectionData(android_rel_dyn_section_); |
878 | 886 |
879 // Convert data to a vector of bytes. | 887 // Convert data to a vector of bytes. |
880 const uint8_t* packed_base = reinterpret_cast<uint8_t*>(data->d_buf); | 888 const uint8_t* packed_base = reinterpret_cast<uint8_t*>(data->d_buf); |
881 std::vector<uint8_t> packed( | 889 std::vector<uint8_t> packed( |
882 packed_base, | 890 packed_base, |
883 packed_base + data->d_size / sizeof(packed[0])); | 891 packed_base + data->d_size / sizeof(packed[0])); |
884 | 892 |
885 // Properly packed data must begin with "APR1". | 893 // Properly packed data must begin with "APR1". |
886 if (packed.empty() || | 894 if (packed.empty() || |
887 packed[0] != 'A' || packed[1] != 'P' || | 895 packed[0] != 'A' || packed[1] != 'P' || |
888 packed[2] != 'R' || packed[3] != '1') { | 896 packed[2] != 'R' || packed[3] != '1') { |
889 LOG("ERROR: Packed R_ARM_RELATIVE relocations not found (not packed?)\n"); | 897 LOG(ERROR) << "Packed R_ARM_RELATIVE relocations not found (not packed?)"; |
890 return false; | 898 return false; |
891 } | 899 } |
892 | 900 |
893 // Unpack the data to re-materialize the R_ARM_RELATIVE relocations. | 901 // Unpack the data to re-materialize the R_ARM_RELATIVE relocations. |
894 const size_t packed_bytes = packed.size() * sizeof(packed[0]); | 902 const size_t packed_bytes = packed.size() * sizeof(packed[0]); |
895 LOG("Packed R_ARM_RELATIVE: %lu bytes\n", packed_bytes); | 903 LOG(INFO) << "Packed R_ARM_RELATIVE: " << packed_bytes << " bytes"; |
896 std::vector<Elf32_Rel> relative_relocations; | 904 std::vector<Elf32_Rel> relative_relocations; |
897 RelocationPacker packer; | 905 RelocationPacker packer; |
898 packer.UnpackRelativeRelocations(packed, &relative_relocations); | 906 packer.UnpackRelativeRelocations(packed, &relative_relocations); |
899 const size_t unpacked_bytes = | 907 const size_t unpacked_bytes = |
900 relative_relocations.size() * sizeof(relative_relocations[0]); | 908 relative_relocations.size() * sizeof(relative_relocations[0]); |
901 LOG("Unpacked R_ARM_RELATIVE: %lu bytes\n", unpacked_bytes); | 909 LOG(INFO) << "Unpacked R_ARM_RELATIVE: " << unpacked_bytes << " bytes"; |
902 | 910 |
903 // Retrieve the current .rel.dyn section data. | 911 // Retrieve the current .rel.dyn section data. |
904 data = GetSectionData(rel_dyn_section_); | 912 data = GetSectionData(rel_dyn_section_); |
905 | 913 |
906 // Interpret data as Elf32 relocations. | 914 // Interpret data as Elf32 relocations. |
907 const Elf32_Rel* relocations_base = reinterpret_cast<Elf32_Rel*>(data->d_buf); | 915 const Elf32_Rel* relocations_base = reinterpret_cast<Elf32_Rel*>(data->d_buf); |
908 std::vector<Elf32_Rel> relocations( | 916 std::vector<Elf32_Rel> relocations( |
909 relocations_base, | 917 relocations_base, |
910 relocations_base + data->d_size / sizeof(relocations[0])); | 918 relocations_base + data->d_size / sizeof(relocations[0])); |
911 | 919 |
912 std::vector<Elf32_Rel> other_relocations; | 920 std::vector<Elf32_Rel> other_relocations; |
913 size_t padding = 0; | 921 size_t padding = 0; |
914 | 922 |
915 // Filter relocations to locate any that are R_ARM_NONE. These will occur | 923 // Filter relocations to locate any that are R_ARM_NONE. These will occur |
916 // if padding was turned on for packing. | 924 // if padding was turned on for packing. |
917 for (size_t i = 0; i < relocations.size(); ++i) { | 925 for (size_t i = 0; i < relocations.size(); ++i) { |
918 const Elf32_Rel& relocation = relocations[i]; | 926 const Elf32_Rel& relocation = relocations[i]; |
919 if (ELF32_R_TYPE(relocation.r_info) != R_ARM_NONE) { | 927 if (ELF32_R_TYPE(relocation.r_info) != R_ARM_NONE) { |
920 other_relocations.push_back(relocation); | 928 other_relocations.push_back(relocation); |
921 } else { | 929 } else { |
922 ++padding; | 930 ++padding; |
923 } | 931 } |
924 } | 932 } |
925 LOG("R_ARM_RELATIVE: %lu entries\n", relative_relocations.size()); | 933 LOG(INFO) << "R_ARM_RELATIVE: " << relative_relocations.size() << " entries"; |
926 LOG("Other : %lu entries\n", other_relocations.size()); | 934 LOG(INFO) << "Other : " << other_relocations.size() << " entries"; |
927 | 935 |
928 // If we found the same number of R_ARM_NONE entries in .rel.dyn as we | 936 // If we found the same number of R_ARM_NONE entries in .rel.dyn as we |
929 // hold as unpacked relative relocations, then this is a padded file. | 937 // hold as unpacked relative relocations, then this is a padded file. |
930 const bool is_padded = padding == relative_relocations.size(); | 938 const bool is_padded = padding == relative_relocations.size(); |
931 | 939 |
932 // Unless padded, pre-apply R_ARM_RELATIVE relocations to account for the | 940 // Unless padded, pre-apply R_ARM_RELATIVE relocations to account for the |
933 // hole, and pre-adjust all relocation offsets accordingly. | 941 // hole, and pre-adjust all relocation offsets accordingly. |
934 if (!is_padded) { | 942 if (!is_padded) { |
935 // Pre-calculate the size of the hole we will open up when we rewrite | 943 // Pre-calculate the size of the hole we will open up when we rewrite |
936 // .rel.dyn. We have to adjust relocation addresses to account for this. | 944 // .rel.dyn. We have to adjust relocation addresses to account for this. |
937 Elf32_Shdr* section_header = elf32_getshdr(rel_dyn_section_); | 945 Elf32_Shdr* section_header = elf32_getshdr(rel_dyn_section_); |
938 const Elf32_Off hole_start = section_header->sh_offset; | 946 const Elf32_Off hole_start = section_header->sh_offset; |
939 size_t hole_size = | 947 size_t hole_size = |
940 relative_relocations.size() * sizeof(relative_relocations[0]); | 948 relative_relocations.size() * sizeof(relative_relocations[0]); |
941 | 949 |
942 // Adjust the hole size for the padding added to preserve alignment. | 950 // Adjust the hole size for the padding added to preserve alignment. |
943 hole_size -= padding * sizeof(other_relocations[0]); | 951 hole_size -= padding * sizeof(other_relocations[0]); |
944 LOG("Expansion : %lu bytes\n", hole_size); | 952 LOG(INFO) << "Expansion : " << hole_size << " bytes"; |
945 | 953 |
946 // Apply relocations to all R_ARM_RELATIVE data to relocate it into the | 954 // Apply relocations to all R_ARM_RELATIVE data to relocate it into the |
947 // area it will occupy once the hole in .rel.dyn is opened. | 955 // area it will occupy once the hole in .rel.dyn is opened. |
948 AdjustRelocationTargets(elf_, hole_start, hole_size, relative_relocations); | 956 AdjustRelocationTargets(elf_, hole_start, hole_size, relative_relocations); |
949 // Relocate the relocations. | 957 // Relocate the relocations. |
950 AdjustRelocations(hole_start, hole_size, &relative_relocations); | 958 AdjustRelocations(hole_start, hole_size, &relative_relocations); |
951 AdjustRelocations(hole_start, hole_size, &other_relocations); | 959 AdjustRelocations(hole_start, hole_size, &other_relocations); |
952 } | 960 } |
953 | 961 |
954 // Rewrite the current .rel.dyn section to be the R_ARM_RELATIVE relocations | 962 // Rewrite the current .rel.dyn section to be the R_ARM_RELATIVE relocations |
955 // followed by other relocations. This is the usual order in which we find | 963 // followed by other relocations. This is the usual order in which we find |
956 // them after linking, so this action will normally put the entire .rel.dyn | 964 // them after linking, so this action will normally put the entire .rel.dyn |
957 // section back to its pre-split-and-packed state. | 965 // section back to its pre-split-and-packed state. |
958 relocations.assign(relative_relocations.begin(), relative_relocations.end()); | 966 relocations.assign(relative_relocations.begin(), relative_relocations.end()); |
959 relocations.insert(relocations.end(), | 967 relocations.insert(relocations.end(), |
960 other_relocations.begin(), other_relocations.end()); | 968 other_relocations.begin(), other_relocations.end()); |
961 const void* section_data = &relocations[0]; | 969 const void* section_data = &relocations[0]; |
962 const size_t bytes = relocations.size() * sizeof(relocations[0]); | 970 const size_t bytes = relocations.size() * sizeof(relocations[0]); |
963 LOG("Total : %lu entries\n", relocations.size()); | 971 LOG(INFO) << "Total : " << relocations.size() << " entries"; |
964 ResizeSection(elf_, rel_dyn_section_, bytes); | 972 ResizeSection(elf_, rel_dyn_section_, bytes); |
965 RewriteSectionData(data, section_data, bytes); | 973 RewriteSectionData(data, section_data, bytes); |
966 | 974 |
967 // Nearly empty the current .android.rel.dyn section. Leaves a four-byte | 975 // Nearly empty the current .android.rel.dyn section. Leaves a four-byte |
968 // stub so that some data remains allocated to the section. This is a | 976 // stub so that some data remains allocated to the section. This is a |
969 // convenience which allows us to re-pack this file again without | 977 // convenience which allows us to re-pack this file again without |
970 // having to remove the section and then add a new small one with objcopy. | 978 // having to remove the section and then add a new small one with objcopy. |
971 // The way we resize sections relies on there being some data in a section. | 979 // The way we resize sections relies on there being some data in a section. |
972 data = GetSectionData(android_rel_dyn_section_); | 980 data = GetSectionData(android_rel_dyn_section_); |
973 ResizeSection(elf_, android_rel_dyn_section_, sizeof(kStubIdentifier)); | 981 ResizeSection(elf_, android_rel_dyn_section_, sizeof(kStubIdentifier)); |
(...skipping 18 matching lines...)
992 // Flush rewritten shared object file data. | 1000 // Flush rewritten shared object file data. |
993 void ElfFile::Flush() { | 1001 void ElfFile::Flush() { |
994 // Flag all ELF data held in memory as needing to be written back to the | 1002 // Flag all ELF data held in memory as needing to be written back to the |
995 // file, and tell libelf that we have controlled the file layout. | 1003 // file, and tell libelf that we have controlled the file layout. |
996 elf_flagelf(elf_, ELF_C_SET, ELF_F_DIRTY); | 1004 elf_flagelf(elf_, ELF_C_SET, ELF_F_DIRTY); |
997 elf_flagelf(elf_, ELF_C_SET, ELF_F_LAYOUT); | 1005 elf_flagelf(elf_, ELF_C_SET, ELF_F_LAYOUT); |
998 | 1006 |
999 // Write ELF data back to disk. | 1007 // Write ELF data back to disk. |
1000 const off_t file_bytes = elf_update(elf_, ELF_C_WRITE); | 1008 const off_t file_bytes = elf_update(elf_, ELF_C_WRITE); |
1001 CHECK(file_bytes > 0); | 1009 CHECK(file_bytes > 0); |
1002 VLOG("elf_update returned: %lu\n", file_bytes); | 1010 VLOG(1) << "elf_update returned: " << file_bytes; |
1003 | 1011 |
1004 // Clean up libelf, and truncate the output file to the number of bytes | 1012 // Clean up libelf, and truncate the output file to the number of bytes |
1005 // written by elf_update(). | 1013 // written by elf_update(). |
1006 elf_end(elf_); | 1014 elf_end(elf_); |
1007 elf_ = NULL; | 1015 elf_ = NULL; |
1008 const int truncate = ftruncate(fd_, file_bytes); | 1016 const int truncate = ftruncate(fd_, file_bytes); |
1009 CHECK(truncate == 0); | 1017 CHECK(truncate == 0); |
1010 } | 1018 } |
1011 | 1019 |
1012 } // namespace relocation_packer | 1020 } // namespace relocation_packer |
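For orientation, here is a hedged sketch of how a driver might exercise this class end to end. The packer's real entry point lives in a separate file not shown in this diff; the ElfFile(fd) constructor and the pack/unpack flag handling are assumptions.

```cpp
#include <fcntl.h>
#include <libelf.h>
#include <unistd.h>

#include "elf_file.h"

// Hypothetical driver; the real main() is elsewhere and may differ.
// ElfFile taking a file descriptor is assumed from the fd_ member above.
int main(int argc, char** argv) {
  if (argc < 2)
    return 1;
  const bool pack = true;  // Stand-in for a --pack/--unpack style flag.

  // libelf requires a version handshake before elf_begin() will work.
  if (elf_version(EV_CURRENT) == EV_NONE)
    return 1;

  const int fd = open(argv[1], O_RDWR);
  if (fd < 0)
    return 1;

  relocation_packer::ElfFile elf_file(fd);
  const bool ok =
      pack ? elf_file.PackRelocations() : elf_file.UnpackRelocations();

  close(fd);
  return ok ? 0 : 1;
}
```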