Chromium Code Reviews

Side by Side Diff: tools/relocation_packer/src/elf_file.cc

Issue 404553003: Create builds configured for ARM and AARCH64. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Purge unused includes. Created 6 years, 5 months ago
OLD | NEW
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 // TODO(simonb): Extend for 64-bit target libraries.
6
7 #include "elf_file.h" 5 #include "elf_file.h"
8 6
9 #include <stdlib.h> 7 #include <stdlib.h>
10 #include <sys/types.h> 8 #include <sys/types.h>
11 #include <unistd.h> 9 #include <unistd.h>
12 #include <string> 10 #include <string>
13 #include <vector> 11 #include <vector>
14 12
15 #include "debug.h" 13 #include "debug.h"
14 #include "elf_traits.h"
16 #include "libelf.h" 15 #include "libelf.h"
17 #include "packer.h" 16 #include "packer.h"
18 17
19 namespace relocation_packer { 18 namespace relocation_packer {
20 19
21 // Stub identifier written to 'null out' packed data, "NULL". 20 // Stub identifier written to 'null out' packed data, "NULL".
22 static const Elf32_Word kStubIdentifier = 0x4c4c554eu; 21 static const uint32_t kStubIdentifier = 0x4c4c554eu;
23 22
24 // Out-of-band dynamic tags used to indicate the offset and size of the 23 // Out-of-band dynamic tags used to indicate the offset and size of the
25 // .android.rel.dyn section. 24 // .android.rel.dyn section.
26 static const Elf32_Sword DT_ANDROID_ARM_REL_OFFSET = DT_LOPROC; 25 static const ELF::Sword DT_ANDROID_ARM_REL_OFFSET = DT_LOPROC;
27 static const Elf32_Sword DT_ANDROID_ARM_REL_SIZE = DT_LOPROC + 1; 26 static const ELF::Sword DT_ANDROID_ARM_REL_SIZE = DT_LOPROC + 1;
rmcilroy 2014/07/18 14:05:17 Should these be architecture specific? If not, ma
simonb (inactive) 2014/07/21 12:15:49 DT_LOPROC is in ELF's "processor-specific" range,
rmcilroy 2014/07/21 15:22:00 I still think these should be renamed to DT_ANDROI
simonb (inactive) 2014/07/21 16:02:19 Done.
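For context, a minimal sketch (not part of this CL) of how a consumer such as the runtime linker might use these out-of-band tags to locate the packed relocation data. The helper name and its inputs are hypothetical; only standard <elf.h> definitions are assumed, with DT_LOPROC and DT_LOPROC + 1 standing in for the two tags defined above.

    #include <elf.h>
    #include <stddef.h>

    // Hypothetical consumer-side helper: scan the PT_DYNAMIC entries for the
    // out-of-band tags and report the offset and size of the packed data.
    static bool FindPackedRelocations(const Elf32_Dyn* dynamic, size_t count,
                                      Elf32_Off* offset, Elf32_Word* size) {
      bool found_offset = false;
      bool found_size = false;
      for (size_t i = 0; i < count && dynamic[i].d_tag != DT_NULL; ++i) {
        if (dynamic[i].d_tag == DT_LOPROC) {             // DT_ANDROID_ARM_REL_OFFSET
          *offset = dynamic[i].d_un.d_ptr;
          found_offset = true;
        } else if (dynamic[i].d_tag == DT_LOPROC + 1) {  // DT_ANDROID_ARM_REL_SIZE
          *size = dynamic[i].d_un.d_val;
          found_size = true;
        }
      }
      return found_offset && found_size;
    }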
28 27
29 // Alignment to preserve, in bytes. This must be at least as large as the 28 // Alignment to preserve, in bytes. This must be at least as large as the
30 // largest d_align and sh_addralign values found in the loaded file. 29 // largest d_align and sh_addralign values found in the loaded file.
31 static const size_t kPreserveAlignment = 256; 30 static const size_t kPreserveAlignment = 256;
32 31
33 namespace { 32 namespace {
34 33
35 // Get section data. Checks that the section has exactly one data entry, 34 // Get section data. Checks that the section has exactly one data entry,
36 // so that the section size and the data size are the same. True in 35 // so that the section size and the data size are the same. True in
37 // practice for all sections we resize when packing or unpacking. Done 36 // practice for all sections we resize when packing or unpacking. Done
(...skipping 10 matching lines...)
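The folded lines above include the body of GetSectionData(). A minimal sketch of what it plausibly looks like, assuming only libelf's elf_getdata() and the CHECK macro from debug.h that this file already includes (the real body is hidden by the fold):

    static Elf_Data* GetSectionData(Elf_Scn* section) {
      // First (and, per the comment above, only) data descriptor of the section.
      Elf_Data* data = elf_getdata(section, NULL);
      CHECK(data && elf_getdata(section, data) == NULL);
      return data;
    }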
48 void RewriteSectionData(Elf_Data* data, 47 void RewriteSectionData(Elf_Data* data,
49 const void* section_data, 48 const void* section_data,
50 size_t size) { 49 size_t size) {
51 CHECK(size == data->d_size); 50 CHECK(size == data->d_size);
52 uint8_t* area = new uint8_t[size]; 51 uint8_t* area = new uint8_t[size];
53 memcpy(area, section_data, size); 52 memcpy(area, section_data, size);
54 data->d_buf = area; 53 data->d_buf = area;
55 } 54 }
56 55
57 // Verbose ELF header logging. 56 // Verbose ELF header logging.
58 void VerboseLogElfHeader(const Elf32_Ehdr* elf_header) { 57 void VerboseLogElfHeader(const ELF::Ehdr* elf_header) {
59 VLOG(1) << "e_phoff = " << elf_header->e_phoff; 58 VLOG(1) << "e_phoff = " << elf_header->e_phoff;
60 VLOG(1) << "e_shoff = " << elf_header->e_shoff; 59 VLOG(1) << "e_shoff = " << elf_header->e_shoff;
61 VLOG(1) << "e_ehsize = " << elf_header->e_ehsize; 60 VLOG(1) << "e_ehsize = " << elf_header->e_ehsize;
62 VLOG(1) << "e_phentsize = " << elf_header->e_phentsize; 61 VLOG(1) << "e_phentsize = " << elf_header->e_phentsize;
63 VLOG(1) << "e_phnum = " << elf_header->e_phnum; 62 VLOG(1) << "e_phnum = " << elf_header->e_phnum;
64 VLOG(1) << "e_shnum = " << elf_header->e_shnum; 63 VLOG(1) << "e_shnum = " << elf_header->e_shnum;
65 VLOG(1) << "e_shstrndx = " << elf_header->e_shstrndx; 64 VLOG(1) << "e_shstrndx = " << elf_header->e_shstrndx;
66 } 65 }
67 66
68 // Verbose ELF program header logging. 67 // Verbose ELF program header logging.
69 void VerboseLogProgramHeader(size_t program_header_index, 68 void VerboseLogProgramHeader(size_t program_header_index,
70 const Elf32_Phdr* program_header) { 69 const ELF::Phdr* program_header) {
71 std::string type; 70 std::string type;
72 switch (program_header->p_type) { 71 switch (program_header->p_type) {
73 case PT_NULL: type = "NULL"; break; 72 case PT_NULL: type = "NULL"; break;
74 case PT_LOAD: type = "LOAD"; break; 73 case PT_LOAD: type = "LOAD"; break;
75 case PT_DYNAMIC: type = "DYNAMIC"; break; 74 case PT_DYNAMIC: type = "DYNAMIC"; break;
76 case PT_INTERP: type = "INTERP"; break; 75 case PT_INTERP: type = "INTERP"; break;
77 case PT_NOTE: type = "NOTE"; break; 76 case PT_NOTE: type = "NOTE"; break;
78 case PT_SHLIB: type = "SHLIB"; break; 77 case PT_SHLIB: type = "SHLIB"; break;
79 case PT_PHDR: type = "PHDR"; break; 78 case PT_PHDR: type = "PHDR"; break;
80 case PT_TLS: type = "TLS"; break; 79 case PT_TLS: type = "TLS"; break;
81 default: type = "(OTHER)"; break; 80 default: type = "(OTHER)"; break;
82 } 81 }
83 VLOG(1) << "phdr " << program_header_index << " : " << type; 82 VLOG(1) << "phdr " << program_header_index << " : " << type;
84 VLOG(1) << " p_offset = " << program_header->p_offset; 83 VLOG(1) << " p_offset = " << program_header->p_offset;
85 VLOG(1) << " p_vaddr = " << program_header->p_vaddr; 84 VLOG(1) << " p_vaddr = " << program_header->p_vaddr;
86 VLOG(1) << " p_paddr = " << program_header->p_paddr; 85 VLOG(1) << " p_paddr = " << program_header->p_paddr;
87 VLOG(1) << " p_filesz = " << program_header->p_filesz; 86 VLOG(1) << " p_filesz = " << program_header->p_filesz;
88 VLOG(1) << " p_memsz = " << program_header->p_memsz; 87 VLOG(1) << " p_memsz = " << program_header->p_memsz;
89 } 88 }
90 89
91 // Verbose ELF section header logging. 90 // Verbose ELF section header logging.
92 void VerboseLogSectionHeader(const std::string& section_name, 91 void VerboseLogSectionHeader(const std::string& section_name,
93 const Elf32_Shdr* section_header) { 92 const ELF::Shdr* section_header) {
94 VLOG(1) << "section " << section_name; 93 VLOG(1) << "section " << section_name;
95 VLOG(1) << " sh_addr = " << section_header->sh_addr; 94 VLOG(1) << " sh_addr = " << section_header->sh_addr;
96 VLOG(1) << " sh_offset = " << section_header->sh_offset; 95 VLOG(1) << " sh_offset = " << section_header->sh_offset;
97 VLOG(1) << " sh_size = " << section_header->sh_size; 96 VLOG(1) << " sh_size = " << section_header->sh_size;
98 VLOG(1) << " sh_addralign = " << section_header->sh_addralign; 97 VLOG(1) << " sh_addralign = " << section_header->sh_addralign;
99 } 98 }
100 99
101 // Verbose ELF section data logging. 100 // Verbose ELF section data logging.
102 void VerboseLogSectionData(const Elf_Data* data) { 101 void VerboseLogSectionData(const Elf_Data* data) {
103 VLOG(1) << " data"; 102 VLOG(1) << " data";
104 VLOG(1) << " d_buf = " << data->d_buf; 103 VLOG(1) << " d_buf = " << data->d_buf;
105 VLOG(1) << " d_off = " << data->d_off; 104 VLOG(1) << " d_off = " << data->d_off;
106 VLOG(1) << " d_size = " << data->d_size; 105 VLOG(1) << " d_size = " << data->d_size;
107 VLOG(1) << " d_align = " << data->d_align; 106 VLOG(1) << " d_align = " << data->d_align;
108 } 107 }
109 108
110 } // namespace 109 } // namespace
111 110
112 // Load the complete ELF file into a memory image in libelf, and identify 111 // Load the complete ELF file into a memory image in libelf, and identify
113 // the .rel.dyn, .dynamic, and .android.rel.dyn sections. No-op if the 112 // the .rel.dyn, .dynamic, and .android.rel.dyn sections. No-op if the
114 // ELF file has already been loaded. 113 // ELF file has already been loaded.
115 bool ElfFile::Load() { 114 bool ElfFile::Load() {
116 if (elf_) 115 if (elf_)
117 return true; 116 return true;
118 117
119 elf_ = elf_begin(fd_, ELF_C_RDWR, NULL); 118 Elf* elf = elf_begin(fd_, ELF_C_RDWR, NULL);
120 CHECK(elf_); 119 CHECK(elf);
121 120
122 if (elf_kind(elf_) != ELF_K_ELF) { 121 if (elf_kind(elf) != ELF_K_ELF) {
123 LOG(ERROR) << "File not in ELF format"; 122 LOG(ERROR) << "File not in ELF format";
124 return false; 123 return false;
125 } 124 }
126 125
127 Elf32_Ehdr* elf_header = elf32_getehdr(elf_); 126 ELF::Ehdr* elf_header = ELF::getehdr(elf);
128 if (!elf_header) { 127 if (!elf_header) {
129 LOG(ERROR) << "Failed to load ELF header"; 128 LOG(ERROR) << "Failed to load ELF header: " << elf_errmsg(elf_errno());
130 return false; 129 return false;
131 } 130 }
132 if (elf_header->e_machine != EM_ARM) { 131 if (elf_header->e_machine != ELF::kMachine) {
133 LOG(ERROR) << "File is not an arm32 ELF file"; 132 LOG(ERROR) << "ELF file architecture is not " << ELF::Machine();
134 return false; 133 return false;
135 } 134 }
136 135
137 // Require that our endianness matches that of the target, and that both 136 // Require that our endianness matches that of the target, and that both
138 // are little-endian. Safe for all current build/target combinations. 137 // are little-endian. Safe for all current build/target combinations.
139 const int endian = static_cast<int>(elf_header->e_ident[5]); 138 const int endian = elf_header->e_ident[EI_DATA];
140 CHECK(endian == ELFDATA2LSB); 139 CHECK(endian == ELFDATA2LSB);
141 CHECK(__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__); 140 CHECK(__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__);
142 141
143 VLOG(1) << "endian = " << endian; 142 // Also require that the file class is as expected.
143 const int file_class = elf_header->e_ident[EI_CLASS];
144 CHECK(file_class == ELF::kFileClass);
145
146 VLOG(1) << "endian = " << endian << ", file class = " << file_class;
144 VerboseLogElfHeader(elf_header); 147 VerboseLogElfHeader(elf_header);
145 148
146 const Elf32_Phdr* elf_program_header = elf32_getphdr(elf_); 149 const ELF::Phdr* elf_program_header = ELF::getphdr(elf);
147 CHECK(elf_program_header); 150 CHECK(elf_program_header);
148 151
149 const Elf32_Phdr* dynamic_program_header = NULL; 152 const ELF::Phdr* dynamic_program_header = NULL;
150 for (size_t i = 0; i < elf_header->e_phnum; ++i) { 153 for (size_t i = 0; i < elf_header->e_phnum; ++i) {
151 const Elf32_Phdr* program_header = &elf_program_header[i]; 154 const ELF::Phdr* program_header = &elf_program_header[i];
152 VerboseLogProgramHeader(i, program_header); 155 VerboseLogProgramHeader(i, program_header);
153 156
154 if (program_header->p_type == PT_DYNAMIC) { 157 if (program_header->p_type == PT_DYNAMIC) {
155 CHECK(dynamic_program_header == NULL); 158 CHECK(dynamic_program_header == NULL);
156 dynamic_program_header = program_header; 159 dynamic_program_header = program_header;
157 } 160 }
158 } 161 }
159 CHECK(dynamic_program_header != NULL); 162 CHECK(dynamic_program_header != NULL);
160 163
161 size_t string_index; 164 size_t string_index;
162 elf_getshdrstrndx(elf_, &string_index); 165 elf_getshdrstrndx(elf, &string_index);
163 166
164 // Notes of the .rel.dyn, .android.rel.dyn, and .dynamic sections. Found 167 // Notes of the .rel.dyn, .android.rel.dyn, and .dynamic sections. Found
165 // while iterating sections, and later stored in class attributes. 168 // while iterating sections, and later stored in class attributes.
166 Elf_Scn* found_rel_dyn_section = NULL; 169 Elf_Scn* found_rel_dyn_section = NULL;
167 Elf_Scn* found_android_rel_dyn_section = NULL; 170 Elf_Scn* found_android_rel_dyn_section = NULL;
168 Elf_Scn* found_dynamic_section = NULL; 171 Elf_Scn* found_dynamic_section = NULL;
169 172
170 // Flag set if we encounter any .debug* section. We do not adjust any 173 // Flag set if we encounter any .debug* section. We do not adjust any
171 // offsets or addresses of any debug data, so if we find one of these then 174 // offsets or addresses of any debug data, so if we find one of these then
172 // the resulting output shared object should still run, but might not be 175 // the resulting output shared object should still run, but might not be
173 // usable for debugging, disassembly, and so on. Provides a warning if 176 // usable for debugging, disassembly, and so on. Provides a warning if
174 // this occurs. 177 // this occurs.
175 bool has_debug_section = false; 178 bool has_debug_section = false;
176 179
177 Elf_Scn* section = NULL; 180 Elf_Scn* section = NULL;
178 while ((section = elf_nextscn(elf_, section)) != NULL) { 181 while ((section = elf_nextscn(elf, section)) != NULL) {
179 const Elf32_Shdr* section_header = elf32_getshdr(section); 182 const ELF::Shdr* section_header = ELF::getshdr(section);
180 std::string name = elf_strptr(elf_, string_index, section_header->sh_name); 183 std::string name = elf_strptr(elf, string_index, section_header->sh_name);
181 VerboseLogSectionHeader(name, section_header); 184 VerboseLogSectionHeader(name, section_header);
182 185
183 // Note special sections as we encounter them. 186 // Note special sections as we encounter them.
184 if (name == ".rel.dyn") { 187 if (name == ".rel.dyn") {
185 found_rel_dyn_section = section; 188 found_rel_dyn_section = section;
186 } 189 }
187 if (name == ".android.rel.dyn") { 190 if (name == ".android.rel.dyn") {
188 found_android_rel_dyn_section = section; 191 found_android_rel_dyn_section = section;
189 } 192 }
190 if (section_header->sh_offset == dynamic_program_header->p_offset) { 193 if (section_header->sh_offset == dynamic_program_header->p_offset) {
(...skipping 28 matching lines...)
219 LOG(ERROR) << "Missing .android.rel.dyn section " 222 LOG(ERROR) << "Missing .android.rel.dyn section "
220 << "(to fix, run with --help and follow the pre-packing " 223 << "(to fix, run with --help and follow the pre-packing "
221 << "instructions)"; 224 << "instructions)";
222 return false; 225 return false;
223 } 226 }
224 227
225 if (has_debug_section) { 228 if (has_debug_section) {
226 LOG(WARNING) << "Found .debug section(s), and ignored them"; 229 LOG(WARNING) << "Found .debug section(s), and ignored them";
227 } 230 }
228 231
232 elf_ = elf;
229 rel_dyn_section_ = found_rel_dyn_section; 233 rel_dyn_section_ = found_rel_dyn_section;
230 dynamic_section_ = found_dynamic_section; 234 dynamic_section_ = found_dynamic_section;
231 android_rel_dyn_section_ = found_android_rel_dyn_section; 235 android_rel_dyn_section_ = found_android_rel_dyn_section;
232 return true; 236 return true;
233 } 237 }
234 238
235 namespace { 239 namespace {
236 240
237 // Helper for ResizeSection(). Adjust the main ELF header for the hole. 241 // Helper for ResizeSection(). Adjust the main ELF header for the hole.
238 void AdjustElfHeaderForHole(Elf32_Ehdr* elf_header, 242 void AdjustElfHeaderForHole(ELF::Ehdr* elf_header,
239 Elf32_Off hole_start, 243 ELF::Off hole_start,
240 int32_t hole_size) { 244 ssize_t hole_size) {
241 if (elf_header->e_phoff > hole_start) { 245 if (elf_header->e_phoff > hole_start) {
242 elf_header->e_phoff += hole_size; 246 elf_header->e_phoff += hole_size;
243 VLOG(1) << "e_phoff adjusted to " << elf_header->e_phoff; 247 VLOG(1) << "e_phoff adjusted to " << elf_header->e_phoff;
244 } 248 }
245 if (elf_header->e_shoff > hole_start) { 249 if (elf_header->e_shoff > hole_start) {
246 elf_header->e_shoff += hole_size; 250 elf_header->e_shoff += hole_size;
247 VLOG(1) << "e_shoff adjusted to " << elf_header->e_shoff; 251 VLOG(1) << "e_shoff adjusted to " << elf_header->e_shoff;
248 } 252 }
249 } 253 }
250 254
251 // Helper for ResizeSection(). Adjust all program headers for the hole. 255 // Helper for ResizeSection(). Adjust all program headers for the hole.
252 void AdjustProgramHeadersForHole(Elf32_Phdr* elf_program_header, 256 void AdjustProgramHeadersForHole(ELF::Phdr* elf_program_header,
253 size_t program_header_count, 257 size_t program_header_count,
254 Elf32_Off hole_start, 258 ELF::Off hole_start,
255 int32_t hole_size) { 259 ssize_t hole_size) {
256 for (size_t i = 0; i < program_header_count; ++i) { 260 for (size_t i = 0; i < program_header_count; ++i) {
257 Elf32_Phdr* program_header = &elf_program_header[i]; 261 ELF::Phdr* program_header = &elf_program_header[i];
258 262
259 if (program_header->p_offset > hole_start) { 263 if (program_header->p_offset > hole_start) {
260 // The hole start is past this segment, so adjust offsets and addrs. 264 // The hole start is past this segment, so adjust offsets and addrs.
261 program_header->p_offset += hole_size; 265 program_header->p_offset += hole_size;
262 VLOG(1) << "phdr " << i 266 VLOG(1) << "phdr " << i
263 << " p_offset adjusted to "<< program_header->p_offset; 267 << " p_offset adjusted to "<< program_header->p_offset;
264 268
265 // Only adjust vaddr and paddr if this program header has them. 269 // Only adjust vaddr and paddr if this program header has them.
266 if (program_header->p_vaddr != 0) { 270 if (program_header->p_vaddr != 0) {
267 program_header->p_vaddr += hole_size; 271 program_header->p_vaddr += hole_size;
(...skipping 14 matching lines...)
282 << " p_filesz adjusted to " << program_header->p_filesz; 286 << " p_filesz adjusted to " << program_header->p_filesz;
283 program_header->p_memsz += hole_size; 287 program_header->p_memsz += hole_size;
284 VLOG(1) << "phdr " << i 288 VLOG(1) << "phdr " << i
285 << " p_memsz adjusted to " << program_header->p_memsz; 289 << " p_memsz adjusted to " << program_header->p_memsz;
286 } 290 }
287 } 291 }
288 } 292 }
289 293
290 // Helper for ResizeSection(). Adjust all section headers for the hole. 294 // Helper for ResizeSection(). Adjust all section headers for the hole.
291 void AdjustSectionHeadersForHole(Elf* elf, 295 void AdjustSectionHeadersForHole(Elf* elf,
292 Elf32_Off hole_start, 296 ELF::Off hole_start,
293 int32_t hole_size) { 297 ssize_t hole_size) {
294 size_t string_index; 298 size_t string_index;
295 elf_getshdrstrndx(elf, &string_index); 299 elf_getshdrstrndx(elf, &string_index);
296 300
297 Elf_Scn* section = NULL; 301 Elf_Scn* section = NULL;
298 while ((section = elf_nextscn(elf, section)) != NULL) { 302 while ((section = elf_nextscn(elf, section)) != NULL) {
299 Elf32_Shdr* section_header = elf32_getshdr(section); 303 ELF::Shdr* section_header = ELF::getshdr(section);
300 std::string name = elf_strptr(elf, string_index, section_header->sh_name); 304 std::string name = elf_strptr(elf, string_index, section_header->sh_name);
301 305
302 if (section_header->sh_offset > hole_start) { 306 if (section_header->sh_offset > hole_start) {
303 section_header->sh_offset += hole_size; 307 section_header->sh_offset += hole_size;
304 VLOG(1) << "section " << name 308 VLOG(1) << "section " << name
305 << " sh_offset adjusted to " << section_header->sh_offset; 309 << " sh_offset adjusted to " << section_header->sh_offset;
306 // Only adjust section addr if this section has one. 310 // Only adjust section addr if this section has one.
307 if (section_header->sh_addr != 0) { 311 if (section_header->sh_addr != 0) {
308 section_header->sh_addr += hole_size; 312 section_header->sh_addr += hole_size;
309 VLOG(1) << "section " << name 313 VLOG(1) << "section " << name
310 << " sh_addr adjusted to " << section_header->sh_addr; 314 << " sh_addr adjusted to " << section_header->sh_addr;
311 } 315 }
312 } 316 }
313 } 317 }
314 } 318 }
315 319
316 // Helper for ResizeSection(). Adjust the .dynamic section for the hole. 320 // Helper for ResizeSection(). Adjust the .dynamic section for the hole.
317 void AdjustDynamicSectionForHole(Elf_Scn* dynamic_section, 321 void AdjustDynamicSectionForHole(Elf_Scn* dynamic_section,
318 bool is_rel_dyn_resize, 322 bool is_rel_dyn_resize,
319 Elf32_Off hole_start, 323 ELF::Off hole_start,
320 int32_t hole_size) { 324 ssize_t hole_size) {
321 Elf_Data* data = GetSectionData(dynamic_section); 325 Elf_Data* data = GetSectionData(dynamic_section);
322 326
323 const Elf32_Dyn* dynamic_base = reinterpret_cast<Elf32_Dyn*>(data->d_buf); 327 const ELF::Dyn* dynamic_base = reinterpret_cast<ELF::Dyn*>(data->d_buf);
324 std::vector<Elf32_Dyn> dynamics( 328 std::vector<ELF::Dyn> dynamics(
325 dynamic_base, 329 dynamic_base,
326 dynamic_base + data->d_size / sizeof(dynamics[0])); 330 dynamic_base + data->d_size / sizeof(dynamics[0]));
327 331
328 for (size_t i = 0; i < dynamics.size(); ++i) { 332 for (size_t i = 0; i < dynamics.size(); ++i) {
329 Elf32_Dyn* dynamic = &dynamics[i]; 333 ELF::Dyn* dynamic = &dynamics[i];
330 const Elf32_Sword tag = dynamic->d_tag; 334 const ELF::Sword tag = dynamic->d_tag;
331 // Any tags that hold offsets are adjustment candidates. 335 // Any tags that hold offsets are adjustment candidates.
332 const bool is_adjustable = (tag == DT_PLTGOT || 336 const bool is_adjustable = (tag == DT_PLTGOT ||
333 tag == DT_HASH || 337 tag == DT_HASH ||
334 tag == DT_STRTAB || 338 tag == DT_STRTAB ||
335 tag == DT_SYMTAB || 339 tag == DT_SYMTAB ||
336 tag == DT_RELA || 340 tag == DT_RELA ||
337 tag == DT_INIT || 341 tag == DT_INIT ||
338 tag == DT_FINI || 342 tag == DT_FINI ||
339 tag == DT_REL || 343 tag == DT_REL ||
340 tag == DT_JMPREL || 344 tag == DT_JMPREL ||
341 tag == DT_INIT_ARRAY || 345 tag == DT_INIT_ARRAY ||
342 tag == DT_FINI_ARRAY || 346 tag == DT_FINI_ARRAY ||
343 tag == DT_ANDROID_ARM_REL_OFFSET); 347 tag == DT_ANDROID_ARM_REL_OFFSET);
344 if (is_adjustable && dynamic->d_un.d_ptr > hole_start) { 348 if (is_adjustable && dynamic->d_un.d_ptr > hole_start) {
345 dynamic->d_un.d_ptr += hole_size; 349 dynamic->d_un.d_ptr += hole_size;
346 VLOG(1) << "dynamic[" << i << "] " << dynamic->d_tag 350 VLOG(1) << "dynamic[" << i << "] " << dynamic->d_tag
347 << " d_ptr adjusted to " << dynamic->d_un.d_ptr; 351 << " d_ptr adjusted to " << dynamic->d_un.d_ptr;
348 } 352 }
349 353
350 // If we are specifically resizing .rel.dyn, we need to make some added 354 // If we are specifically resizing .rel.dyn, we need to make some added
351 // adjustments to tags that indicate the counts of R_ARM_RELATIVE 355 // adjustments to tags that indicate the counts of ARM relative
rmcilroy 2014/07/18 14:05:17 ditto (and throughout CL)
simonb (inactive) 2014/07/21 12:15:49 Still holds if ARM includes arm32 and arm64. I ha
rmcilroy 2014/07/21 15:24:52 As mentioned offline, I still think it would be be
352 // relocations in the shared object. 356 // relocations in the shared object.
353 if (is_rel_dyn_resize) { 357 if (!is_rel_dyn_resize)
354 // DT_RELSZ is the overall size of relocations. Adjust by hole size. 358 continue;
355 if (tag == DT_RELSZ) {
356 dynamic->d_un.d_val += hole_size;
357 VLOG(1) << "dynamic[" << i << "] " << dynamic->d_tag
358 << " d_val adjusted to " << dynamic->d_un.d_val;
359 }
360 359
361 // DT_RELCOUNT is the count of relative relocations. Packing reduces it 360 // DT_RELSZ is the overall size of relocations. Adjust by hole size.
362 // to the alignment padding, if any; unpacking restores it to its former 361 if (tag == DT_RELSZ) {
363 // value. The crazy linker does not use it, but we update it anyway. 362 dynamic->d_un.d_val += hole_size;
364 if (tag == DT_RELCOUNT) { 363 VLOG(1) << "dynamic[" << i << "] " << dynamic->d_tag
365 // Cast sizeof to a signed type to avoid the division result being 364 << " d_val adjusted to " << dynamic->d_un.d_val;
366 // promoted into an unsigned size_t. 365 }
367 const ssize_t sizeof_rel = static_cast<ssize_t>(sizeof(Elf32_Rel));
368 dynamic->d_un.d_val += hole_size / sizeof_rel;
369 VLOG(1) << "dynamic[" << i << "] " << dynamic->d_tag
370 << " d_val adjusted to " << dynamic->d_un.d_val;
371 }
372 366
373 // DT_RELENT doesn't change, but make sure it is what we expect. 367 // DT_RELCOUNT is the count of relative relocations. Packing reduces it
374 if (tag == DT_RELENT) { 368 // to the alignment padding, if any; unpacking restores it to its former
375 CHECK(dynamic->d_un.d_val == sizeof(Elf32_Rel)); 369 // value. The crazy linker does not use it, but we update it anyway.
376 } 370 if (tag == DT_RELCOUNT) {
371 // Cast sizeof to a signed type to avoid the division result being
372 // promoted into an unsigned size_t.
373 const ssize_t sizeof_rel = static_cast<ssize_t>(sizeof(Elf32_Rel));
374 dynamic->d_un.d_val += hole_size / sizeof_rel;
375 VLOG(1) << "dynamic[" << i << "] " << dynamic->d_tag
376 << " d_val adjusted to " << dynamic->d_un.d_val;
377 }
378
379 // DT_RELENT doesn't change, but make sure it is what we expect.
380 if (tag == DT_RELENT) {
381 CHECK(dynamic->d_un.d_val == sizeof(ELF::Rel));
377 } 382 }
378 } 383 }
379 384
380 void* section_data = &dynamics[0]; 385 void* section_data = &dynamics[0];
381 size_t bytes = dynamics.size() * sizeof(dynamics[0]); 386 size_t bytes = dynamics.size() * sizeof(dynamics[0]);
382 RewriteSectionData(data, section_data, bytes); 387 RewriteSectionData(data, section_data, bytes);
383 } 388 }
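As a worked illustration of the DT_RELSZ/DT_RELCOUNT adjustment above (figures are illustrative, for the arm32 case where an Elf32_Rel is 8 bytes): if packing shrinks .rel.dyn by 7936 bytes, hole_size is -7936, so DT_RELSZ drops by 7936 and DT_RELCOUNT drops by 7936 / 8 = 992 entries. The cast of sizeof to ssize_t matters here: dividing the negative hole_size by an unsigned size_t would first convert -7936 to a huge unsigned value and yield a nonsense count.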
384 389
385 // Helper for ResizeSection(). Adjust the .dynsym section for the hole. 390 // Helper for ResizeSection(). Adjust the .dynsym section for the hole.
386 // We need to adjust the values for the symbols represented in it. 391 // We need to adjust the values for the symbols represented in it.
387 void AdjustDynSymSectionForHole(Elf_Scn* dynsym_section, 392 void AdjustDynSymSectionForHole(Elf_Scn* dynsym_section,
388 Elf32_Off hole_start, 393 ELF::Off hole_start,
389 int32_t hole_size) { 394 ssize_t hole_size) {
390 Elf_Data* data = GetSectionData(dynsym_section); 395 Elf_Data* data = GetSectionData(dynsym_section);
391 396
392 const Elf32_Sym* dynsym_base = reinterpret_cast<Elf32_Sym*>(data->d_buf); 397 const ELF::Sym* dynsym_base = reinterpret_cast<ELF::Sym*>(data->d_buf);
393 std::vector<Elf32_Sym> dynsyms 398 std::vector<ELF::Sym> dynsyms
394 (dynsym_base, 399 (dynsym_base,
395 dynsym_base + data->d_size / sizeof(dynsyms[0])); 400 dynsym_base + data->d_size / sizeof(dynsyms[0]));
396 401
397 for (size_t i = 0; i < dynsyms.size(); ++i) { 402 for (size_t i = 0; i < dynsyms.size(); ++i) {
398 Elf32_Sym* dynsym = &dynsyms[i]; 403 ELF::Sym* dynsym = &dynsyms[i];
399 const int type = static_cast<int>(ELF32_ST_TYPE(dynsym->st_info)); 404 const int type = static_cast<int>(ELF_ST_TYPE(dynsym->st_info));
400 const bool is_adjustable = (type == STT_OBJECT || 405 const bool is_adjustable = (type == STT_OBJECT ||
401 type == STT_FUNC || 406 type == STT_FUNC ||
402 type == STT_SECTION || 407 type == STT_SECTION ||
403 type == STT_FILE || 408 type == STT_FILE ||
404 type == STT_COMMON || 409 type == STT_COMMON ||
405 type == STT_TLS); 410 type == STT_TLS);
406 if (is_adjustable && dynsym->st_value > hole_start) { 411 if (is_adjustable && dynsym->st_value > hole_start) {
407 dynsym->st_value += hole_size; 412 dynsym->st_value += hole_size;
408 VLOG(1) << "dynsym[" << i << "] type=" << type 413 VLOG(1) << "dynsym[" << i << "] type=" << type
409 << " st_value adjusted to " << dynsym->st_value; 414 << " st_value adjusted to " << dynsym->st_value;
410 } 415 }
411 } 416 }
412 417
413 void* section_data = &dynsyms[0]; 418 void* section_data = &dynsyms[0];
414 size_t bytes = dynsyms.size() * sizeof(dynsyms[0]); 419 size_t bytes = dynsyms.size() * sizeof(dynsyms[0]);
415 RewriteSectionData(data, section_data, bytes); 420 RewriteSectionData(data, section_data, bytes);
416 } 421 }
417 422
418 // Helper for ResizeSection(). Adjust the .rel.plt section for the hole. 423 // Helper for ResizeSection(). Adjust the .rel.plt section for the hole.
419 // We need to adjust the offset of every relocation inside it that falls 424 // We need to adjust the offset of every relocation inside it that falls
420 // beyond the hole start. 425 // beyond the hole start.
421 void AdjustRelPltSectionForHole(Elf_Scn* relplt_section, 426 void AdjustRelPltSectionForHole(Elf_Scn* relplt_section,
422 Elf32_Off hole_start, 427 ELF::Off hole_start,
423 int32_t hole_size) { 428 ssize_t hole_size) {
424 Elf_Data* data = GetSectionData(relplt_section); 429 Elf_Data* data = GetSectionData(relplt_section);
425 430
426 const Elf32_Rel* relplt_base = reinterpret_cast<Elf32_Rel*>(data->d_buf); 431 const ELF::Rel* relplt_base = reinterpret_cast<ELF::Rel*>(data->d_buf);
427 std::vector<Elf32_Rel> relplts( 432 std::vector<ELF::Rel> relplts(
428 relplt_base, 433 relplt_base,
429 relplt_base + data->d_size / sizeof(relplts[0])); 434 relplt_base + data->d_size / sizeof(relplts[0]));
430 435
431 for (size_t i = 0; i < relplts.size(); ++i) { 436 for (size_t i = 0; i < relplts.size(); ++i) {
432 Elf32_Rel* relplt = &relplts[i]; 437 ELF::Rel* relplt = &relplts[i];
433 if (relplt->r_offset > hole_start) { 438 if (relplt->r_offset > hole_start) {
434 relplt->r_offset += hole_size; 439 relplt->r_offset += hole_size;
435 VLOG(1) << "relplt[" << i 440 VLOG(1) << "relplt[" << i
436 << "] r_offset adjusted to " << relplt->r_offset; 441 << "] r_offset adjusted to " << relplt->r_offset;
437 } 442 }
438 } 443 }
439 444
440 void* section_data = &relplts[0]; 445 void* section_data = &relplts[0];
441 size_t bytes = relplts.size() * sizeof(relplts[0]); 446 size_t bytes = relplts.size() * sizeof(relplts[0]);
442 RewriteSectionData(data, section_data, bytes); 447 RewriteSectionData(data, section_data, bytes);
443 } 448 }
444 449
445 // Helper for ResizeSection(). Adjust the .symtab section for the hole. 450 // Helper for ResizeSection(). Adjust the .symtab section for the hole.
446 // We want to adjust the value of every symbol in it that falls beyond 451 // We want to adjust the value of every symbol in it that falls beyond
447 // the hole start. 452 // the hole start.
448 void AdjustSymTabSectionForHole(Elf_Scn* symtab_section, 453 void AdjustSymTabSectionForHole(Elf_Scn* symtab_section,
449 Elf32_Off hole_start, 454 ELF::Off hole_start,
450 int32_t hole_size) { 455 ssize_t hole_size) {
451 Elf_Data* data = GetSectionData(symtab_section); 456 Elf_Data* data = GetSectionData(symtab_section);
452 457
453 const Elf32_Sym* symtab_base = reinterpret_cast<Elf32_Sym*>(data->d_buf); 458 const ELF::Sym* symtab_base = reinterpret_cast<ELF::Sym*>(data->d_buf);
454 std::vector<Elf32_Sym> symtab( 459 std::vector<ELF::Sym> symtab(
455 symtab_base, 460 symtab_base,
456 symtab_base + data->d_size / sizeof(symtab[0])); 461 symtab_base + data->d_size / sizeof(symtab[0]));
457 462
458 for (size_t i = 0; i < symtab.size(); ++i) { 463 for (size_t i = 0; i < symtab.size(); ++i) {
459 Elf32_Sym* sym = &symtab[i]; 464 ELF::Sym* sym = &symtab[i];
460 if (sym->st_value > hole_start) { 465 if (sym->st_value > hole_start) {
461 sym->st_value += hole_size; 466 sym->st_value += hole_size;
462 VLOG(1) << "symtab[" << i << "] value adjusted to " << sym->st_value; 467 VLOG(1) << "symtab[" << i << "] value adjusted to " << sym->st_value;
463 } 468 }
464 } 469 }
465 470
466 void* section_data = &symtab[0]; 471 void* section_data = &symtab[0];
467 size_t bytes = symtab.size() * sizeof(symtab[0]); 472 size_t bytes = symtab.size() * sizeof(symtab[0]);
468 RewriteSectionData(data, section_data, bytes); 473 RewriteSectionData(data, section_data, bytes);
469 } 474 }
470 475
471 // Resize a section. If the new size is larger than the current size, open 476 // Resize a section. If the new size is larger than the current size, open
472 // up a hole by increasing file offsets that come after the hole. If smaller 477 // up a hole by increasing file offsets that come after the hole. If smaller
473 // than the current size, remove the hole by decreasing those offsets. 478 // than the current size, remove the hole by decreasing those offsets.
474 void ResizeSection(Elf* elf, Elf_Scn* section, size_t new_size) { 479 void ResizeSection(Elf* elf, Elf_Scn* section, size_t new_size) {
475 Elf32_Shdr* section_header = elf32_getshdr(section); 480 ELF::Shdr* section_header = ELF::getshdr(section);
476 if (section_header->sh_size == new_size) 481 if (section_header->sh_size == new_size)
477 return; 482 return;
478 483
479 // Note if we are resizing the real .rel.dyn. If yes, then we have to 484 // Note if we are resizing the real .rel.dyn. If yes, then we have to
480 // massage d_un.d_val in the dynamic section where d_tag is DT_RELSZ and 485 // massage d_un.d_val in the dynamic section where d_tag is DT_RELSZ and
481 // DT_RELCOUNT. 486 // DT_RELCOUNT.
482 size_t string_index; 487 size_t string_index;
483 elf_getshdrstrndx(elf, &string_index); 488 elf_getshdrstrndx(elf, &string_index);
484 const std::string section_name = 489 const std::string section_name =
485 elf_strptr(elf, string_index, section_header->sh_name); 490 elf_strptr(elf, string_index, section_header->sh_name);
486 const bool is_rel_dyn_resize = section_name == ".rel.dyn"; 491 const bool is_rel_dyn_resize = section_name == ".rel.dyn";
487 492
488 // Require that the section size and the data size are the same. True 493 // Require that the section size and the data size are the same. True
489 // in practice for all sections we resize when packing or unpacking. 494 // in practice for all sections we resize when packing or unpacking.
490 Elf_Data* data = GetSectionData(section); 495 Elf_Data* data = GetSectionData(section);
491 CHECK(data->d_off == 0 && data->d_size == section_header->sh_size); 496 CHECK(data->d_off == 0 && data->d_size == section_header->sh_size);
492 497
493 // Require that the section is not zero-length (that is, has allocated 498 // Require that the section is not zero-length (that is, has allocated
494 // data that we can validly expand). 499 // data that we can validly expand).
495 CHECK(data->d_size && data->d_buf); 500 CHECK(data->d_size && data->d_buf);
496 501
497 const Elf32_Off hole_start = section_header->sh_offset; 502 const ELF::Off hole_start = section_header->sh_offset;
498 const int32_t hole_size = new_size - data->d_size; 503 const ssize_t hole_size = new_size - data->d_size;
499 504
500 VLOG_IF(1, (hole_size > 0)) << "expand section size = " << data->d_size; 505 VLOG_IF(1, (hole_size > 0)) << "expand section size = " << data->d_size;
501 VLOG_IF(1, (hole_size < 0)) << "shrink section size = " << data->d_size; 506 VLOG_IF(1, (hole_size < 0)) << "shrink section size = " << data->d_size;
502 507
503 // Resize the data and the section header. 508 // Resize the data and the section header.
504 data->d_size += hole_size; 509 data->d_size += hole_size;
505 section_header->sh_size += hole_size; 510 section_header->sh_size += hole_size;
506 511
507 Elf32_Ehdr* elf_header = elf32_getehdr(elf); 512 ELF::Ehdr* elf_header = ELF::getehdr(elf);
508 Elf32_Phdr* elf_program_header = elf32_getphdr(elf); 513 ELF::Phdr* elf_program_header = ELF::getphdr(elf);
509 514
510 // Add the hole size to all offsets in the ELF file that are after the 515 // Add the hole size to all offsets in the ELF file that are after the
511 // start of the hole. If the hole size is positive we are expanding the 516 // start of the hole. If the hole size is positive we are expanding the
512 // section to create a new hole; if negative, we are closing up a hole. 517 // section to create a new hole; if negative, we are closing up a hole.
513 518
514 // Start with the main ELF header. 519 // Start with the main ELF header.
515 AdjustElfHeaderForHole(elf_header, hole_start, hole_size); 520 AdjustElfHeaderForHole(elf_header, hole_start, hole_size);
516 521
517 // Adjust all program headers. 522 // Adjust all program headers.
518 AdjustProgramHeadersForHole(elf_program_header, 523 AdjustProgramHeadersForHole(elf_program_header,
519 elf_header->e_phnum, 524 elf_header->e_phnum,
520 hole_start, 525 hole_start,
521 hole_size); 526 hole_size);
522 527
523 // Adjust all section headers. 528 // Adjust all section headers.
524 AdjustSectionHeadersForHole(elf, hole_start, hole_size); 529 AdjustSectionHeadersForHole(elf, hole_start, hole_size);
525 530
526 // We use the dynamic program header entry to locate the dynamic section. 531 // We use the dynamic program header entry to locate the dynamic section.
527 const Elf32_Phdr* dynamic_program_header = NULL; 532 const ELF::Phdr* dynamic_program_header = NULL;
528 533
529 // Find the dynamic program header entry. 534 // Find the dynamic program header entry.
530 for (size_t i = 0; i < elf_header->e_phnum; ++i) { 535 for (size_t i = 0; i < elf_header->e_phnum; ++i) {
531 Elf32_Phdr* program_header = &elf_program_header[i]; 536 ELF::Phdr* program_header = &elf_program_header[i];
532 537
533 if (program_header->p_type == PT_DYNAMIC) { 538 if (program_header->p_type == PT_DYNAMIC) {
534 dynamic_program_header = program_header; 539 dynamic_program_header = program_header;
535 } 540 }
536 } 541 }
537 CHECK(dynamic_program_header); 542 CHECK(dynamic_program_header);
538 543
539 // Sections requiring special attention, and the .android.rel.dyn offset. 544 // Sections requiring special attention, and the .android.rel.dyn offset.
540 Elf_Scn* dynamic_section = NULL; 545 Elf_Scn* dynamic_section = NULL;
541 Elf_Scn* dynsym_section = NULL; 546 Elf_Scn* dynsym_section = NULL;
542 Elf_Scn* relplt_section = NULL; 547 Elf_Scn* relplt_section = NULL;
543 Elf_Scn* symtab_section = NULL; 548 Elf_Scn* symtab_section = NULL;
544 Elf32_Off android_rel_dyn_offset = 0; 549 ELF::Off android_rel_dyn_offset = 0;
545 550
546 // Find these sections, and the .android.rel.dyn offset. 551 // Find these sections, and the .android.rel.dyn offset.
547 section = NULL; 552 section = NULL;
548 while ((section = elf_nextscn(elf, section)) != NULL) { 553 while ((section = elf_nextscn(elf, section)) != NULL) {
549 Elf32_Shdr* section_header = elf32_getshdr(section); 554 ELF::Shdr* section_header = ELF::getshdr(section);
550 std::string name = elf_strptr(elf, string_index, section_header->sh_name); 555 std::string name = elf_strptr(elf, string_index, section_header->sh_name);
551 556
552 if (section_header->sh_offset == dynamic_program_header->p_offset) { 557 if (section_header->sh_offset == dynamic_program_header->p_offset) {
553 dynamic_section = section; 558 dynamic_section = section;
554 } 559 }
555 if (name == ".dynsym") { 560 if (name == ".dynsym") {
556 dynsym_section = section; 561 dynsym_section = section;
557 } 562 }
558 if (name == ".rel.plt") { 563 if (name == ".rel.plt") {
559 relplt_section = section; 564 relplt_section = section;
(...skipping 25 matching lines...)
585 590
586 // Adjust the .rel.plt section for the hole. 591 // Adjust the .rel.plt section for the hole.
587 AdjustRelPltSectionForHole(relplt_section, hole_start, hole_size); 592 AdjustRelPltSectionForHole(relplt_section, hole_start, hole_size);
588 593
589 // If present, adjust the .symtab section for the hole. If the shared 594 // If present, adjust the .symtab section for the hole. If the shared
590 // library was stripped then .symtab will be absent. 595 // library was stripped then .symtab will be absent.
591 if (symtab_section) 596 if (symtab_section)
592 AdjustSymTabSectionForHole(symtab_section, hole_start, hole_size); 597 AdjustSymTabSectionForHole(symtab_section, hole_start, hole_size);
593 } 598 }
594 599
600 // Find the first slot in a dynamics array with the given tag. The array
601 // always ends with a free (unused) element, and which we exclude from the
602 // search. Returns dynamics->size() if not found.
603 size_t FindDynamicEntry(ELF::Sword tag,
604 std::vector<ELF::Dyn>* dynamics) {
605 // Loop until the penultimate entry. We exclude the end sentinel.
606 for (size_t i = 0; i < dynamics->size() - 1; ++i) {
607 if (dynamics->at(i).d_tag == tag)
608 return i;
609 }
610
611 // The tag was not found.
612 return dynamics->size();
613 }
614
595 // Replace the first free (unused) slot in a dynamics vector with the given 615 // Replace the first free (unused) slot in a dynamics vector with the given
596 // value. The vector always ends with a free (unused) element, so the slot 616 // value. The vector always ends with a free (unused) element, so the slot
597 // found cannot be the last one in the vector. 617 // found cannot be the last one in the vector.
598 void AddDynamicEntry(Elf32_Dyn dyn, 618 void AddDynamicEntry(const ELF::Dyn& dyn,
599 std::vector<Elf32_Dyn>* dynamics) { 619 std::vector<ELF::Dyn>* dynamics) {
600 // Loop until the penultimate entry. We cannot replace the end sentinel. 620 const size_t slot = FindDynamicEntry(DT_NULL, dynamics);
601 for (size_t i = 0; i < dynamics->size() - 1; ++i) { 621 if (slot == dynamics->size()) {
602 Elf32_Dyn &slot = dynamics->at(i); 622 LOG(FATAL) << "No spare dynamic array slots found "
603 if (slot.d_tag == DT_NULL) { 623 << "(to fix, increase gold's --spare-dynamic-tags value)";
604 slot = dyn;
605 VLOG(1) << "dynamic[" << i << "] overwritten with " << dyn.d_tag;
606 return;
607 }
608 } 624 }
609 625
610 // No free dynamics vector slot was found. 626 // Replace this entry with the one supplied.
611 LOG(FATAL) << "No spare dynamic vector slots found " 627 dynamics->at(slot) = dyn;
612 << "(to fix, increase gold's --spare-dynamic-tags value)"; 628 VLOG(1) << "dynamic[" << slot << "] overwritten with " << dyn.d_tag;
613 } 629 }
614 630
615 // Remove the element in the dynamics vector that matches the given tag with 631 // Remove the element in the dynamics vector that matches the given tag with
616 // unused slot data. Shuffle the following elements up, and ensure that the 632 // unused slot data. Shuffle the following elements up, and ensure that the
617 // last is the null sentinel. 633 // last is the null sentinel.
618 void RemoveDynamicEntry(Elf32_Sword tag, 634 void RemoveDynamicEntry(ELF::Sword tag,
619 std::vector<Elf32_Dyn>* dynamics) { 635 std::vector<ELF::Dyn>* dynamics) {
620 // Loop until the penultimate entry, and never match the end sentinel. 636 const size_t slot = FindDynamicEntry(tag, dynamics);
621 for (size_t i = 0; i < dynamics->size() - 1; ++i) { 637 CHECK(slot != dynamics->size());
622 Elf32_Dyn &slot = dynamics->at(i); 638
623 if (slot.d_tag == tag) { 639 // Remove this entry by shuffling up everything that follows.
624 for ( ; i < dynamics->size() - 1; ++i) { 640 for (size_t i = slot; i < dynamics->size() - 1; ++i) {
625 dynamics->at(i) = dynamics->at(i + 1); 641 dynamics->at(i) = dynamics->at(i + 1);
626 VLOG(1) << "dynamic[" << i 642 VLOG(1) << "dynamic[" << i
627 << "] overwritten with dynamic[" << i + 1 << "]"; 643 << "] overwritten with dynamic[" << i + 1 << "]";
628 }
629 CHECK(dynamics->at(i).d_tag == DT_NULL);
630 return;
631 }
632 } 644 }
633 645
634 // No matching dynamics vector entry was found. 646 // Ensure that the end sentinel is still present.
635 NOTREACHED(); 647 CHECK(dynamics->at(dynamics->size() - 1).d_tag == DT_NULL);
636 } 648 }
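A hedged usage sketch for these helpers (the actual call sites are further down elf_file.cc, outside the hunks shown here; the locals android_rel_dyn_offset and android_rel_dyn_size are hypothetical):

    // Packing: install the out-of-band tags that describe the packed data.
    ELF::Dyn offset_dyn;
    offset_dyn.d_tag = DT_ANDROID_ARM_REL_OFFSET;
    offset_dyn.d_un.d_ptr = android_rel_dyn_offset;
    AddDynamicEntry(offset_dyn, &dynamics);

    ELF::Dyn size_dyn;
    size_dyn.d_tag = DT_ANDROID_ARM_REL_SIZE;
    size_dyn.d_un.d_val = android_rel_dyn_size;
    AddDynamicEntry(size_dyn, &dynamics);

    // Unpacking: strip the same tags again and close up the array.
    RemoveDynamicEntry(DT_ANDROID_ARM_REL_OFFSET, &dynamics);
    RemoveDynamicEntry(DT_ANDROID_ARM_REL_SIZE, &dynamics);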
637 649
638 // Apply R_ARM_RELATIVE relocations to the file data to which they refer. 650 // Apply ARM relative relocations to the file data to which they refer.
639 // This relocates data into the area it will occupy after the hole in 651 // This relocates data into the area it will occupy after the hole in
640 // .rel.dyn is added or removed. 652 // .rel.dyn is added or removed.
641 void AdjustRelocationTargets(Elf* elf, 653 void AdjustRelocationTargets(Elf* elf,
642 Elf32_Off hole_start, 654 ELF::Off hole_start,
643 size_t hole_size, 655 size_t hole_size,
644 const std::vector<Elf32_Rel>& relocations) { 656 const std::vector<ELF::Rel>& relocations) {
645 Elf_Scn* section = NULL; 657 Elf_Scn* section = NULL;
646 while ((section = elf_nextscn(elf, section)) != NULL) { 658 while ((section = elf_nextscn(elf, section)) != NULL) {
647 const Elf32_Shdr* section_header = elf32_getshdr(section); 659 const ELF::Shdr* section_header = ELF::getshdr(section);
648 660
649 // Identify this section's start and end addresses. 661 // Identify this section's start and end addresses.
650 const Elf32_Addr section_start = section_header->sh_addr; 662 const ELF::Addr section_start = section_header->sh_addr;
651 const Elf32_Addr section_end = section_start + section_header->sh_size; 663 const ELF::Addr section_end = section_start + section_header->sh_size;
652 664
653 Elf_Data* data = GetSectionData(section); 665 Elf_Data* data = GetSectionData(section);
654 666
655 // Ignore sections with no effective data. 667 // Ignore sections with no effective data.
656 if (data->d_buf == NULL) 668 if (data->d_buf == NULL)
657 continue; 669 continue;
658 670
659 // Create a copy-on-write pointer to the section's data. 671 // Create a copy-on-write pointer to the section's data.
660 uint8_t* area = reinterpret_cast<uint8_t*>(data->d_buf); 672 uint8_t* area = reinterpret_cast<uint8_t*>(data->d_buf);
661 673
662 for (size_t i = 0; i < relocations.size(); ++i) { 674 for (size_t i = 0; i < relocations.size(); ++i) {
663 const Elf32_Rel* relocation = &relocations[i]; 675 const ELF::Rel* relocation = &relocations[i];
664 CHECK(ELF32_R_TYPE(relocation->r_info) == R_ARM_RELATIVE); 676 CHECK(ELF_R_TYPE(relocation->r_info) == ELF::kArmRelativeRelocationCode);
665 677
666 // See if this relocation points into the current section. 678 // See if this relocation points into the current section.
667 if (relocation->r_offset >= section_start && 679 if (relocation->r_offset >= section_start &&
668 relocation->r_offset < section_end) { 680 relocation->r_offset < section_end) {
669 Elf32_Addr byte_offset = relocation->r_offset - section_start; 681 ELF::Addr byte_offset = relocation->r_offset - section_start;
670 Elf32_Off* target = reinterpret_cast<Elf32_Off*>(area + byte_offset); 682 ELF::Off* target = reinterpret_cast<ELF::Off*>(area + byte_offset);
671 683
672 // Is the relocation's target after the hole's start? 684 // Is the relocation's target after the hole's start?
673 if (*target > hole_start) { 685 if (*target > hole_start) {
674 // Copy on first write. Recompute target to point into the newly 686 // Copy on first write. Recompute target to point into the newly
675 // allocated buffer. 687 // allocated buffer.
676 if (area == data->d_buf) { 688 if (area == data->d_buf) {
677 area = new uint8_t[data->d_size]; 689 area = new uint8_t[data->d_size];
678 memcpy(area, data->d_buf, data->d_size); 690 memcpy(area, data->d_buf, data->d_size);
679 target = reinterpret_cast<Elf32_Off*>(area + byte_offset); 691 target = reinterpret_cast<ELF::Off*>(area + byte_offset);
680 } 692 }
681 693
682 *target += hole_size; 694 *target += hole_size;
683 VLOG(1) << "relocation[" << i << "] target adjusted to " << *target; 695 VLOG(1) << "relocation[" << i << "] target adjusted to " << *target;
684 } 696 }
685 } 697 }
686 } 698 }
687 699
688 // If we applied any relocation to this section, write it back. 700 // If we applied any relocation to this section, write it back.
689 if (area != data->d_buf) { 701 if (area != data->d_buf) {
690 RewriteSectionData(data, area, data->d_size); 702 RewriteSectionData(data, area, data->d_size);
691 delete [] area; 703 delete [] area;
692 } 704 }
693 } 705 }
694 } 706 }
695 707
696 // Pad relocations with a given number of R_ARM_NONE relocations. 708 // Pad relocations with a given number of null relocations.
697 void PadRelocations(size_t count, 709 void PadRelocations(size_t count,
698 std::vector<Elf32_Rel>* relocations) { 710 std::vector<ELF::Rel>* relocations) {
699 const Elf32_Rel r_arm_none = {R_ARM_NONE, 0}; 711 ELF::Rel null_relocation;
700 std::vector<Elf32_Rel> padding(count, r_arm_none); 712 null_relocation.r_offset = 0;
713 null_relocation.r_info = ELF_R_INFO(0, ELF::kArmNoRelocationCode);
714 std::vector<ELF::Rel> padding(count, null_relocation);
701 relocations->insert(relocations->end(), padding.begin(), padding.end()); 715 relocations->insert(relocations->end(), padding.begin(), padding.end());
702 } 716 }
703 717
704 // Adjust relocations so that the offset that they indicate will be correct 718 // Adjust relocations so that the offset that they indicate will be correct
705 // after the hole in .rel.dyn is added or removed (in effect, relocate the 719 // after the hole in .rel.dyn is added or removed (in effect, relocate the
706 // relocations). 720 // relocations).
707 void AdjustRelocations(Elf32_Off hole_start, 721 void AdjustRelocations(ELF::Off hole_start,
708 size_t hole_size, 722 size_t hole_size,
709 std::vector<Elf32_Rel>* relocations) { 723 std::vector<ELF::Rel>* relocations) {
710 for (size_t i = 0; i < relocations->size(); ++i) { 724 for (size_t i = 0; i < relocations->size(); ++i) {
711 Elf32_Rel* relocation = &relocations->at(i); 725 ELF::Rel* relocation = &relocations->at(i);
712 if (relocation->r_offset > hole_start) { 726 if (relocation->r_offset > hole_start) {
713 relocation->r_offset += hole_size; 727 relocation->r_offset += hole_size;
714 VLOG(1) << "relocation[" << i 728 VLOG(1) << "relocation[" << i
715 << "] offset adjusted to " << relocation->r_offset; 729 << "] offset adjusted to " << relocation->r_offset;
716 } 730 }
717 } 731 }
718 } 732 }
719 733
720 } // namespace 734 } // namespace
721 735
722 // Remove R_ARM_RELATIVE entries from .rel.dyn and write as packed data 736 // Remove ARM relative entries from .rel.dyn and write as packed data
723 // into .android.rel.dyn. 737 // into .android.rel.dyn.
724 bool ElfFile::PackRelocations() { 738 bool ElfFile::PackRelocations() {
725 // Load the ELF file into libelf. 739 // Load the ELF file into libelf.
726 if (!Load()) { 740 if (!Load()) {
727 LOG(ERROR) << "Failed to load as ELF (elf_error=" << elf_errno() << ")"; 741 LOG(ERROR) << "Failed to load as ELF";
728 return false; 742 return false;
729 } 743 }
730 744
731 // Retrieve the current .rel.dyn section data. 745 // Retrieve the current .rel.dyn section data.
732 Elf_Data* data = GetSectionData(rel_dyn_section_); 746 Elf_Data* data = GetSectionData(rel_dyn_section_);
733 747
734 // Convert data to a vector of Elf32 relocations. 748 // Convert data to a vector of Elf32 relocations.
735 const Elf32_Rel* relocations_base = reinterpret_cast<Elf32_Rel*>(data->d_buf); 749 const ELF::Rel* relocations_base = reinterpret_cast<ELF::Rel*>(data->d_buf);
736 std::vector<Elf32_Rel> relocations( 750 std::vector<ELF::Rel> relocations(
737 relocations_base, 751 relocations_base,
738 relocations_base + data->d_size / sizeof(relocations[0])); 752 relocations_base + data->d_size / sizeof(relocations[0]));
739 753
740 std::vector<Elf32_Rel> relative_relocations; 754 std::vector<ELF::Rel> relative_relocations;
741 std::vector<Elf32_Rel> other_relocations; 755 std::vector<ELF::Rel> other_relocations;
742 756
743 // Filter relocations into those that are R_ARM_RELATIVE and others. 757 // Filter relocations into those that are ARM relative and others.
744 for (size_t i = 0; i < relocations.size(); ++i) { 758 for (size_t i = 0; i < relocations.size(); ++i) {
745 const Elf32_Rel& relocation = relocations[i]; 759 const ELF::Rel& relocation = relocations[i];
746 if (ELF32_R_TYPE(relocation.r_info) == R_ARM_RELATIVE) { 760 if (ELF_R_TYPE(relocation.r_info) == ELF::kArmRelativeRelocationCode) {
747 CHECK(ELF32_R_SYM(relocation.r_info) == 0); 761 CHECK(ELF_R_SYM(relocation.r_info) == 0);
748 relative_relocations.push_back(relocation); 762 relative_relocations.push_back(relocation);
749 } else { 763 } else {
750 other_relocations.push_back(relocation); 764 other_relocations.push_back(relocation);
751 } 765 }
752 } 766 }
753 LOG(INFO) << "R_ARM_RELATIVE: " << relative_relocations.size() << " entries"; 767 LOG(INFO) << "ARM relative : " << relative_relocations.size() << " entries";
rmcilroy 2014/07/18 14:05:17 /s/ARM relative/Relative
simonb (inactive) 2014/07/21 12:15:49 Done. Also other log messages.
754 LOG(INFO) << "Other : " << other_relocations.size() << " entries"; 768 LOG(INFO) << "Other : " << other_relocations.size() << " entries";
755 LOG(INFO) << "Total : " << relocations.size() << " entries"; 769 LOG(INFO) << "Total : " << relocations.size() << " entries";
756 770
757 // If no relative relocations then we have nothing packable. Perhaps 771 // If no relative relocations then we have nothing packable. Perhaps
758 // the shared object has already been packed? 772 // the shared object has already been packed?
759 if (relative_relocations.empty()) { 773 if (relative_relocations.empty()) {
760 LOG(ERROR) << "No R_ARM_RELATIVE relocations found (already packed?)"; 774 LOG(ERROR) << "No ARM relative relocations found (already packed?)";
761 return false; 775 return false;
762 } 776 }
763 777
764 // Unless padding, pre-apply R_ARM_RELATIVE relocations to account for the 778 // Unless padding, pre-apply ARM relative relocations to account for the
765 // hole, and pre-adjust all relocation offsets accordingly. 779 // hole, and pre-adjust all relocation offsets accordingly.
766 if (!is_padding_rel_dyn_) { 780 if (!is_padding_rel_dyn_) {
767 // Pre-calculate the size of the hole we will close up when we rewrite 781 // Pre-calculate the size of the hole we will close up when we rewrite
768 // .rel.dyn. We have to adjust relocation addresses to account for this. 782 // .rel.dyn. We have to adjust relocation addresses to account for this.
769 Elf32_Shdr* section_header = elf32_getshdr(rel_dyn_section_); 783 ELF::Shdr* section_header = ELF::getshdr(rel_dyn_section_);
770 const Elf32_Off hole_start = section_header->sh_offset; 784 const ELF::Off hole_start = section_header->sh_offset;
771 size_t hole_size = 785 size_t hole_size =
772 relative_relocations.size() * sizeof(relative_relocations[0]); 786 relative_relocations.size() * sizeof(relative_relocations[0]);
773 const size_t unaligned_hole_size = hole_size; 787 const size_t unaligned_hole_size = hole_size;
774 788
775 // Adjust the actual hole size to preserve alignment. 789 // Adjust the actual hole size to preserve alignment.
776 hole_size -= hole_size % kPreserveAlignment; 790 hole_size -= hole_size % kPreserveAlignment;
777 LOG(INFO) << "Compaction : " << hole_size << " bytes"; 791 LOG(INFO) << "Compaction : " << hole_size << " bytes";
778 792
779 // Adjusting for alignment may have removed any packing benefit. 793 // Adjusting for alignment may have removed any packing benefit.
780 if (hole_size == 0) { 794 if (hole_size == 0) {
781 LOG(INFO) << "Too few R_ARM_RELATIVE relocations to pack after alignment"; 795 LOG(INFO) << "Too few ARM relative relocations to pack after alignment";
782 return false; 796 return false;
783 } 797 }
784 798
785 // Add R_ARM_NONE relocations to other_relocations to preserve alignment. 799 // Add null relocations to other_relocations to preserve alignment.
786 const size_t padding_bytes = unaligned_hole_size - hole_size; 800 const size_t padding_bytes = unaligned_hole_size - hole_size;
787 CHECK(padding_bytes % sizeof(other_relocations[0]) == 0); 801 CHECK(padding_bytes % sizeof(other_relocations[0]) == 0);
788 const size_t required = padding_bytes / sizeof(other_relocations[0]); 802 const size_t required = padding_bytes / sizeof(other_relocations[0]);
789 PadRelocations(required, &other_relocations); 803 PadRelocations(required, &other_relocations);
790 LOG(INFO) << "Alignment pad : " << required << " relocations"; 804 LOG(INFO) << "Alignment pad : " << required << " relocations";
791 805
792 // Apply relocations to all R_ARM_RELATIVE data to relocate it into the 806 // Apply relocations to all ARM relative data to relocate it into the
793 // area it will occupy once the hole in .rel.dyn is removed. 807 // area it will occupy once the hole in .rel.dyn is removed.
794 AdjustRelocationTargets(elf_, hole_start, -hole_size, relative_relocations); 808 AdjustRelocationTargets(elf_, hole_start, -hole_size, relative_relocations);
795 // Relocate the relocations. 809 // Relocate the relocations.
796 AdjustRelocations(hole_start, -hole_size, &relative_relocations); 810 AdjustRelocations(hole_start, -hole_size, &relative_relocations);
797 AdjustRelocations(hole_start, -hole_size, &other_relocations); 811 AdjustRelocations(hole_start, -hole_size, &other_relocations);
798 } else { 812 } else {
799 // If padding, add R_ARM_NONE relocations to other_relocations to make it 813 // If padding, add NONE-type relocations to other_relocations to make it
 800 // the same size as the original relocations we read in. This makes 814 // the same size as the original relocations we read in. This makes
801 // the ResizeSection() below a no-op. 815 // the ResizeSection() below a no-op.
802 const size_t required = relocations.size() - other_relocations.size(); 816 const size_t required = relocations.size() - other_relocations.size();
803 PadRelocations(required, &other_relocations); 817 PadRelocations(required, &other_relocations);
804 } 818 }
805 819
806 820 // Pack ARM relative relocations.
807 // Pack R_ARM_RELATIVE relocations.
808 const size_t initial_bytes = 821 const size_t initial_bytes =
809 relative_relocations.size() * sizeof(relative_relocations[0]); 822 relative_relocations.size() * sizeof(relative_relocations[0]);
810 LOG(INFO) << "Unpacked R_ARM_RELATIVE: " << initial_bytes << " bytes"; 823 LOG(INFO) << "Unpacked ARM relative: " << initial_bytes << " bytes";
811 std::vector<uint8_t> packed; 824 std::vector<uint8_t> packed;
812 RelocationPacker packer; 825 RelocationPacker packer;
813 packer.PackRelativeRelocations(relative_relocations, &packed); 826 packer.PackRelativeRelocations(relative_relocations, &packed);
814 const void* packed_data = &packed[0]; 827 const void* packed_data = &packed[0];
815 const size_t packed_bytes = packed.size() * sizeof(packed[0]); 828 const size_t packed_bytes = packed.size() * sizeof(packed[0]);
816 LOG(INFO) << "Packed R_ARM_RELATIVE: " << packed_bytes << " bytes"; 829 LOG(INFO) << "Packed ARM relative: " << packed_bytes << " bytes";
817 830
818 // If we have insufficient R_ARM_RELATIVE relocations to form a run then 831 // If we have insufficient ARM relative relocations to form a run then
819 // packing fails. 832 // packing fails.
820 if (packed.empty()) { 833 if (packed.empty()) {
821 LOG(INFO) << "Too few R_ARM_RELATIVE relocations to pack"; 834 LOG(INFO) << "Too few ARM relative relocations to pack";
822 return false; 835 return false;
823 } 836 }
824 837
825 // Run a loopback self-test as a check that packing is lossless. 838 // Run a loopback self-test as a check that packing is lossless.
826 std::vector<Elf32_Rel> unpacked; 839 std::vector<ELF::Rel> unpacked;
827 packer.UnpackRelativeRelocations(packed, &unpacked); 840 packer.UnpackRelativeRelocations(packed, &unpacked);
828 CHECK(unpacked.size() == relative_relocations.size()); 841 CHECK(unpacked.size() == relative_relocations.size());
829 for (size_t i = 0; i < unpacked.size(); ++i) { 842 CHECK(!memcmp(&unpacked[0],
830 CHECK(unpacked[i].r_offset == relative_relocations[i].r_offset); 843 &relative_relocations[0],
831 CHECK(unpacked[i].r_info == relative_relocations[i].r_info); 844 unpacked.size() * sizeof(unpacked[0])));
832 }
833 845
834 // Make sure packing saved some space. 846 // Make sure packing saved some space.
835 if (packed_bytes >= initial_bytes) { 847 if (packed_bytes >= initial_bytes) {
836 LOG(INFO) << "Packing R_ARM_RELATIVE relocations saves no space"; 848 LOG(INFO) << "Packing ARM relative relocations saves no space";
837 return false; 849 return false;
838 } 850 }
839 851
840 // Rewrite the current .rel.dyn section to be only the non-R_ARM_RELATIVE 852 // Rewrite the current .rel.dyn section to be only the ARM non-relative
841 // relocations, then shrink it to size. 853 // relocations, then shrink it to size.
842 const void* section_data = &other_relocations[0]; 854 const void* section_data = &other_relocations[0];
843 const size_t bytes = other_relocations.size() * sizeof(other_relocations[0]); 855 const size_t bytes = other_relocations.size() * sizeof(other_relocations[0]);
844 ResizeSection(elf_, rel_dyn_section_, bytes); 856 ResizeSection(elf_, rel_dyn_section_, bytes);
845 RewriteSectionData(data, section_data, bytes); 857 RewriteSectionData(data, section_data, bytes);
846 858
847 // Rewrite the current .android.rel.dyn section to hold the packed 859 // Rewrite the current .android.rel.dyn section to hold the packed
848 // R_ARM_RELATIVE relocations. 860 // ARM relative relocations.
849 data = GetSectionData(android_rel_dyn_section_); 861 data = GetSectionData(android_rel_dyn_section_);
850 ResizeSection(elf_, android_rel_dyn_section_, packed_bytes); 862 ResizeSection(elf_, android_rel_dyn_section_, packed_bytes);
851 RewriteSectionData(data, packed_data, packed_bytes); 863 RewriteSectionData(data, packed_data, packed_bytes);
852 864
853 // Rewrite .dynamic to include two new tags describing .android.rel.dyn. 865 // Rewrite .dynamic to include two new tags describing .android.rel.dyn.
854 data = GetSectionData(dynamic_section_); 866 data = GetSectionData(dynamic_section_);
855 const Elf32_Dyn* dynamic_base = reinterpret_cast<Elf32_Dyn*>(data->d_buf); 867 const ELF::Dyn* dynamic_base = reinterpret_cast<ELF::Dyn*>(data->d_buf);
856 std::vector<Elf32_Dyn> dynamics( 868 std::vector<ELF::Dyn> dynamics(
857 dynamic_base, 869 dynamic_base,
858 dynamic_base + data->d_size / sizeof(dynamics[0])); 870 dynamic_base + data->d_size / sizeof(dynamics[0]));
859 Elf32_Shdr* section_header = elf32_getshdr(android_rel_dyn_section_);
860 // Use two of the spare slots to describe the .android.rel.dyn section. 871 // Use two of the spare slots to describe the .android.rel.dyn section.
861 const Elf32_Dyn offset_dyn 872 ELF::Shdr* section_header = ELF::getshdr(android_rel_dyn_section_);
873 const ELF::Dyn offset_dyn
862 = {DT_ANDROID_ARM_REL_OFFSET, {section_header->sh_offset}}; 874 = {DT_ANDROID_ARM_REL_OFFSET, {section_header->sh_offset}};
863 AddDynamicEntry(offset_dyn, &dynamics); 875 AddDynamicEntry(offset_dyn, &dynamics);
864 const Elf32_Dyn size_dyn 876 const ELF::Dyn size_dyn
865 = {DT_ANDROID_ARM_REL_SIZE, {section_header->sh_size}}; 877 = {DT_ANDROID_ARM_REL_SIZE, {section_header->sh_size}};
866 AddDynamicEntry(size_dyn, &dynamics); 878 AddDynamicEntry(size_dyn, &dynamics);
867 const void* dynamics_data = &dynamics[0]; 879 const void* dynamics_data = &dynamics[0];
868 const size_t dynamics_bytes = dynamics.size() * sizeof(dynamics[0]); 880 const size_t dynamics_bytes = dynamics.size() * sizeof(dynamics[0]);
869 RewriteSectionData(data, dynamics_data, dynamics_bytes); 881 RewriteSectionData(data, dynamics_data, dynamics_bytes);
870 882
871 Flush(); 883 Flush();
872 return true; 884 return true;
873 } 885 }
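The packed byte stream itself is produced by RelocationPacker, which lives in packer.h/packer.cc rather than in this file. As a rough intuition for why relative relocations pack well, the sketch below is a simplified illustration and not the packer's actual APR1 wire format: every relative relocation carries the same r_info value, so only the sorted r_offset values need storing, and consecutive offsets differ by small, repetitive deltas after linking. ExampleRel and EncodeOffsetDeltas are hypothetical names used only for this sketch.

#include <stdint.h>
#include <vector>

// Hypothetical two-word relocation entry, mirroring Elf32_Rel.
struct ExampleRel { uint32_t r_offset; uint32_t r_info; };

// Store the entry count followed by first-offset-then-delta values. A
// matching decoder can rebuild the original offsets exactly, which is what
// makes a loopback self-test like the one above possible.
std::vector<uint32_t> EncodeOffsetDeltas(const std::vector<ExampleRel>& rels) {
  std::vector<uint32_t> packed;
  packed.push_back(static_cast<uint32_t>(rels.size()));
  uint32_t previous_offset = 0;
  for (size_t i = 0; i < rels.size(); ++i) {
    packed.push_back(rels[i].r_offset - previous_offset);
    previous_offset = rels[i].r_offset;
  }
  return packed;
}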
874 886
875 // Find packed R_ARM_RELATIVE relocations in .android.rel.dyn, unpack them, 887 // Find packed ARM relative relocations in .android.rel.dyn, unpack them,
876 // and rewrite the .rel.dyn section in so_file to contain unpacked data. 888 // and rewrite the .rel.dyn section in so_file to contain unpacked data.
877 bool ElfFile::UnpackRelocations() { 889 bool ElfFile::UnpackRelocations() {
878 // Load the ELF file into libelf. 890 // Load the ELF file into libelf.
879 if (!Load()) { 891 if (!Load()) {
880 LOG(ERROR) << "Failed to load as ELF (elf_error=" << elf_errno() << ")"; 892 LOG(ERROR) << "Failed to load as ELF";
881 return false; 893 return false;
882 } 894 }
883 895
884 // Retrieve the current .android.rel.dyn section data. 896 // Retrieve the current .android.rel.dyn section data.
885 Elf_Data* data = GetSectionData(android_rel_dyn_section_); 897 Elf_Data* data = GetSectionData(android_rel_dyn_section_);
886 898
887 // Convert data to a vector of bytes. 899 // Convert data to a vector of bytes.
888 const uint8_t* packed_base = reinterpret_cast<uint8_t*>(data->d_buf); 900 const uint8_t* packed_base = reinterpret_cast<uint8_t*>(data->d_buf);
889 std::vector<uint8_t> packed( 901 std::vector<uint8_t> packed(
890 packed_base, 902 packed_base,
891 packed_base + data->d_size / sizeof(packed[0])); 903 packed_base + data->d_size / sizeof(packed[0]));
892 904
893 // Properly packed data must begin with "APR1". 905 // Properly packed data must begin with "APR1".
894 if (packed.empty() || 906 if (packed.empty() ||
895 packed[0] != 'A' || packed[1] != 'P' || 907 packed[0] != 'A' || packed[1] != 'P' ||
896 packed[2] != 'R' || packed[3] != '1') { 908 packed[2] != 'R' || packed[3] != '1') {
897 LOG(ERROR) << "Packed R_ARM_RELATIVE relocations not found (not packed?)"; 909 LOG(ERROR) << "Packed ARM relative relocations not found (not packed?)";
898 return false; 910 return false;
899 } 911 }
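// For reference, on a little-endian target the four identifier characters
// map to a single word with the first character in the low byte: "APR1" read
// back as a uint32_t is 0x31525041, and the "NULL" stub written later in
// this function is 0x4c4c554e, matching kStubIdentifier.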
900 912
901 // Unpack the data to re-materialize the R_ARM_RELATIVE relocations. 913 // Unpack the data to re-materialize the ARM relative relocations.
902 const size_t packed_bytes = packed.size() * sizeof(packed[0]); 914 const size_t packed_bytes = packed.size() * sizeof(packed[0]);
903 LOG(INFO) << "Packed R_ARM_RELATIVE: " << packed_bytes << " bytes"; 915 LOG(INFO) << "Packed ARM relative: " << packed_bytes << " bytes";
904 std::vector<Elf32_Rel> relative_relocations; 916 std::vector<ELF::Rel> relative_relocations;
905 RelocationPacker packer; 917 RelocationPacker packer;
906 packer.UnpackRelativeRelocations(packed, &relative_relocations); 918 packer.UnpackRelativeRelocations(packed, &relative_relocations);
907 const size_t unpacked_bytes = 919 const size_t unpacked_bytes =
908 relative_relocations.size() * sizeof(relative_relocations[0]); 920 relative_relocations.size() * sizeof(relative_relocations[0]);
909 LOG(INFO) << "Unpacked R_ARM_RELATIVE: " << unpacked_bytes << " bytes"; 921 LOG(INFO) << "Unpacked ARM relative: " << unpacked_bytes << " bytes";
910 922
911 // Retrieve the current .rel.dyn section data. 923 // Retrieve the current .rel.dyn section data.
912 data = GetSectionData(rel_dyn_section_); 924 data = GetSectionData(rel_dyn_section_);
913 925
914 // Interpret data as Elf32 relocations. 926 // Interpret data as Elf32 relocations.
915 const Elf32_Rel* relocations_base = reinterpret_cast<Elf32_Rel*>(data->d_buf); 927 const ELF::Rel* relocations_base = reinterpret_cast<ELF::Rel*>(data->d_buf);
916 std::vector<Elf32_Rel> relocations( 928 std::vector<ELF::Rel> relocations(
917 relocations_base, 929 relocations_base,
918 relocations_base + data->d_size / sizeof(relocations[0])); 930 relocations_base + data->d_size / sizeof(relocations[0]));
919 931
920 std::vector<Elf32_Rel> other_relocations; 932 std::vector<ELF::Rel> other_relocations;
921 size_t padding = 0; 933 size_t padding = 0;
922 934
923 // Filter relocations to locate any that are R_ARM_NONE. These will occur 935 // Filter relocations to locate any that are NONE-type. These will occur
924 // if padding was turned on for packing. 936 // if padding was turned on for packing.
925 for (size_t i = 0; i < relocations.size(); ++i) { 937 for (size_t i = 0; i < relocations.size(); ++i) {
926 const Elf32_Rel& relocation = relocations[i]; 938 const ELF::Rel& relocation = relocations[i];
927 if (ELF32_R_TYPE(relocation.r_info) != R_ARM_NONE) { 939 if (ELF_R_TYPE(relocation.r_info) != ELF::kArmNoRelocationCode) {
928 other_relocations.push_back(relocation); 940 other_relocations.push_back(relocation);
929 } else { 941 } else {
930 ++padding; 942 ++padding;
931 } 943 }
932 } 944 }
933 LOG(INFO) << "R_ARM_RELATIVE: " << relative_relocations.size() << " entries"; 945 LOG(INFO) << "ARM relative : " << relative_relocations.size() << " entries";
934 LOG(INFO) << "Other : " << other_relocations.size() << " entries"; 946 LOG(INFO) << "Other : " << other_relocations.size() << " entries";
935 947
936 // If we found the same number of R_ARM_NONE entries in .rel.dyn as we 948 // If we found the same number of null relocation entries in .rel.dyn as we
937 // hold as unpacked relative relocations, then this is a padded file. 949 // hold as unpacked relative relocations, then this is a padded file.
938 const bool is_padded = padding == relative_relocations.size(); 950 const bool is_padded = padding == relative_relocations.size();
939 951
940 // Unless padded, pre-apply R_ARM_RELATIVE relocations to account for the 952 // Unless padded, pre-apply ARM relative relocations to account for the
941 // hole, and pre-adjust all relocation offsets accordingly. 953 // hole, and pre-adjust all relocation offsets accordingly.
942 if (!is_padded) { 954 if (!is_padded) {
943 // Pre-calculate the size of the hole we will open up when we rewrite 955 // Pre-calculate the size of the hole we will open up when we rewrite
944 // .rel.dyn. We have to adjust relocation addresses to account for this. 956 // .rel.dyn. We have to adjust relocation addresses to account for this.
945 Elf32_Shdr* section_header = elf32_getshdr(rel_dyn_section_); 957 ELF::Shdr* section_header = ELF::getshdr(rel_dyn_section_);
946 const Elf32_Off hole_start = section_header->sh_offset; 958 const ELF::Off hole_start = section_header->sh_offset;
947 size_t hole_size = 959 size_t hole_size =
948 relative_relocations.size() * sizeof(relative_relocations[0]); 960 relative_relocations.size() * sizeof(relative_relocations[0]);
949 961
950 // Adjust the hole size for the padding added to preserve alignment. 962 // Adjust the hole size for the padding added to preserve alignment.
951 hole_size -= padding * sizeof(other_relocations[0]); 963 hole_size -= padding * sizeof(other_relocations[0]);
952 LOG(INFO) << "Expansion : " << hole_size << " bytes"; 964 LOG(INFO) << "Expansion : " << hole_size << " bytes";
953 965
954 // Apply relocations to all R_ARM_RELATIVE data to relocate it into the 966 // Apply relocations to all ARM relative data to relocate it into the
955 // area it will occupy once the hole in .rel.dyn is opened. 967 // area it will occupy once the hole in .rel.dyn is opened.
956 AdjustRelocationTargets(elf_, hole_start, hole_size, relative_relocations); 968 AdjustRelocationTargets(elf_, hole_start, hole_size, relative_relocations);
957 // Relocate the relocations. 969 // Relocate the relocations.
958 AdjustRelocations(hole_start, hole_size, &relative_relocations); 970 AdjustRelocations(hole_start, hole_size, &relative_relocations);
959 AdjustRelocations(hole_start, hole_size, &other_relocations); 971 AdjustRelocations(hole_start, hole_size, &other_relocations);
960 } 972 }
961 973
962 // Rewrite the current .rel.dyn section to be the R_ARM_RELATIVE relocations 974 // Rewrite the current .rel.dyn section to be the ARM relative relocations
963 // followed by other relocations. This is the usual order in which we find 975 // followed by other relocations. This is the usual order in which we find
964 // them after linking, so this action will normally put the entire .rel.dyn 976 // them after linking, so this action will normally put the entire .rel.dyn
965 // section back to its pre-split-and-packed state. 977 // section back to its pre-split-and-packed state.
966 relocations.assign(relative_relocations.begin(), relative_relocations.end()); 978 relocations.assign(relative_relocations.begin(), relative_relocations.end());
967 relocations.insert(relocations.end(), 979 relocations.insert(relocations.end(),
968 other_relocations.begin(), other_relocations.end()); 980 other_relocations.begin(), other_relocations.end());
969 const void* section_data = &relocations[0]; 981 const void* section_data = &relocations[0];
970 const size_t bytes = relocations.size() * sizeof(relocations[0]); 982 const size_t bytes = relocations.size() * sizeof(relocations[0]);
971 LOG(INFO) << "Total : " << relocations.size() << " entries"; 983 LOG(INFO) << "Total : " << relocations.size() << " entries";
972 ResizeSection(elf_, rel_dyn_section_, bytes); 984 ResizeSection(elf_, rel_dyn_section_, bytes);
973 RewriteSectionData(data, section_data, bytes); 985 RewriteSectionData(data, section_data, bytes);
974 986
975 // Nearly empty the current .android.rel.dyn section. Leaves a four-byte 987 // Nearly empty the current .android.rel.dyn section. Leaves a four-byte
976 // stub so that some data remains allocated to the section. This is a 988 // stub so that some data remains allocated to the section. This is a
977 // convenience which allows us to re-pack this file again without 989 // convenience which allows us to re-pack this file again without
978 // having to remove the section and then add a new small one with objcopy. 990 // having to remove the section and then add a new small one with objcopy.
979 // The way we resize sections relies on there being some data in a section. 991 // The way we resize sections relies on there being some data in a section.
980 data = GetSectionData(android_rel_dyn_section_); 992 data = GetSectionData(android_rel_dyn_section_);
981 ResizeSection(elf_, android_rel_dyn_section_, sizeof(kStubIdentifier)); 993 ResizeSection(elf_, android_rel_dyn_section_, sizeof(kStubIdentifier));
982 RewriteSectionData(data, &kStubIdentifier, sizeof(kStubIdentifier)); 994 RewriteSectionData(data, &kStubIdentifier, sizeof(kStubIdentifier));
983 995
984 // Rewrite .dynamic to remove two tags describing .android.rel.dyn. 996 // Rewrite .dynamic to remove two tags describing .android.rel.dyn.
985 data = GetSectionData(dynamic_section_); 997 data = GetSectionData(dynamic_section_);
986 const Elf32_Dyn* dynamic_base = reinterpret_cast<Elf32_Dyn*>(data->d_buf); 998 const ELF::Dyn* dynamic_base = reinterpret_cast<ELF::Dyn*>(data->d_buf);
987 std::vector<Elf32_Dyn> dynamics( 999 std::vector<ELF::Dyn> dynamics(
988 dynamic_base, 1000 dynamic_base,
989 dynamic_base + data->d_size / sizeof(dynamics[0])); 1001 dynamic_base + data->d_size / sizeof(dynamics[0]));
1002 RemoveDynamicEntry(DT_ANDROID_ARM_REL_OFFSET, &dynamics);
990 RemoveDynamicEntry(DT_ANDROID_ARM_REL_SIZE, &dynamics); 1003 RemoveDynamicEntry(DT_ANDROID_ARM_REL_SIZE, &dynamics);
991 RemoveDynamicEntry(DT_ANDROID_ARM_REL_OFFSET, &dynamics);
992 const void* dynamics_data = &dynamics[0]; 1004 const void* dynamics_data = &dynamics[0];
993 const size_t dynamics_bytes = dynamics.size() * sizeof(dynamics[0]); 1005 const size_t dynamics_bytes = dynamics.size() * sizeof(dynamics[0]);
994 RewriteSectionData(data, dynamics_data, dynamics_bytes); 1006 RewriteSectionData(data, dynamics_data, dynamics_bytes);
995 1007
996 Flush(); 1008 Flush();
997 return true; 1009 return true;
998 } 1010 }
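The two out-of-band entries added in PackRelocations() and removed above are ordinary ELF::Dyn records, so a consumer that wants to expand the packed data at load time can locate it by scanning .dynamic. Below is a minimal sketch under that assumption; FindPackedRelocations is a hypothetical helper, not part of this tool.

// Returns true and fills |offset| and |size| when both out-of-band tags are
// present; returns false for a library that was never packed.
bool FindPackedRelocations(const std::vector<ELF::Dyn>& dynamics,
                           ELF::Off* offset,
                           size_t* size) {
  bool found_offset = false;
  bool found_size = false;
  for (size_t i = 0; i < dynamics.size(); ++i) {
    const ELF::Dyn& dyn = dynamics[i];
    if (dyn.d_tag == DT_ANDROID_ARM_REL_OFFSET) {
      *offset = dyn.d_un.d_val;
      found_offset = true;
    } else if (dyn.d_tag == DT_ANDROID_ARM_REL_SIZE) {
      *size = dyn.d_un.d_val;
      found_size = true;
    }
  }
  return found_offset && found_size;
}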
999 1011
1000 // Flush rewritten shared object file data. 1012 // Flush rewritten shared object file data.
1001 void ElfFile::Flush() { 1013 void ElfFile::Flush() {
1002 // Flag all ELF data held in memory as needing to be written back to the 1014 // Flag all ELF data held in memory as needing to be written back to the
1003 // file, and tell libelf that we have controlled the file layout. 1015 // file, and tell libelf that we have controlled the file layout.
1004 elf_flagelf(elf_, ELF_C_SET, ELF_F_DIRTY); 1016 elf_flagelf(elf_, ELF_C_SET, ELF_F_DIRTY);
1005 elf_flagelf(elf_, ELF_C_SET, ELF_F_LAYOUT); 1017 elf_flagelf(elf_, ELF_C_SET, ELF_F_LAYOUT);
1006 1018
1007 // Write ELF data back to disk. 1019 // Write ELF data back to disk.
1008 const off_t file_bytes = elf_update(elf_, ELF_C_WRITE); 1020 const off_t file_bytes = elf_update(elf_, ELF_C_WRITE);
1009 CHECK(file_bytes > 0); 1021 CHECK(file_bytes > 0);
1010 VLOG(1) << "elf_update returned: " << file_bytes; 1022 VLOG(1) << "elf_update returned: " << file_bytes;
1011 1023
1012 // Clean up libelf, and truncate the output file to the number of bytes 1024 // Clean up libelf, and truncate the output file to the number of bytes
1013 // written by elf_update(). 1025 // written by elf_update().
1014 elf_end(elf_); 1026 elf_end(elf_);
1015 elf_ = NULL; 1027 elf_ = NULL;
1016 const int truncate = ftruncate(fd_, file_bytes); 1028 const int truncate = ftruncate(fd_, file_bytes);
1017 CHECK(truncate == 0); 1029 CHECK(truncate == 0);
1018 } 1030 }
1019 1031
1020 } // namespace relocation_packer 1032 } // namespace relocation_packer
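For completeness, a driver for this class would typically open the shared object read-write, hand the descriptor to an ElfFile, and call PackRelocations() or UnpackRelocations(). The sketch below is assumption-laden: the ElfFile constructor and the real tool's option handling are outside this chunk of the diff, and "libexample.so" is a placeholder path.

#include <fcntl.h>
#include <unistd.h>

#include "elf_file.h"
#include "libelf.h"

int main() {
  // libelf requires the library version to be declared once before use.
  if (elf_version(EV_CURRENT) == EV_NONE)
    return 1;

  // Placeholder path; the packer rewrites the file in place.
  const int fd = open("libexample.so", O_RDWR);
  if (fd == -1)
    return 1;

  // Assumption: ElfFile wraps an already-open descriptor (its constructor is
  // not shown in this part of the diff).
  relocation_packer::ElfFile elf_file(fd);
  const bool ok = elf_file.PackRelocations();  // UnpackRelocations() reverses.

  close(fd);
  return ok ? 0 : 1;
}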