Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // TODO(simonb): Extend for 64-bit target libraries. | 5 // TODO(simonb): Extend for 64-bit target libraries. |
| 6 | 6 |
| 7 #include "elf_file.h" | 7 #include "elf_file.h" |
| 8 | 8 |
| 9 #include <stdlib.h> | 9 #include <stdlib.h> |
| 10 #include <sys/types.h> | 10 #include <sys/types.h> |
| 11 #include <unistd.h> | 11 #include <unistd.h> |
| 12 #include <string> | 12 #include <string> |
| 13 #include <vector> | 13 #include <vector> |
| 14 | 14 |
| 15 #include "debug.h" | 15 #include "debug.h" |
| 16 #include "libelf.h" | 16 #include "libelf.h" |
| 17 #include "packer.h" | 17 #include "packer.h" |
| 18 | 18 |
| 19 namespace relocation_packer { | 19 namespace relocation_packer { |
| 20 | 20 |
| 21 // Stub identifier written to 'null out' packed data, "NULL". | 21 // Stub identifier written to 'null out' packed data, "NULL". |
| 22 static const Elf32_Word kStubIdentifier = 0x4c4c554eu; | 22 static const Elf32_Word kStubIdentifier = 0x4c4c554eu; |
| 23 | 23 |
| 24 // Out-of-band dynamic tags used to indicate the offset and size of the | 24 // Out-of-band dynamic tags used to indicate the offset and size of the |
| 25 // .android.rel.dyn section. | 25 // .android.rel.dyn section. |
| 26 static const Elf32_Sword DT_ANDROID_ARM_REL_OFFSET = DT_LOPROC; | 26 static const Elf32_Sword DT_ANDROID_ARM_REL_OFFSET = DT_LOPROC; |
| 27 static const Elf32_Sword DT_ANDROID_ARM_REL_SIZE = DT_LOPROC + 1; | 27 static const Elf32_Sword DT_ANDROID_ARM_REL_SIZE = DT_LOPROC + 1; |
| 28 | 28 |
| 29 // Alignment to preserve, in bytes. This must be at least as large as the | |
| 30 // largest d_align and sh_addralign values found in the loaded file. | |
| 31 static const size_t kPreserveAlignment = 256; | |
| 32 | |
| 29 namespace { | 33 namespace { |
| 30 | 34 |
| 31 // Get section data. Checks that the section has exactly one data entry, | 35 // Get section data. Checks that the section has exactly one data entry, |
| 32 // so that the section size and the data size are the same. True in | 36 // so that the section size and the data size are the same. True in |
| 33 // practice for all sections we resize when packing or unpacking. Done | 37 // practice for all sections we resize when packing or unpacking. Done |
| 34 // by ensuring that a call to elf_getdata(section, data) returns NULL as | 38 // by ensuring that a call to elf_getdata(section, data) returns NULL as |
| 35 // the next data entry. | 39 // the next data entry. |
| 36 Elf_Data* GetSectionData(Elf_Scn* section) { | 40 Elf_Data* GetSectionData(Elf_Scn* section) { |
| 37 Elf_Data* data = elf_getdata(section, NULL); | 41 Elf_Data* data = elf_getdata(section, NULL); |
| 38 CHECK(data && elf_getdata(section, data) == NULL); | 42 CHECK(data && elf_getdata(section, data) == NULL); |
| (...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 84 VLOG(" p_memsz = %u\n", program_header->p_memsz); | 88 VLOG(" p_memsz = %u\n", program_header->p_memsz); |
| 85 } | 89 } |
| 86 | 90 |
| 87 // Verbose ELF section header logging. | 91 // Verbose ELF section header logging. |
| 88 void VerboseLogSectionHeader(const std::string& section_name, | 92 void VerboseLogSectionHeader(const std::string& section_name, |
| 89 const Elf32_Shdr* section_header) { | 93 const Elf32_Shdr* section_header) { |
| 90 VLOG("section %s\n", section_name.c_str()); | 94 VLOG("section %s\n", section_name.c_str()); |
| 91 VLOG(" sh_addr = %u\n", section_header->sh_addr); | 95 VLOG(" sh_addr = %u\n", section_header->sh_addr); |
| 92 VLOG(" sh_offset = %u\n", section_header->sh_offset); | 96 VLOG(" sh_offset = %u\n", section_header->sh_offset); |
| 93 VLOG(" sh_size = %u\n", section_header->sh_size); | 97 VLOG(" sh_size = %u\n", section_header->sh_size); |
| 98 VLOG(" sh_addralign = %u\n", section_header->sh_addralign); | |
| 94 } | 99 } |
| 95 | 100 |
| 96 // Verbose ELF section data logging. | 101 // Verbose ELF section data logging. |
| 97 void VerboseLogSectionData(const Elf_Data* data) { | 102 void VerboseLogSectionData(const Elf_Data* data) { |
| 98 VLOG(" data\n"); | 103 VLOG(" data\n"); |
| 99 VLOG(" d_buf = %p\n", data->d_buf); | 104 VLOG(" d_buf = %p\n", data->d_buf); |
| 100 VLOG(" d_off = %lu\n", data->d_off); | 105 VLOG(" d_off = %lu\n", data->d_off); |
| 101 VLOG(" d_size = %lu\n", data->d_size); | 106 VLOG(" d_size = %lu\n", data->d_size); |
| 107 VLOG(" d_align = %lu\n", data->d_align); | |
| 102 } | 108 } |
| 103 | 109 |
| 104 } // namespace | 110 } // namespace |
| 105 | 111 |
| 106 // Load the complete ELF file into a memory image in libelf, and identify | 112 // Load the complete ELF file into a memory image in libelf, and identify |
| 107 // the .rel.dyn, .dynamic, and .android.rel.dyn sections. No-op if the | 113 // the .rel.dyn, .dynamic, and .android.rel.dyn sections. No-op if the |
| 108 // ELF file has already been loaded. | 114 // ELF file has already been loaded. |
| 109 bool ElfFile::Load() { | 115 bool ElfFile::Load() { |
| 110 if (elf_) | 116 if (elf_) |
| 111 return true; | 117 return true; |
| (...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 183 } | 189 } |
| 184 if (section_header->sh_offset == dynamic_program_header->p_offset) { | 190 if (section_header->sh_offset == dynamic_program_header->p_offset) { |
| 185 found_dynamic_section = section; | 191 found_dynamic_section = section; |
| 186 } | 192 } |
| 187 | 193 |
| 188 // If we find a section named .debug*, set the debug warning flag. | 194 // If we find a section named .debug*, set the debug warning flag. |
| 189 if (std::string(name).find(".debug") == 0) { | 195 if (std::string(name).find(".debug") == 0) { |
| 190 has_debug_section = true; | 196 has_debug_section = true; |
| 191 } | 197 } |
| 192 | 198 |
| 199 // Ensure we preserve alignment, repeated later for the data block(s). | |
| 200 CHECK(section_header->sh_addralign <= kPreserveAlignment); | |
| 201 | |
| 193 Elf_Data* data = NULL; | 202 Elf_Data* data = NULL; |
| 194 while ((data = elf_getdata(section, data)) != NULL) { | 203 while ((data = elf_getdata(section, data)) != NULL) { |
| 204 CHECK(data->d_align <= kPreserveAlignment); | |
| 195 VerboseLogSectionData(data); | 205 VerboseLogSectionData(data); |
| 196 } | 206 } |
| 197 } | 207 } |
| 198 | 208 |
| 199 // Loading failed if we did not find the required special sections. | 209 // Loading failed if we did not find the required special sections. |
| 200 if (!found_rel_dyn_section) { | 210 if (!found_rel_dyn_section) { |
| 201 LOG("ERROR: Missing .rel.dyn section\n"); | 211 LOG("ERROR: Missing .rel.dyn section\n"); |
| 202 return false; | 212 return false; |
| 203 } | 213 } |
| 204 if (!found_dynamic_section) { | 214 if (!found_dynamic_section) { |
| (...skipping 442 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 647 CHECK(ELF32_R_TYPE(relocation->r_info) == R_ARM_RELATIVE); | 657 CHECK(ELF32_R_TYPE(relocation->r_info) == R_ARM_RELATIVE); |
| 648 | 658 |
| 649 // See if this relocation points into the current section. | 659 // See if this relocation points into the current section. |
| 650 if (relocation->r_offset >= section_start && | 660 if (relocation->r_offset >= section_start && |
| 651 relocation->r_offset < section_end) { | 661 relocation->r_offset < section_end) { |
| 652 Elf32_Addr byte_offset = relocation->r_offset - section_start; | 662 Elf32_Addr byte_offset = relocation->r_offset - section_start; |
| 653 Elf32_Off* target = reinterpret_cast<Elf32_Off*>(area + byte_offset); | 663 Elf32_Off* target = reinterpret_cast<Elf32_Off*>(area + byte_offset); |
| 654 | 664 |
| 655 // Is the relocation's target after the hole's start? | 665 // Is the relocation's target after the hole's start? |
| 656 if (*target > hole_start) { | 666 if (*target > hole_start) { |
| 657 | |
| 658 // Copy on first write. Recompute target to point into the newly | 667 // Copy on first write. Recompute target to point into the newly |
| 659 // allocated buffer. | 668 // allocated buffer. |
| 660 if (area == data->d_buf) { | 669 if (area == data->d_buf) { |
| 661 area = new uint8_t[data->d_size]; | 670 area = new uint8_t[data->d_size]; |
| 662 memcpy(area, data->d_buf, data->d_size); | 671 memcpy(area, data->d_buf, data->d_size); |
| 663 target = reinterpret_cast<Elf32_Off*>(area + byte_offset); | 672 target = reinterpret_cast<Elf32_Off*>(area + byte_offset); |
| 664 } | 673 } |
| 665 | 674 |
| 666 *target += hole_size; | 675 *target += hole_size; |
| 667 VLOG("relocation[%lu] target adjusted to %u\n", i, *target); | 676 VLOG("relocation[%lu] target adjusted to %u\n", i, *target); |
| 668 } | 677 } |
| 669 } | 678 } |
| 670 } | 679 } |
| 671 | 680 |
| 672 // If we applied any relocation to this section, write it back. | 681 // If we applied any relocation to this section, write it back. |
| 673 if (area != data->d_buf) { | 682 if (area != data->d_buf) { |
| 674 RewriteSectionData(data, area, data->d_size); | 683 RewriteSectionData(data, area, data->d_size); |
| 675 delete [] area; | 684 delete [] area; |
| 676 } | 685 } |
| 677 } | 686 } |
| 678 } | 687 } |
| 679 | 688 |
| 689 // Pad relocations with a given number of R_ARM_NONE relocations. | |
| 690 void PadRelocations(size_t count, | |
| 691 std::vector<Elf32_Rel>* relocations) { | |
| 692 const Elf32_Rel r_arm_none = {R_ARM_NONE, 0}; | |
| 693 std::vector<Elf32_Rel> padding(count, r_arm_none); | |
| 694 relocations->insert(relocations->end(), padding.begin(), padding.end()); | |
| 695 } | |
| 696 | |
| 680 // Adjust relocations so that the offset that they indicate will be correct | 697 // Adjust relocations so that the offset that they indicate will be correct |
| 681 // after the hole in .rel.dyn is added or removed (in effect, relocate the | 698 // after the hole in .rel.dyn is added or removed (in effect, relocate the |
| 682 // relocations). | 699 // relocations). |
| 683 void AdjustRelocations(Elf32_Off hole_start, | 700 void AdjustRelocations(Elf32_Off hole_start, |
| 684 size_t hole_size, | 701 size_t hole_size, |
| 685 std::vector<Elf32_Rel>* relocations) { | 702 std::vector<Elf32_Rel>* relocations) { |
| 686 for (size_t i = 0; i < relocations->size(); ++i) { | 703 for (size_t i = 0; i < relocations->size(); ++i) { |
| 687 Elf32_Rel* relocation = &relocations->at(i); | 704 Elf32_Rel* relocation = &relocations->at(i); |
| 688 if (relocation->r_offset > hole_start) { | 705 if (relocation->r_offset > hole_start) { |
| 689 relocation->r_offset += hole_size; | 706 relocation->r_offset += hole_size; |
| (...skipping 28 matching lines...) Expand all Loading... | |
| 718 // Filter relocations into those that are R_ARM_RELATIVE and others. | 735 // Filter relocations into those that are R_ARM_RELATIVE and others. |
| 719 for (size_t i = 0; i < relocations.size(); ++i) { | 736 for (size_t i = 0; i < relocations.size(); ++i) { |
| 720 const Elf32_Rel& relocation = relocations[i]; | 737 const Elf32_Rel& relocation = relocations[i]; |
| 721 if (ELF32_R_TYPE(relocation.r_info) == R_ARM_RELATIVE) { | 738 if (ELF32_R_TYPE(relocation.r_info) == R_ARM_RELATIVE) { |
| 722 CHECK(ELF32_R_SYM(relocation.r_info) == 0); | 739 CHECK(ELF32_R_SYM(relocation.r_info) == 0); |
| 723 relative_relocations.push_back(relocation); | 740 relative_relocations.push_back(relocation); |
| 724 } else { | 741 } else { |
| 725 other_relocations.push_back(relocation); | 742 other_relocations.push_back(relocation); |
| 726 } | 743 } |
| 727 } | 744 } |
| 728 VLOG("R_ARM_RELATIVE: %lu entries\n", relative_relocations.size()); | 745 LOG("R_ARM_RELATIVE: %lu entries\n", relative_relocations.size()); |
| 729 VLOG("Other : %lu entries\n", other_relocations.size()); | 746 LOG("Other : %lu entries\n", other_relocations.size()); |
| 730 VLOG("Total : %lu entries\n", relocations.size()); | 747 LOG("Total : %lu entries\n", relocations.size()); |
| 731 | 748 |
| 732 // If no relative relocations then we have nothing packable. Perhaps | 749 // If no relative relocations then we have nothing packable. Perhaps |
| 733 // the shared object has already been packed? | 750 // the shared object has already been packed? |
| 734 if (relative_relocations.empty()) { | 751 if (relative_relocations.empty()) { |
| 735 LOG("ERROR: No R_ARM_RELATIVE relocations found (already packed?)\n"); | 752 LOG("ERROR: No R_ARM_RELATIVE relocations found (already packed?)\n"); |
| 736 return false; | 753 return false; |
| 737 } | 754 } |
| 738 | 755 |
| 739 // Pre-calculate the size of the hole we will close up when we rewrite | |
| 740 // .rel.dyn. We have to adjust all relocation addresses to account for this. | |
| 741 Elf32_Shdr* section_header = elf32_getshdr(rel_dyn_section_); | |
| 742 const Elf32_Off hole_start = section_header->sh_offset; | |
| 743 const size_t hole_size = | |
| 744 relative_relocations.size() * sizeof(relative_relocations[0]); | |
| 745 | |
| 746 // Unless padding, pre-apply R_ARM_RELATIVE relocations to account for the | 756 // Unless padding, pre-apply R_ARM_RELATIVE relocations to account for the |
| 747 // hole, and pre-adjust all relocation offsets accordingly. | 757 // hole, and pre-adjust all relocation offsets accordingly. |
| 748 if (!is_padding_rel_dyn_) { | 758 if (!is_padding_rel_dyn_) { |
| 759 // Pre-calculate the size of the hole we will close up when we rewrite | |
| 760 // .rel.dyn. We have to adjust relocation addresses to account for this. | |
| 761 Elf32_Shdr* section_header = elf32_getshdr(rel_dyn_section_); | |
| 762 const Elf32_Off hole_start = section_header->sh_offset; | |
| 763 size_t hole_size = | |
| 764 relative_relocations.size() * sizeof(relative_relocations[0]); | |
| 765 const size_t unaligned_hole_size = hole_size; | |
| 766 | |
| 767 // Adjust the actual hole size to preserve alignment. | |
| 768 hole_size -= hole_size % kPreserveAlignment; | |
| 769 LOG("Compaction : %lu bytes\n", hole_size); | |
| 770 | |
| 771 // Adjusting for alignment may have removed any packing benefit. | |
| 772 if (hole_size == 0) { | |
| 773 LOG("Too few R_ARM_RELATIVE relocations to pack after alignment\n"); | |
| 774 return false; | |
| 775 } | |
| 776 | |
| 777 // Add R_ARM_NONE relocations to other_relocations to preserve alignment. | |
| 778 const size_t padding_bytes = unaligned_hole_size - hole_size; | |
| 779 CHECK(padding_bytes % sizeof(other_relocations[0]) == 0); | |
| 780 const size_t required = padding_bytes / sizeof(other_relocations[0]); | |
| 781 PadRelocations(required, &other_relocations); | |
| 782 LOG("Alignment pad : %lu relocations\n", required); | |
| 783 | |
| 749 // Apply relocations to all R_ARM_RELATIVE data to relocate it into the | 784 // Apply relocations to all R_ARM_RELATIVE data to relocate it into the |
| 750 // area it will occupy once the hole in .rel.dyn is removed. | 785 // area it will occupy once the hole in .rel.dyn is removed. |
| 751 AdjustRelocationTargets(elf_, hole_start, -hole_size, relative_relocations); | 786 AdjustRelocationTargets(elf_, hole_start, -hole_size, relative_relocations); |
| 752 // Relocate the relocations. | 787 // Relocate the relocations. |
| 753 AdjustRelocations(hole_start, -hole_size, &relative_relocations); | 788 AdjustRelocations(hole_start, -hole_size, &relative_relocations); |
| 754 AdjustRelocations(hole_start, -hole_size, &other_relocations); | 789 AdjustRelocations(hole_start, -hole_size, &other_relocations); |
| 755 } | 790 } |
| 756 | 791 |
| 757 // Pack R_ARM_RELATIVE relocations. | 792 // Pack R_ARM_RELATIVE relocations. |
| 758 const size_t initial_bytes = | 793 const size_t initial_bytes = |
| (...skipping 24 matching lines...) Expand all Loading... | |
| 783 | 818 |
| 784 // Make sure packing saved some space. | 819 // Make sure packing saved some space. |
| 785 if (packed_bytes >= initial_bytes) { | 820 if (packed_bytes >= initial_bytes) { |
| 786 LOG("Packing R_ARM_RELATIVE relocations saves no space\n"); | 821 LOG("Packing R_ARM_RELATIVE relocations saves no space\n"); |
| 787 return false; | 822 return false; |
| 788 } | 823 } |
| 789 | 824 |
| 790 // If padding, add R_ARM_NONE relocations to other_relocations to make it | 825 // If padding, add R_ARM_NONE relocations to other_relocations to make it |
| 791 // the same size as the original relocations we read in. This makes | 826 // the same size as the original relocations we read in. This makes |
| 792 // the ResizeSection() below a no-op. | 827 // the ResizeSection() below a no-op. |
| 793 if (is_padding_rel_dyn_) { | 828 if (is_padding_rel_dyn_) { |
|
rmcilroy
2014/06/18 09:52:29
Could you just do this in the "else" block of the
simonb1
2014/06/18 11:30:19
Done.
| |
| 794 const Elf32_Rel r_arm_none = {R_ARM_NONE, 0}; | |
| 795 const size_t required = relocations.size() - other_relocations.size(); | 829 const size_t required = relocations.size() - other_relocations.size(); |
| 796 std::vector<Elf32_Rel> padding(required, r_arm_none); | 830 PadRelocations(required, &other_relocations); |
| 797 other_relocations.insert( | |
| 798 other_relocations.end(), padding.begin(), padding.end()); | |
| 799 } | 831 } |
| 800 | 832 |
| 801 // Rewrite the current .rel.dyn section to be only the non-R_ARM_RELATIVE | 833 // Rewrite the current .rel.dyn section to be only the non-R_ARM_RELATIVE |
| 802 // relocations, then shrink it to size. | 834 // relocations, then shrink it to size. |
| 803 const void* section_data = &other_relocations[0]; | 835 const void* section_data = &other_relocations[0]; |
| 804 const size_t bytes = other_relocations.size() * sizeof(other_relocations[0]); | 836 const size_t bytes = other_relocations.size() * sizeof(other_relocations[0]); |
| 805 ResizeSection(elf_, rel_dyn_section_, bytes); | 837 ResizeSection(elf_, rel_dyn_section_, bytes); |
|
rmcilroy
2014/06/18 09:52:29
Do you need to pad this to kPreserveAlignment as w
simonb1
2014/06/18 11:30:19
Shouldn't. The R_ARM_NONE padding in other_reloca
rmcilroy
2014/06/18 13:01:07
Sorry - see below.
| |
| 806 RewriteSectionData(data, section_data, bytes); | 838 RewriteSectionData(data, section_data, bytes); |
| 807 | 839 |
| 808 // Rewrite the current .android.rel.dyn section to hold the packed | 840 // Rewrite the current .android.rel.dyn section to hold the packed |
| 809 // R_ARM_RELATIVE relocations. | 841 // R_ARM_RELATIVE relocations. |
| 810 data = GetSectionData(android_rel_dyn_section_); | 842 data = GetSectionData(android_rel_dyn_section_); |
| 811 ResizeSection(elf_, android_rel_dyn_section_, packed_bytes); | 843 ResizeSection(elf_, android_rel_dyn_section_, packed_bytes); |
| 812 RewriteSectionData(data, packed_data, packed_bytes); | 844 RewriteSectionData(data, packed_data, packed_bytes); |
|
rmcilroy
2014/06/18 13:01:07
Sorry I got the comment on the wrong line - I was
simonb1
2014/06/18 13:29:09
Happily, no. Libbfd locates .android.rel.dyn as t
| |
| 813 | 845 |
| 814 // Rewrite .dynamic to include two new tags describing .android.rel.dyn. | 846 // Rewrite .dynamic to include two new tags describing .android.rel.dyn. |
| 815 data = GetSectionData(dynamic_section_); | 847 data = GetSectionData(dynamic_section_); |
| 816 const Elf32_Dyn* dynamic_base = reinterpret_cast<Elf32_Dyn*>(data->d_buf); | 848 const Elf32_Dyn* dynamic_base = reinterpret_cast<Elf32_Dyn*>(data->d_buf); |
| 817 std::vector<Elf32_Dyn> dynamics( | 849 std::vector<Elf32_Dyn> dynamics( |
| 818 dynamic_base, | 850 dynamic_base, |
| 819 dynamic_base + data->d_size / sizeof(dynamics[0])); | 851 dynamic_base + data->d_size / sizeof(dynamics[0])); |
| 820 section_header = elf32_getshdr(android_rel_dyn_section_); | 852 Elf32_Shdr* section_header = elf32_getshdr(android_rel_dyn_section_); |
| 821 // Use two of the spare slots to describe the .android.rel.dyn section. | 853 // Use two of the spare slots to describe the .android.rel.dyn section. |
| 822 const Elf32_Dyn offset_dyn | 854 const Elf32_Dyn offset_dyn |
| 823 = {DT_ANDROID_ARM_REL_OFFSET, {section_header->sh_offset}}; | 855 = {DT_ANDROID_ARM_REL_OFFSET, {section_header->sh_offset}}; |
| 824 AddDynamicEntry(offset_dyn, &dynamics); | 856 AddDynamicEntry(offset_dyn, &dynamics); |
| 825 const Elf32_Dyn size_dyn | 857 const Elf32_Dyn size_dyn |
| 826 = {DT_ANDROID_ARM_REL_SIZE, {section_header->sh_size}}; | 858 = {DT_ANDROID_ARM_REL_SIZE, {section_header->sh_size}}; |
| 827 AddDynamicEntry(size_dyn, &dynamics); | 859 AddDynamicEntry(size_dyn, &dynamics); |
| 828 const void* dynamics_data = &dynamics[0]; | 860 const void* dynamics_data = &dynamics[0]; |
| 829 const size_t dynamics_bytes = dynamics.size() * sizeof(dynamics[0]); | 861 const size_t dynamics_bytes = dynamics.size() * sizeof(dynamics[0]); |
| 830 RewriteSectionData(data, dynamics_data, dynamics_bytes); | 862 RewriteSectionData(data, dynamics_data, dynamics_bytes); |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 891 ++padding; | 923 ++padding; |
| 892 } | 924 } |
| 893 } | 925 } |
| 894 LOG("R_ARM_RELATIVE: %lu entries\n", relative_relocations.size()); | 926 LOG("R_ARM_RELATIVE: %lu entries\n", relative_relocations.size()); |
| 895 LOG("Other : %lu entries\n", other_relocations.size()); | 927 LOG("Other : %lu entries\n", other_relocations.size()); |
| 896 | 928 |
| 897 // If we found the same number of R_ARM_NONE entries in .rel.dyn as we | 929 // If we found the same number of R_ARM_NONE entries in .rel.dyn as we |
| 898 // hold as unpacked relative relocations, then this is a padded file. | 930 // hold as unpacked relative relocations, then this is a padded file. |
| 899 const bool is_padded = padding == relative_relocations.size(); | 931 const bool is_padded = padding == relative_relocations.size(); |
| 900 | 932 |
| 901 // Pre-calculate the size of the hole we will open up when we rewrite | |
| 902 // .rel.dyn. We have to adjust all relocation addresses to account for this. | |
| 903 Elf32_Shdr* section_header = elf32_getshdr(rel_dyn_section_); | |
| 904 const Elf32_Off hole_start = section_header->sh_offset; | |
| 905 const size_t hole_size = | |
| 906 relative_relocations.size() * sizeof(relative_relocations[0]); | |
| 907 | |
| 908 // Unless padded, pre-apply R_ARM_RELATIVE relocations to account for the | 933 // Unless padded, pre-apply R_ARM_RELATIVE relocations to account for the |
| 909 // hole, and pre-adjust all relocation offsets accordingly. | 934 // hole, and pre-adjust all relocation offsets accordingly. |
| 910 if (!is_padded) { | 935 if (!is_padded) { |
| 936 // Pre-calculate the size of the hole we will open up when we rewrite | |
| 937 // .rel.dyn. We have to adjust relocation addresses to account for this. | |
| 938 Elf32_Shdr* section_header = elf32_getshdr(rel_dyn_section_); | |
| 939 const Elf32_Off hole_start = section_header->sh_offset; | |
| 940 size_t hole_size = | |
| 941 relative_relocations.size() * sizeof(relative_relocations[0]); | |
| 942 | |
| 943 // Adjust the hole size for the padding added to preserve alignment. | |
| 944 hole_size -= padding * sizeof(other_relocations[0]); | |
| 945 LOG("Expansion : %lu bytes\n", hole_size); | |
| 946 | |
| 911 // Apply relocations to all R_ARM_RELATIVE data to relocate it into the | 947 // Apply relocations to all R_ARM_RELATIVE data to relocate it into the |
| 912 // area it will occupy once the hole in .rel.dyn is opened. | 948 // area it will occupy once the hole in .rel.dyn is opened. |
| 913 AdjustRelocationTargets(elf_, hole_start, hole_size, relative_relocations); | 949 AdjustRelocationTargets(elf_, hole_start, hole_size, relative_relocations); |
| 914 // Relocate the relocations. | 950 // Relocate the relocations. |
| 915 AdjustRelocations(hole_start, hole_size, &relative_relocations); | 951 AdjustRelocations(hole_start, hole_size, &relative_relocations); |
| 916 AdjustRelocations(hole_start, hole_size, &other_relocations); | 952 AdjustRelocations(hole_start, hole_size, &other_relocations); |
| 917 } | 953 } |
| 918 | 954 |
| 919 // Rewrite the current .rel.dyn section to be the R_ARM_RELATIVE relocations | 955 // Rewrite the current .rel.dyn section to be the R_ARM_RELATIVE relocations |
| 920 // followed by other relocations. This is the usual order in which we find | 956 // followed by other relocations. This is the usual order in which we find |
| (...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 968 | 1004 |
| 969 // Clean up libelf, and truncate the output file to the number of bytes | 1005 // Clean up libelf, and truncate the output file to the number of bytes |
| 970 // written by elf_update(). | 1006 // written by elf_update(). |
| 971 elf_end(elf_); | 1007 elf_end(elf_); |
| 972 elf_ = NULL; | 1008 elf_ = NULL; |
| 973 const int truncate = ftruncate(fd_, file_bytes); | 1009 const int truncate = ftruncate(fd_, file_bytes); |
| 974 CHECK(truncate == 0); | 1010 CHECK(truncate == 0); |
| 975 } | 1011 } |
| 976 | 1012 |
| 977 } // namespace relocation_packer | 1013 } // namespace relocation_packer |
| OLD | NEW |