| Index: bfd/elf32-arm.c
|
| diff --git a/bfd/elf32-arm.c b/bfd/elf32-arm.c
|
| index bc1f19532cde4d5a48f7c88430ccdf8a7931cd3a..70d6ac09357cebcd6b6f25c993344c906d08dc58 100644
|
| --- a/bfd/elf32-arm.c
|
| +++ b/bfd/elf32-arm.c
|
| @@ -1,6 +1,5 @@
|
| /* 32-bit ELF support for ARM
|
| - Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
|
| - 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
|
| + Copyright 1998-2013 Free Software Foundation, Inc.
|
|
|
| This file is part of BFD, the Binary File Descriptor library.
|
|
|
| @@ -63,6 +62,9 @@
|
| #define ARM_ELF_ABI_VERSION 0
|
| #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
|
|
|
| +/* The Adjusted Place, as defined by AAELF. */
|
| +#define Pa(X) ((X) & 0xfffffffc)
|
| +
|
| static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
|
| struct bfd_link_info *link_info,
|
| asection *sec,
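
The Pa() macro added above is the AAELF "adjusted place": the address of the
place being relocated with its low two bits cleared, i.e. rounded down to a
4-byte boundary. A standalone sketch of the mask (not part of the patch; the
use by Thumb PC-relative relocations is my reading of the ABI):

    #include <stdio.h>
    #include <stdint.h>

    #define Pa(X) ((X) & 0xfffffffc)   /* same mask as the patch */

    int
    main (void)
    {
      uint32_t place = 0x00008006;     /* e.g. a Thumb halfword address */
      /* Prints 0x00008004: the word-aligned base that relocations
         defined in terms of Pa measure their offsets from.  */
      printf ("Pa(0x%08x) = 0x%08x\n", place, (uint32_t) Pa (place));
      return 0;
    }
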
|
| @@ -324,160 +326,160 @@ static reloc_howto_type elf32_arm_howto_table_1[] =
|
| /* Dynamic TLS relocations. */
|
|
|
| HOWTO (R_ARM_TLS_DTPMOD32, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 32, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - bfd_elf_generic_reloc, /* special_function */
|
| - "R_ARM_TLS_DTPMOD32", /* name */
|
| - TRUE, /* partial_inplace */
|
| - 0xffffffff, /* src_mask */
|
| - 0xffffffff, /* dst_mask */
|
| - FALSE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 32, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + bfd_elf_generic_reloc, /* special_function */
|
| + "R_ARM_TLS_DTPMOD32", /* name */
|
| + TRUE, /* partial_inplace */
|
| + 0xffffffff, /* src_mask */
|
| + 0xffffffff, /* dst_mask */
|
| + FALSE), /* pcrel_offset */
|
|
|
| HOWTO (R_ARM_TLS_DTPOFF32, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 32, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - bfd_elf_generic_reloc, /* special_function */
|
| - "R_ARM_TLS_DTPOFF32", /* name */
|
| - TRUE, /* partial_inplace */
|
| - 0xffffffff, /* src_mask */
|
| - 0xffffffff, /* dst_mask */
|
| - FALSE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 32, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + bfd_elf_generic_reloc, /* special_function */
|
| + "R_ARM_TLS_DTPOFF32", /* name */
|
| + TRUE, /* partial_inplace */
|
| + 0xffffffff, /* src_mask */
|
| + 0xffffffff, /* dst_mask */
|
| + FALSE), /* pcrel_offset */
|
|
|
| HOWTO (R_ARM_TLS_TPOFF32, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 32, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - bfd_elf_generic_reloc, /* special_function */
|
| - "R_ARM_TLS_TPOFF32", /* name */
|
| - TRUE, /* partial_inplace */
|
| - 0xffffffff, /* src_mask */
|
| - 0xffffffff, /* dst_mask */
|
| - FALSE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 32, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + bfd_elf_generic_reloc, /* special_function */
|
| + "R_ARM_TLS_TPOFF32", /* name */
|
| + TRUE, /* partial_inplace */
|
| + 0xffffffff, /* src_mask */
|
| + 0xffffffff, /* dst_mask */
|
| + FALSE), /* pcrel_offset */
|
|
|
| /* Relocs used in ARM Linux */
|
|
|
| HOWTO (R_ARM_COPY, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 32, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - bfd_elf_generic_reloc, /* special_function */
|
| - "R_ARM_COPY", /* name */
|
| - TRUE, /* partial_inplace */
|
| - 0xffffffff, /* src_mask */
|
| - 0xffffffff, /* dst_mask */
|
| - FALSE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 32, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + bfd_elf_generic_reloc, /* special_function */
|
| + "R_ARM_COPY", /* name */
|
| + TRUE, /* partial_inplace */
|
| + 0xffffffff, /* src_mask */
|
| + 0xffffffff, /* dst_mask */
|
| + FALSE), /* pcrel_offset */
|
|
|
| HOWTO (R_ARM_GLOB_DAT, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 32, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - bfd_elf_generic_reloc, /* special_function */
|
| - "R_ARM_GLOB_DAT", /* name */
|
| - TRUE, /* partial_inplace */
|
| - 0xffffffff, /* src_mask */
|
| - 0xffffffff, /* dst_mask */
|
| - FALSE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 32, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + bfd_elf_generic_reloc, /* special_function */
|
| + "R_ARM_GLOB_DAT", /* name */
|
| + TRUE, /* partial_inplace */
|
| + 0xffffffff, /* src_mask */
|
| + 0xffffffff, /* dst_mask */
|
| + FALSE), /* pcrel_offset */
|
|
|
| HOWTO (R_ARM_JUMP_SLOT, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 32, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - bfd_elf_generic_reloc, /* special_function */
|
| - "R_ARM_JUMP_SLOT", /* name */
|
| - TRUE, /* partial_inplace */
|
| - 0xffffffff, /* src_mask */
|
| - 0xffffffff, /* dst_mask */
|
| - FALSE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 32, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + bfd_elf_generic_reloc, /* special_function */
|
| + "R_ARM_JUMP_SLOT", /* name */
|
| + TRUE, /* partial_inplace */
|
| + 0xffffffff, /* src_mask */
|
| + 0xffffffff, /* dst_mask */
|
| + FALSE), /* pcrel_offset */
|
|
|
| HOWTO (R_ARM_RELATIVE, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 32, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - bfd_elf_generic_reloc, /* special_function */
|
| - "R_ARM_RELATIVE", /* name */
|
| - TRUE, /* partial_inplace */
|
| - 0xffffffff, /* src_mask */
|
| - 0xffffffff, /* dst_mask */
|
| - FALSE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 32, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + bfd_elf_generic_reloc, /* special_function */
|
| + "R_ARM_RELATIVE", /* name */
|
| + TRUE, /* partial_inplace */
|
| + 0xffffffff, /* src_mask */
|
| + 0xffffffff, /* dst_mask */
|
| + FALSE), /* pcrel_offset */
|
|
|
| HOWTO (R_ARM_GOTOFF32, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 32, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - bfd_elf_generic_reloc, /* special_function */
|
| - "R_ARM_GOTOFF32", /* name */
|
| - TRUE, /* partial_inplace */
|
| - 0xffffffff, /* src_mask */
|
| - 0xffffffff, /* dst_mask */
|
| - FALSE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 32, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + bfd_elf_generic_reloc, /* special_function */
|
| + "R_ARM_GOTOFF32", /* name */
|
| + TRUE, /* partial_inplace */
|
| + 0xffffffff, /* src_mask */
|
| + 0xffffffff, /* dst_mask */
|
| + FALSE), /* pcrel_offset */
|
|
|
| HOWTO (R_ARM_GOTPC, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 32, /* bitsize */
|
| - TRUE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - bfd_elf_generic_reloc, /* special_function */
|
| - "R_ARM_GOTPC", /* name */
|
| - TRUE, /* partial_inplace */
|
| - 0xffffffff, /* src_mask */
|
| - 0xffffffff, /* dst_mask */
|
| - TRUE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 32, /* bitsize */
|
| + TRUE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + bfd_elf_generic_reloc, /* special_function */
|
| + "R_ARM_GOTPC", /* name */
|
| + TRUE, /* partial_inplace */
|
| + 0xffffffff, /* src_mask */
|
| + 0xffffffff, /* dst_mask */
|
| + TRUE), /* pcrel_offset */
|
|
|
| HOWTO (R_ARM_GOT32, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 32, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - bfd_elf_generic_reloc, /* special_function */
|
| - "R_ARM_GOT32", /* name */
|
| - TRUE, /* partial_inplace */
|
| - 0xffffffff, /* src_mask */
|
| - 0xffffffff, /* dst_mask */
|
| - FALSE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 32, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + bfd_elf_generic_reloc, /* special_function */
|
| + "R_ARM_GOT32", /* name */
|
| + TRUE, /* partial_inplace */
|
| + 0xffffffff, /* src_mask */
|
| + 0xffffffff, /* dst_mask */
|
| + FALSE), /* pcrel_offset */
|
|
|
| HOWTO (R_ARM_PLT32, /* type */
|
| - 2, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 24, /* bitsize */
|
| - TRUE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - bfd_elf_generic_reloc, /* special_function */
|
| - "R_ARM_PLT32", /* name */
|
| - FALSE, /* partial_inplace */
|
| - 0x00ffffff, /* src_mask */
|
| - 0x00ffffff, /* dst_mask */
|
| - TRUE), /* pcrel_offset */
|
| + 2, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 24, /* bitsize */
|
| + TRUE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + bfd_elf_generic_reloc, /* special_function */
|
| + "R_ARM_PLT32", /* name */
|
| + FALSE, /* partial_inplace */
|
| + 0x00ffffff, /* src_mask */
|
| + 0x00ffffff, /* dst_mask */
|
| + TRUE), /* pcrel_offset */
|
|
|
| HOWTO (R_ARM_CALL, /* type */
|
| 2, /* rightshift */
|
| @@ -1484,33 +1486,33 @@ static reloc_howto_type elf32_arm_howto_table_1[] =
|
|
|
| /* GNU extension to record C++ vtable member usage */
|
| HOWTO (R_ARM_GNU_VTENTRY, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 0, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_dont, /* complain_on_overflow */
|
| - _bfd_elf_rel_vtable_reloc_fn, /* special_function */
|
| - "R_ARM_GNU_VTENTRY", /* name */
|
| - FALSE, /* partial_inplace */
|
| - 0, /* src_mask */
|
| - 0, /* dst_mask */
|
| - FALSE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 0, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_dont, /* complain_on_overflow */
|
| + _bfd_elf_rel_vtable_reloc_fn, /* special_function */
|
| + "R_ARM_GNU_VTENTRY", /* name */
|
| + FALSE, /* partial_inplace */
|
| + 0, /* src_mask */
|
| + 0, /* dst_mask */
|
| + FALSE), /* pcrel_offset */
|
|
|
| /* GNU extension to record C++ vtable hierarchy */
|
| HOWTO (R_ARM_GNU_VTINHERIT, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 0, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_dont, /* complain_on_overflow */
|
| - NULL, /* special_function */
|
| - "R_ARM_GNU_VTINHERIT", /* name */
|
| - FALSE, /* partial_inplace */
|
| - 0, /* src_mask */
|
| - 0, /* dst_mask */
|
| - FALSE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 0, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_dont, /* complain_on_overflow */
|
| + NULL, /* special_function */
|
| + "R_ARM_GNU_VTINHERIT", /* name */
|
| + FALSE, /* partial_inplace */
|
| + 0, /* src_mask */
|
| + 0, /* dst_mask */
|
| + FALSE), /* pcrel_offset */
|
|
|
| HOWTO (R_ARM_THM_JUMP11, /* type */
|
| 1, /* rightshift */
|
| @@ -1542,74 +1544,74 @@ static reloc_howto_type elf32_arm_howto_table_1[] =
|
|
|
| /* TLS relocations */
|
| HOWTO (R_ARM_TLS_GD32, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 32, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - NULL, /* special_function */
|
| - "R_ARM_TLS_GD32", /* name */
|
| - TRUE, /* partial_inplace */
|
| - 0xffffffff, /* src_mask */
|
| - 0xffffffff, /* dst_mask */
|
| - FALSE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 32, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + NULL, /* special_function */
|
| + "R_ARM_TLS_GD32", /* name */
|
| + TRUE, /* partial_inplace */
|
| + 0xffffffff, /* src_mask */
|
| + 0xffffffff, /* dst_mask */
|
| + FALSE), /* pcrel_offset */
|
|
|
| HOWTO (R_ARM_TLS_LDM32, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 32, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - bfd_elf_generic_reloc, /* special_function */
|
| - "R_ARM_TLS_LDM32", /* name */
|
| - TRUE, /* partial_inplace */
|
| - 0xffffffff, /* src_mask */
|
| - 0xffffffff, /* dst_mask */
|
| - FALSE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 32, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + bfd_elf_generic_reloc, /* special_function */
|
| + "R_ARM_TLS_LDM32", /* name */
|
| + TRUE, /* partial_inplace */
|
| + 0xffffffff, /* src_mask */
|
| + 0xffffffff, /* dst_mask */
|
| + FALSE), /* pcrel_offset */
|
|
|
| HOWTO (R_ARM_TLS_LDO32, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 32, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - bfd_elf_generic_reloc, /* special_function */
|
| - "R_ARM_TLS_LDO32", /* name */
|
| - TRUE, /* partial_inplace */
|
| - 0xffffffff, /* src_mask */
|
| - 0xffffffff, /* dst_mask */
|
| - FALSE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 32, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + bfd_elf_generic_reloc, /* special_function */
|
| + "R_ARM_TLS_LDO32", /* name */
|
| + TRUE, /* partial_inplace */
|
| + 0xffffffff, /* src_mask */
|
| + 0xffffffff, /* dst_mask */
|
| + FALSE), /* pcrel_offset */
|
|
|
| HOWTO (R_ARM_TLS_IE32, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 32, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - NULL, /* special_function */
|
| - "R_ARM_TLS_IE32", /* name */
|
| - TRUE, /* partial_inplace */
|
| - 0xffffffff, /* src_mask */
|
| - 0xffffffff, /* dst_mask */
|
| - FALSE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 32, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + NULL, /* special_function */
|
| + "R_ARM_TLS_IE32", /* name */
|
| + TRUE, /* partial_inplace */
|
| + 0xffffffff, /* src_mask */
|
| + 0xffffffff, /* dst_mask */
|
| + FALSE), /* pcrel_offset */
|
|
|
| HOWTO (R_ARM_TLS_LE32, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 32, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - bfd_elf_generic_reloc, /* special_function */
|
| - "R_ARM_TLS_LE32", /* name */
|
| - TRUE, /* partial_inplace */
|
| - 0xffffffff, /* src_mask */
|
| - 0xffffffff, /* dst_mask */
|
| - FALSE), /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 32, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + bfd_elf_generic_reloc, /* special_function */
|
| + "R_ARM_TLS_LE32", /* name */
|
| + TRUE, /* partial_inplace */
|
| + 0xffffffff, /* src_mask */
|
| + 0xffffffff, /* dst_mask */
|
| + FALSE), /* pcrel_offset */
|
|
|
| HOWTO (R_ARM_TLS_LDO12, /* type */
|
| 0, /* rightshift */
|
| @@ -1693,18 +1695,18 @@ static reloc_howto_type elf32_arm_howto_table_1[] =
|
| static reloc_howto_type elf32_arm_howto_table_2[1] =
|
| {
|
| HOWTO (R_ARM_IRELATIVE, /* type */
|
| - 0, /* rightshift */
|
| - 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| - 32, /* bitsize */
|
| - FALSE, /* pc_relative */
|
| - 0, /* bitpos */
|
| - complain_overflow_bitfield,/* complain_on_overflow */
|
| - bfd_elf_generic_reloc, /* special_function */
|
| - "R_ARM_IRELATIVE", /* name */
|
| - TRUE, /* partial_inplace */
|
| - 0xffffffff, /* src_mask */
|
| - 0xffffffff, /* dst_mask */
|
| - FALSE) /* pcrel_offset */
|
| + 0, /* rightshift */
|
| + 2, /* size (0 = byte, 1 = short, 2 = long) */
|
| + 32, /* bitsize */
|
| + FALSE, /* pc_relative */
|
| + 0, /* bitpos */
|
| + complain_overflow_bitfield,/* complain_on_overflow */
|
| + bfd_elf_generic_reloc, /* special_function */
|
| + "R_ARM_IRELATIVE", /* name */
|
| + TRUE, /* partial_inplace */
|
| + 0xffffffff, /* src_mask */
|
| + 0xffffffff, /* dst_mask */
|
| + FALSE) /* pcrel_offset */
|
| };
|
|
|
| /* 249-255 extended, currently unused, relocations: */
|
| @@ -1942,10 +1944,10 @@ elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
|
|
|
| case 148: /* Linux/ARM 32-bit. */
|
| /* pr_cursig */
|
| - elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
|
| + elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
|
|
|
| /* pr_pid */
|
| - elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24);
|
| + elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
|
|
|
| /* pr_reg */
|
| offset = 72;
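
The core_signal/core_lwpid renamings in the hunk above, and the core_pid,
core_program and core_command renamings in the hunks that follow, track a
BFD-wide change that moved these fields into a separate structure reached
through elf_tdata (abfd)->core. A sketch of the field set implied by the
accesses in this patch (struct name and field order are assumptions, not
taken from the patch):

    struct core_fields_sketch
    {
      int signal;        /* pr_cursig: 16 bits at note offset 12 */
      int lwpid;         /* pr_pid: 32 bits at note offset 24 */
      int pid;           /* prpsinfo pid: 32 bits at offset 12 */
      char *program;     /* 16 bytes at prpsinfo offset 28 */
      char *command;     /* 80 bytes at prpsinfo offset 44 */
    };
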
|
| @@ -1968,11 +1970,11 @@ elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
|
| return FALSE;
|
|
|
| case 124: /* Linux/ARM elf_prpsinfo. */
|
| - elf_tdata (abfd)->core_pid
|
| + elf_tdata (abfd)->core->pid
|
| = bfd_get_32 (abfd, note->descdata + 12);
|
| - elf_tdata (abfd)->core_program
|
| + elf_tdata (abfd)->core->program
|
| = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
|
| - elf_tdata (abfd)->core_command
|
| + elf_tdata (abfd)->core->command
|
| = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
|
| }
|
|
|
| @@ -1980,7 +1982,7 @@ elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
|
| onto the end of the args in some (at least one anyway)
|
| implementations, so strip it off if it exists. */
|
| {
|
| - char *command = elf_tdata (abfd)->core_command;
|
| + char *command = elf_tdata (abfd)->core->command;
|
| int n = strlen (command);
|
|
|
| if (0 < n && command[n - 1] == ' ')
|
| @@ -2095,7 +2097,7 @@ static const unsigned long dl_tlsdesc_lazy_trampoline [] =
|
| 0xe081100f, /* 2: add r1, pc */
|
| 0xe12fff12, /* bx r2 */
|
| 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
|
| - + dl_tlsdesc_lazy_resolver(GOT) */
|
| + + dl_tlsdesc_lazy_resolver(GOT) */
|
| 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
|
| };
|
|
|
| @@ -2209,21 +2211,21 @@ static const bfd_vma elf32_arm_nacl_plt0_entry [] =
|
| 0xe08cc00f, /* add ip, ip, pc */
|
| 0xe52dc008, /* str ip, [sp, #-8]! */
|
| /* Second bundle: */
|
| - 0xe7dfcf1f, /* bfc ip, #30, #2 */
|
| - 0xe59cc000, /* ldr ip, [ip] */
|
| + 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
|
| + 0xe59cc000, /* ldr ip, [ip] */
|
| 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
|
| - 0xe12fff1c, /* bx ip */
|
| + 0xe12fff1c, /* bx ip */
|
| /* Third bundle: */
|
| - 0xe320f000, /* nop */
|
| - 0xe320f000, /* nop */
|
| - 0xe320f000, /* nop */
|
| + 0xe320f000, /* nop */
|
| + 0xe320f000, /* nop */
|
| + 0xe320f000, /* nop */
|
| /* .Lplt_tail: */
|
| 0xe50dc004, /* str ip, [sp, #-4] */
|
| /* Fourth bundle: */
|
| - 0xe7dfcf1f, /* bfc ip, #30, #2 */
|
| - 0xe59cc000, /* ldr ip, [ip] */
|
| + 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
|
| + 0xe59cc000, /* ldr ip, [ip] */
|
| 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
|
| - 0xe12fff1c, /* bx ip */
|
| + 0xe12fff1c, /* bx ip */
|
| };
|
| #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
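
In the PLT0 hunk above, the two "bfc ip, #30, #2" instructions are replaced by
"bic ip, ip, #0xc0000000"; both clear the top two address bits (as I read it,
the NaCl sandbox mask), while code addresses are additionally masked with
0xc000000f so an indirect branch can only land on a 16-byte bundle boundary.
A sketch of the two masks implied by those immediates (not the linker's own
code):

    #include <stdint.h>

    static inline uint32_t
    nacl_mask_data_address (uint32_t addr)
    {
      return addr & ~0xc0000000u;   /* bic ip, ip, #0xc0000000 */
    }

    static inline uint32_t
    nacl_mask_code_address (uint32_t addr)
    {
      /* Clears bits 0-3 too, forcing 16-byte bundle alignment.  */
      return addr & ~0xc000000fu;   /* bic ip, ip, #0xc000000f */
    }
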
|
|
|
| @@ -2413,6 +2415,33 @@ static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
|
| DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
|
| };
|
|
|
| +/* NaCl ARM -> ARM long branch stub. */
|
| +static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
|
| +{
|
| + ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
|
| + ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
|
| + ARM_INSN (0xe12fff1c), /* bx ip */
|
| + ARM_INSN (0xe320f000), /* nop */
|
| + ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
|
| + DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
|
| + DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
|
| + DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
|
| +};
|
| +
|
| +/* NaCl ARM -> ARM long branch stub, PIC. */
|
| +static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
|
| +{
|
| + ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
|
| + ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
|
| + ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
|
| + ARM_INSN (0xe12fff1c), /* bx ip */
|
| + ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
|
| + DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
|
| + DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
|
| + DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
|
| +};
|
| +
|
| +
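
Each of the two new NaCl stub templates above is eight 32-bit words long
(five instructions plus three literal words), i.e. exactly two 16-byte NaCl
bundles, with the bkpt and zero words padding out the second bundle that holds
the literal pool; a later hunk makes arm_stub_required_alignment() return 16
for these stub types. A trivial check of the size arithmetic:

    #include <assert.h>

    int
    main (void)
    {
      const int nacl_bundle_size = 16;
      const int stub_words = 5 + 3;            /* insns + data words above */
      assert (stub_words * 4 == 2 * nacl_bundle_size);
      return 0;
    }
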
|
| /* Cortex-A8 erratum-workaround stubs. */
|
|
|
| /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
|
| @@ -2489,6 +2518,8 @@ static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
|
| DEF_STUB(long_branch_thumb_only_pic) \
|
| DEF_STUB(long_branch_any_tls_pic) \
|
| DEF_STUB(long_branch_v4t_thumb_tls_pic) \
|
| + DEF_STUB(long_branch_arm_nacl) \
|
| + DEF_STUB(long_branch_arm_nacl_pic) \
|
| DEF_STUB(a8_veneer_b_cond) \
|
| DEF_STUB(a8_veneer_b) \
|
| DEF_STUB(a8_veneer_bl) \
|
| @@ -2981,7 +3012,7 @@ struct elf32_arm_link_hash_table
|
| bfd *stub_bfd;
|
|
|
| /* Linker call-backs. */
|
| - asection * (*add_stub_section) (const char *, asection *);
|
| + asection * (*add_stub_section) (const char *, asection *, unsigned int);
|
| void (*layout_sections_again) (void);
|
|
|
| /* Array to keep track of which stub sections have been created, and
|
| @@ -3001,8 +3032,8 @@ struct elf32_arm_link_hash_table
|
|
|
| static struct bfd_hash_entry *
|
| elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
|
| - struct bfd_hash_table * table,
|
| - const char * string)
|
| + struct bfd_hash_table * table,
|
| + const char * string)
|
| {
|
| struct elf32_arm_link_hash_entry * ret =
|
| (struct elf32_arm_link_hash_entry *) entry;
|
| @@ -3011,7 +3042,7 @@ elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
|
| subclass. */
|
| if (ret == NULL)
|
| ret = (struct elf32_arm_link_hash_entry *)
|
| - bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
|
| + bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
|
| if (ret == NULL)
|
| return (struct bfd_hash_entry *) ret;
|
|
|
| @@ -3185,7 +3216,7 @@ stub_hash_newfunc (struct bfd_hash_entry *entry,
|
| if (entry == NULL)
|
| {
|
| entry = (struct bfd_hash_entry *)
|
| - bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
|
| + bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
|
| if (entry == NULL)
|
| return entry;
|
| }
|
| @@ -3414,7 +3445,7 @@ elf32_arm_link_hash_table_create (bfd *abfd)
|
| struct elf32_arm_link_hash_table *ret;
|
| bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
|
|
|
| - ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
|
| + ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
|
| if (ret == NULL)
|
| return NULL;
|
|
|
| @@ -3427,27 +3458,7 @@ elf32_arm_link_hash_table_create (bfd *abfd)
|
| return NULL;
|
| }
|
|
|
| - ret->sdynbss = NULL;
|
| - ret->srelbss = NULL;
|
| - ret->srelplt2 = NULL;
|
| - ret->dt_tlsdesc_plt = 0;
|
| - ret->dt_tlsdesc_got = 0;
|
| - ret->tls_trampoline = 0;
|
| - ret->next_tls_desc_index = 0;
|
| - ret->num_tls_desc = 0;
|
| - ret->thumb_glue_size = 0;
|
| - ret->arm_glue_size = 0;
|
| - ret->bx_glue_size = 0;
|
| - memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
|
| ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
|
| - ret->vfp11_erratum_glue_size = 0;
|
| - ret->num_vfp11_fixes = 0;
|
| - ret->fix_cortex_a8 = 0;
|
| - ret->fix_arm1176 = 0;
|
| - ret->bfd_of_glue_owner = NULL;
|
| - ret->byteswap_code = 0;
|
| - ret->target1_is_rel = 0;
|
| - ret->target2_reloc = R_ARM_NONE;
|
| #ifdef FOUR_WORD_PLT
|
| ret->plt_header_size = 16;
|
| ret->plt_entry_size = 16;
|
| @@ -3455,23 +3466,8 @@ elf32_arm_link_hash_table_create (bfd *abfd)
|
| ret->plt_header_size = 20;
|
| ret->plt_entry_size = 12;
|
| #endif
|
| - ret->fix_v4bx = 0;
|
| - ret->use_blx = 0;
|
| - ret->vxworks_p = 0;
|
| - ret->symbian_p = 0;
|
| - ret->nacl_p = 0;
|
| ret->use_rel = 1;
|
| - ret->sym_cache.abfd = NULL;
|
| ret->obfd = abfd;
|
| - ret->tls_ldm_got.refcount = 0;
|
| - ret->stub_bfd = NULL;
|
| - ret->add_stub_section = NULL;
|
| - ret->layout_sections_again = NULL;
|
| - ret->stub_group = NULL;
|
| - ret->top_id = 0;
|
| - ret->bfd_count = 0;
|
| - ret->top_index = 0;
|
| - ret->input_list = NULL;
|
|
|
| if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
|
| sizeof (struct elf32_arm_stub_hash_entry)))
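
The deletions in the hunk above (and in the one before it) are enabled by the
earlier switch from bfd_malloc to bfd_zmalloc when allocating the hash table:
zero-filled memory makes every explicit initialisation to 0 / NULL /
R_ARM_NONE redundant, and only vfp11_fix keeps its assignment (presumably
because BFD_ARM_VFP11_FIX_NONE is not the zero value of that enum). A sketch
of what the zmalloc-style allocation adds over plain malloc:

    #include <stdlib.h>
    #include <string.h>

    static void *
    zmalloc_like (size_t size)
    {
      void *p = malloc (size);
      if (p != NULL)
        memset (p, 0, size);   /* the zero fill that makes the stores redundant */
      return p;
    }
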
|
| @@ -3492,7 +3488,7 @@ elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
|
| = (struct elf32_arm_link_hash_table *) hash;
|
|
|
| bfd_hash_table_free (&ret->stub_hash_table);
|
| - _bfd_generic_link_hash_table_free (hash);
|
| + _bfd_elf_link_hash_table_free (hash);
|
| }
|
|
|
| /* Determine if we're dealing with a Thumb only architecture. */
|
| @@ -3613,6 +3609,12 @@ arm_type_of_stub (struct bfd_link_info *info,
|
|
|
| r_type = ELF32_R_TYPE (rel->r_info);
|
|
|
| + /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
|
| + are considering a function call relocation. */
|
| + if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
|
| + && branch_type == ST_BRANCH_TO_ARM)
|
| + branch_type = ST_BRANCH_TO_THUMB;
|
| +
|
| /* For TLS call relocs, it is the caller's responsibility to provide
|
| the address of the appropriate trampoline. */
|
| if (r_type != R_ARM_TLS_CALL
|
| @@ -3657,11 +3659,11 @@ arm_type_of_stub (struct bfd_link_info *info,
|
| {
|
| /* Handle cases where:
|
| - this call goes too far (different Thumb/Thumb2 max
|
| - distance)
|
| + distance)
|
| - it's a Thumb->Arm call and blx is not available, or it's a
|
| - Thumb->Arm branch (not bl). A stub is needed in this case,
|
| - but only if this call is not through a PLT entry. Indeed,
|
| - PLT stubs handle mode switching already.
|
| + Thumb->Arm branch (not bl). A stub is needed in this case,
|
| + but only if this call is not through a PLT entry. Indeed,
|
| + PLT stubs handle mode switching already.
|
| */
|
| if ((!thumb2
|
| && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
|
| @@ -3805,9 +3807,13 @@ arm_type_of_stub (struct bfd_link_info *info,
|
| ? (r_type == R_ARM_TLS_CALL
|
| /* TLS PIC Stub */
|
| ? arm_stub_long_branch_any_tls_pic
|
| - : arm_stub_long_branch_any_arm_pic)
|
| + : (globals->nacl_p
|
| + ? arm_stub_long_branch_arm_nacl_pic
|
| + : arm_stub_long_branch_any_arm_pic))
|
| /* non-PIC stubs. */
|
| - : arm_stub_long_branch_any_any;
|
| + : (globals->nacl_p
|
| + ? arm_stub_long_branch_arm_nacl
|
| + : arm_stub_long_branch_any_any);
|
| }
|
| }
|
| }
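
With this hunk, the choice of ARM-to-ARM long-branch stub forks on
globals->nacl_p, so sandboxed links get the bundle-sized NaCl stubs defined
earlier instead of the generic ones. The selection reduces to roughly the
following (a simplified sketch using this file's stub enumerators; the real
code also handles the TLS-call case shown in the context lines):

    static enum elf32_arm_stub_type
    pick_arm_to_arm_long_branch_stub (bfd_boolean pic, bfd_boolean nacl_p)
    {
      if (pic)
        return nacl_p ? arm_stub_long_branch_arm_nacl_pic
                      : arm_stub_long_branch_any_arm_pic;
      return nacl_p ? arm_stub_long_branch_arm_nacl
                    : arm_stub_long_branch_any_any;
    }
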
|
| @@ -3943,7 +3949,8 @@ elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
|
|
|
| memcpy (s_name, link_sec->name, namelen);
|
| memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
|
| - stub_sec = (*htab->add_stub_section) (s_name, link_sec);
|
| + stub_sec = (*htab->add_stub_section) (s_name, link_sec,
|
| + htab->nacl_p ? 4 : 3);
|
| if (stub_sec == NULL)
|
| return NULL;
|
| htab->stub_group[link_sec->id].stub_sec = stub_sec;
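
The add_stub_section callback (whose prototype gained a third parameter in an
earlier hunk) is now passed the required stub-section alignment as a power of
two: 2^4 = 16 bytes for NaCl, so a stub section starts on a bundle boundary,
and 2^3 = 8 bytes otherwise. A one-liner showing the arithmetic (assuming the
argument is a log2 alignment, in the style of BFD section alignment_power
values):

    static unsigned int
    stub_section_alignment_bytes (int nacl_p)
    {
      unsigned int alignment_power = nacl_p ? 4 : 3;
      return 1u << alignment_power;   /* 16 for NaCl bundles, 8 otherwise */
    }
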
|
| @@ -4076,6 +4083,10 @@ arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
|
| case arm_stub_a8_veneer_blx:
|
| return 4;
|
|
|
| + case arm_stub_long_branch_arm_nacl:
|
| + case arm_stub_long_branch_arm_nacl_pic:
|
| + return 16;
|
| +
|
| default:
|
| abort (); /* Should be unreachable. */
|
| }
|
| @@ -4085,7 +4096,7 @@ static bfd_boolean
|
| arm_build_one_stub (struct bfd_hash_entry *gen_entry,
|
| void * in_arg)
|
| {
|
| -#define MAXRELOCS 2
|
| +#define MAXRELOCS 3
|
| struct elf32_arm_stub_hash_entry *stub_entry;
|
| struct elf32_arm_link_hash_table *globals;
|
| struct bfd_link_info *info;
|
| @@ -4140,11 +4151,11 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
|
| bfd_vma data = (bfd_vma) template_sequence[i].data;
|
| if (template_sequence[i].reloc_addend != 0)
|
| {
|
| - /* We've borrowed the reloc_addend field to mean we should
|
| - insert a condition code into this (Thumb-1 branch)
|
| - instruction. See THUMB16_BCOND_INSN. */
|
| - BFD_ASSERT ((data & 0xff00) == 0xd000);
|
| - data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
|
| + /* We've borrowed the reloc_addend field to mean we should
|
| + insert a condition code into this (Thumb-1 branch)
|
| + instruction. See THUMB16_BCOND_INSN. */
|
| + BFD_ASSERT ((data & 0xff00) == 0xd000);
|
| + data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
|
| }
|
| bfd_put_16 (stub_bfd, data, loc + size);
|
| size += 2;
|
| @@ -4157,13 +4168,13 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
|
| loc + size);
|
| bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
|
| loc + size + 2);
|
| - if (template_sequence[i].r_type != R_ARM_NONE)
|
| - {
|
| - stub_reloc_idx[nrelocs] = i;
|
| - stub_reloc_offset[nrelocs++] = size;
|
| - }
|
| - size += 4;
|
| - break;
|
| + if (template_sequence[i].r_type != R_ARM_NONE)
|
| + {
|
| + stub_reloc_idx[nrelocs] = i;
|
| + stub_reloc_offset[nrelocs++] = size;
|
| + }
|
| + size += 4;
|
| + break;
|
|
|
| case ARM_TYPE:
|
| bfd_put_32 (stub_bfd, template_sequence[i].data,
|
| @@ -4221,7 +4232,7 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
|
|
|
| rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
|
| rel.r_info = ELF32_R_INFO (0,
|
| - template_sequence[stub_reloc_idx[i]].r_type);
|
| + template_sequence[stub_reloc_idx[i]].r_type);
|
| rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
|
|
|
| if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
|
| @@ -4254,7 +4265,7 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
|
|
|
| rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
|
| rel.r_info = ELF32_R_INFO (0,
|
| - template_sequence[stub_reloc_idx[i]].r_type);
|
| + template_sequence[stub_reloc_idx[i]].r_type);
|
| rel.r_addend = 0;
|
|
|
| elf32_arm_final_link_relocate (elf32_arm_howto_from_type
|
| @@ -4484,15 +4495,15 @@ group_sections (struct elf32_arm_link_hash_table *htab,
|
| #define NEXT_SEC PREV_SEC
|
| head = NULL;
|
| while (tail != NULL)
|
| - {
|
| - /* Pop from tail. */
|
| - asection *item = tail;
|
| - tail = PREV_SEC (item);
|
| + {
|
| + /* Pop from tail. */
|
| + asection *item = tail;
|
| + tail = PREV_SEC (item);
|
|
|
| - /* Push on head. */
|
| - NEXT_SEC (item) = head;
|
| - head = item;
|
| - }
|
| + /* Push on head. */
|
| + NEXT_SEC (item) = head;
|
| + head = item;
|
| + }
|
|
|
| while (head != NULL)
|
| {
|
| @@ -4611,65 +4622,65 @@ cortex_a8_erratum_scan (bfd *input_bfd,
|
| bfd_vma base_vma;
|
|
|
| if (elf_section_type (section) != SHT_PROGBITS
|
| - || (elf_section_flags (section) & SHF_EXECINSTR) == 0
|
| - || (section->flags & SEC_EXCLUDE) != 0
|
| - || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
|
| - || (section->output_section == bfd_abs_section_ptr))
|
| - continue;
|
| + || (elf_section_flags (section) & SHF_EXECINSTR) == 0
|
| + || (section->flags & SEC_EXCLUDE) != 0
|
| + || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
|
| + || (section->output_section == bfd_abs_section_ptr))
|
| + continue;
|
|
|
| base_vma = section->output_section->vma + section->output_offset;
|
|
|
| if (elf_section_data (section)->this_hdr.contents != NULL)
|
| - contents = elf_section_data (section)->this_hdr.contents;
|
| + contents = elf_section_data (section)->this_hdr.contents;
|
| else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
|
| - return TRUE;
|
| + return TRUE;
|
|
|
| sec_data = elf32_arm_section_data (section);
|
|
|
| for (span = 0; span < sec_data->mapcount; span++)
|
| - {
|
| - unsigned int span_start = sec_data->map[span].vma;
|
| - unsigned int span_end = (span == sec_data->mapcount - 1)
|
| - ? section->size : sec_data->map[span + 1].vma;
|
| - unsigned int i;
|
| - char span_type = sec_data->map[span].type;
|
| - bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
|
| -
|
| - if (span_type != 't')
|
| - continue;
|
| -
|
| - /* Span is entirely within a single 4KB region: skip scanning. */
|
| - if (((base_vma + span_start) & ~0xfff)
|
| + {
|
| + unsigned int span_start = sec_data->map[span].vma;
|
| + unsigned int span_end = (span == sec_data->mapcount - 1)
|
| + ? section->size : sec_data->map[span + 1].vma;
|
| + unsigned int i;
|
| + char span_type = sec_data->map[span].type;
|
| + bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
|
| +
|
| + if (span_type != 't')
|
| + continue;
|
| +
|
| + /* Span is entirely within a single 4KB region: skip scanning. */
|
| + if (((base_vma + span_start) & ~0xfff)
|
| == ((base_vma + span_end) & ~0xfff))
|
| - continue;
|
| -
|
| - /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
|
| -
|
| - * The opcode is BLX.W, BL.W, B.W, Bcc.W
|
| - * The branch target is in the same 4KB region as the
|
| - first half of the branch.
|
| - * The instruction before the branch is a 32-bit
|
| - length non-branch instruction. */
|
| - for (i = span_start; i < span_end;)
|
| - {
|
| - unsigned int insn = bfd_getl16 (&contents[i]);
|
| - bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
|
| + continue;
|
| +
|
| + /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
|
| +
|
| + * The opcode is BLX.W, BL.W, B.W, Bcc.W
|
| + * The branch target is in the same 4KB region as the
|
| + first half of the branch.
|
| + * The instruction before the branch is a 32-bit
|
| + length non-branch instruction. */
|
| + for (i = span_start; i < span_end;)
|
| + {
|
| + unsigned int insn = bfd_getl16 (&contents[i]);
|
| + bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
|
| bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
|
|
|
| - if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
|
| - insn_32bit = TRUE;
|
| + if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
|
| + insn_32bit = TRUE;
|
|
|
| if (insn_32bit)
|
| - {
|
| - /* Load the rest of the insn (in manual-friendly order). */
|
| - insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
|
| -
|
| - /* Encoding T4: B<c>.W. */
|
| - is_b = (insn & 0xf800d000) == 0xf0009000;
|
| - /* Encoding T1: BL<c>.W. */
|
| - is_bl = (insn & 0xf800d000) == 0xf000d000;
|
| - /* Encoding T2: BLX<c>.W. */
|
| - is_blx = (insn & 0xf800d000) == 0xf000c000;
|
| + {
|
| + /* Load the rest of the insn (in manual-friendly order). */
|
| + insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
|
| +
|
| + /* Encoding T4: B<c>.W. */
|
| + is_b = (insn & 0xf800d000) == 0xf0009000;
|
| + /* Encoding T1: BL<c>.W. */
|
| + is_bl = (insn & 0xf800d000) == 0xf000d000;
|
| + /* Encoding T2: BLX<c>.W. */
|
| + is_blx = (insn & 0xf800d000) == 0xf000c000;
|
| /* Encoding T3: B<c>.W (not permitted in IT block). */
|
| is_bcc = (insn & 0xf800d000) == 0xf0008000
|
| && (insn & 0x07f00000) != 0x03800000;
|
| @@ -4677,25 +4688,25 @@ cortex_a8_erratum_scan (bfd *input_bfd,
|
|
|
| is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
|
|
|
| - if (((base_vma + i) & 0xfff) == 0xffe
|
| + if (((base_vma + i) & 0xfff) == 0xffe
|
| && insn_32bit
|
| && is_32bit_branch
|
| && last_was_32bit
|
| && ! last_was_branch)
|
| - {
|
| - bfd_signed_vma offset = 0;
|
| - bfd_boolean force_target_arm = FALSE;
|
| + {
|
| + bfd_signed_vma offset = 0;
|
| + bfd_boolean force_target_arm = FALSE;
|
| bfd_boolean force_target_thumb = FALSE;
|
| - bfd_vma target;
|
| - enum elf32_arm_stub_type stub_type = arm_stub_none;
|
| - struct a8_erratum_reloc key, *found;
|
| - bfd_boolean use_plt = FALSE;
|
| + bfd_vma target;
|
| + enum elf32_arm_stub_type stub_type = arm_stub_none;
|
| + struct a8_erratum_reloc key, *found;
|
| + bfd_boolean use_plt = FALSE;
|
|
|
| - key.from = base_vma + i;
|
| - found = (struct a8_erratum_reloc *)
|
| - bsearch (&key, a8_relocs, num_a8_relocs,
|
| - sizeof (struct a8_erratum_reloc),
|
| - &a8_reloc_compare);
|
| + key.from = base_vma + i;
|
| + found = (struct a8_erratum_reloc *)
|
| + bsearch (&key, a8_relocs, num_a8_relocs,
|
| + sizeof (struct a8_erratum_reloc),
|
| + &a8_reloc_compare);
|
|
|
| if (found)
|
| {
|
| @@ -4703,7 +4714,7 @@ cortex_a8_erratum_scan (bfd *input_bfd,
|
| struct elf_link_hash_entry *entry;
|
|
|
| /* We don't care about the error returned from this
|
| - function, only if there is glue or not. */
|
| + function, only if there is glue or not. */
|
| entry = find_thumb_glue (info, found->sym_name,
|
| &error_message);
|
|
|
| @@ -4725,7 +4736,7 @@ cortex_a8_erratum_scan (bfd *input_bfd,
|
| }
|
| }
|
|
|
| - /* Check if we have an offending branch instruction. */
|
| + /* Check if we have an offending branch instruction. */
|
|
|
| if (found && found->non_a8_stub)
|
| /* We've already made a stub for this instruction, e.g.
|
| @@ -4733,46 +4744,46 @@ cortex_a8_erratum_scan (bfd *input_bfd,
|
| stub will suffice to work around the A8 erratum (see
|
| setting of always_after_branch above). */
|
| ;
|
| - else if (is_bcc)
|
| - {
|
| - offset = (insn & 0x7ff) << 1;
|
| - offset |= (insn & 0x3f0000) >> 4;
|
| - offset |= (insn & 0x2000) ? 0x40000 : 0;
|
| - offset |= (insn & 0x800) ? 0x80000 : 0;
|
| - offset |= (insn & 0x4000000) ? 0x100000 : 0;
|
| - if (offset & 0x100000)
|
| - offset |= ~ ((bfd_signed_vma) 0xfffff);
|
| - stub_type = arm_stub_a8_veneer_b_cond;
|
| - }
|
| - else if (is_b || is_bl || is_blx)
|
| - {
|
| - int s = (insn & 0x4000000) != 0;
|
| - int j1 = (insn & 0x2000) != 0;
|
| - int j2 = (insn & 0x800) != 0;
|
| - int i1 = !(j1 ^ s);
|
| - int i2 = !(j2 ^ s);
|
| -
|
| - offset = (insn & 0x7ff) << 1;
|
| - offset |= (insn & 0x3ff0000) >> 4;
|
| - offset |= i2 << 22;
|
| - offset |= i1 << 23;
|
| - offset |= s << 24;
|
| - if (offset & 0x1000000)
|
| - offset |= ~ ((bfd_signed_vma) 0xffffff);
|
| -
|
| - if (is_blx)
|
| - offset &= ~ ((bfd_signed_vma) 3);
|
| -
|
| - stub_type = is_blx ? arm_stub_a8_veneer_blx :
|
| - is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
|
| - }
|
| -
|
| - if (stub_type != arm_stub_none)
|
| - {
|
| - bfd_vma pc_for_insn = base_vma + i + 4;
|
| + else if (is_bcc)
|
| + {
|
| + offset = (insn & 0x7ff) << 1;
|
| + offset |= (insn & 0x3f0000) >> 4;
|
| + offset |= (insn & 0x2000) ? 0x40000 : 0;
|
| + offset |= (insn & 0x800) ? 0x80000 : 0;
|
| + offset |= (insn & 0x4000000) ? 0x100000 : 0;
|
| + if (offset & 0x100000)
|
| + offset |= ~ ((bfd_signed_vma) 0xfffff);
|
| + stub_type = arm_stub_a8_veneer_b_cond;
|
| + }
|
| + else if (is_b || is_bl || is_blx)
|
| + {
|
| + int s = (insn & 0x4000000) != 0;
|
| + int j1 = (insn & 0x2000) != 0;
|
| + int j2 = (insn & 0x800) != 0;
|
| + int i1 = !(j1 ^ s);
|
| + int i2 = !(j2 ^ s);
|
| +
|
| + offset = (insn & 0x7ff) << 1;
|
| + offset |= (insn & 0x3ff0000) >> 4;
|
| + offset |= i2 << 22;
|
| + offset |= i1 << 23;
|
| + offset |= s << 24;
|
| + if (offset & 0x1000000)
|
| + offset |= ~ ((bfd_signed_vma) 0xffffff);
|
| +
|
| + if (is_blx)
|
| + offset &= ~ ((bfd_signed_vma) 3);
|
| +
|
| + stub_type = is_blx ? arm_stub_a8_veneer_blx :
|
| + is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
|
| + }
|
| +
|
| + if (stub_type != arm_stub_none)
|
| + {
|
| + bfd_vma pc_for_insn = base_vma + i + 4;
|
|
|
| /* The original instruction is a BL, but the target is
|
| - an ARM instruction. If we were not making a stub,
|
| + an ARM instruction. If we were not making a stub,
|
| the BL would have been converted to a BLX. Use the
|
| BLX stub instead in that case. */
|
| if (htab->use_blx && force_target_arm
|
| @@ -4793,43 +4804,43 @@ cortex_a8_erratum_scan (bfd *input_bfd,
|
| is_bl = TRUE;
|
| }
|
|
|
| - if (is_blx)
|
| - pc_for_insn &= ~ ((bfd_vma) 3);
|
| + if (is_blx)
|
| + pc_for_insn &= ~ ((bfd_vma) 3);
|
|
|
| - /* If we found a relocation, use the proper destination,
|
| - not the offset in the (unrelocated) instruction.
|
| + /* If we found a relocation, use the proper destination,
|
| + not the offset in the (unrelocated) instruction.
|
| Note this is always done if we switched the stub type
|
| above. */
|
| - if (found)
|
| - offset =
|
| + if (found)
|
| + offset =
|
| (bfd_signed_vma) (found->destination - pc_for_insn);
|
|
|
| - /* If the stub will use a Thumb-mode branch to a
|
| - PLT target, redirect it to the preceding Thumb
|
| - entry point. */
|
| - if (stub_type != arm_stub_a8_veneer_blx && use_plt)
|
| - offset -= PLT_THUMB_STUB_SIZE;
|
| + /* If the stub will use a Thumb-mode branch to a
|
| + PLT target, redirect it to the preceding Thumb
|
| + entry point. */
|
| + if (stub_type != arm_stub_a8_veneer_blx && use_plt)
|
| + offset -= PLT_THUMB_STUB_SIZE;
|
|
|
| - target = pc_for_insn + offset;
|
| + target = pc_for_insn + offset;
|
|
|
| - /* The BLX stub is ARM-mode code. Adjust the offset to
|
| - take the different PC value (+8 instead of +4) into
|
| + /* The BLX stub is ARM-mode code. Adjust the offset to
|
| + take the different PC value (+8 instead of +4) into
|
| account. */
|
| - if (stub_type == arm_stub_a8_veneer_blx)
|
| - offset += 4;
|
| -
|
| - if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
|
| - {
|
| - char *stub_name = NULL;
|
| -
|
| - if (num_a8_fixes == a8_fix_table_size)
|
| - {
|
| - a8_fix_table_size *= 2;
|
| - a8_fixes = (struct a8_erratum_fix *)
|
| - bfd_realloc (a8_fixes,
|
| - sizeof (struct a8_erratum_fix)
|
| - * a8_fix_table_size);
|
| - }
|
| + if (stub_type == arm_stub_a8_veneer_blx)
|
| + offset += 4;
|
| +
|
| + if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
|
| + {
|
| + char *stub_name = NULL;
|
| +
|
| + if (num_a8_fixes == a8_fix_table_size)
|
| + {
|
| + a8_fix_table_size *= 2;
|
| + a8_fixes = (struct a8_erratum_fix *)
|
| + bfd_realloc (a8_fixes,
|
| + sizeof (struct a8_erratum_fix)
|
| + * a8_fix_table_size);
|
| + }
|
|
|
| if (num_a8_fixes < prev_num_a8_fixes)
|
| {
|
| @@ -4854,29 +4865,29 @@ cortex_a8_erratum_scan (bfd *input_bfd,
|
| sprintf (stub_name, "%x:%x", section->id, i);
|
| }
|
|
|
| - a8_fixes[num_a8_fixes].input_bfd = input_bfd;
|
| - a8_fixes[num_a8_fixes].section = section;
|
| - a8_fixes[num_a8_fixes].offset = i;
|
| - a8_fixes[num_a8_fixes].addend = offset;
|
| - a8_fixes[num_a8_fixes].orig_insn = insn;
|
| - a8_fixes[num_a8_fixes].stub_name = stub_name;
|
| - a8_fixes[num_a8_fixes].stub_type = stub_type;
|
| - a8_fixes[num_a8_fixes].branch_type =
|
| + a8_fixes[num_a8_fixes].input_bfd = input_bfd;
|
| + a8_fixes[num_a8_fixes].section = section;
|
| + a8_fixes[num_a8_fixes].offset = i;
|
| + a8_fixes[num_a8_fixes].addend = offset;
|
| + a8_fixes[num_a8_fixes].orig_insn = insn;
|
| + a8_fixes[num_a8_fixes].stub_name = stub_name;
|
| + a8_fixes[num_a8_fixes].stub_type = stub_type;
|
| + a8_fixes[num_a8_fixes].branch_type =
|
| is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
|
|
|
| - num_a8_fixes++;
|
| - }
|
| - }
|
| - }
|
| + num_a8_fixes++;
|
| + }
|
| + }
|
| + }
|
|
|
| - i += insn_32bit ? 4 : 2;
|
| - last_was_32bit = insn_32bit;
|
| + i += insn_32bit ? 4 : 2;
|
| + last_was_32bit = insn_32bit;
|
| last_was_branch = is_32bit_branch;
|
| - }
|
| - }
|
| + }
|
| + }
|
|
|
| if (elf_section_data (section)->this_hdr.contents == NULL)
|
| - free (contents);
|
| + free (contents);
|
| }
|
|
|
| *a8_fixes_p = a8_fixes;
|
| @@ -4897,7 +4908,8 @@ elf32_arm_size_stubs (bfd *output_bfd,
|
| bfd *stub_bfd,
|
| struct bfd_link_info *info,
|
| bfd_signed_vma group_size,
|
| - asection * (*add_stub_section) (const char *, asection *),
|
| + asection * (*add_stub_section) (const char *, asection *,
|
| + unsigned int),
|
| void (*layout_sections_again) (void))
|
| {
|
| bfd_size_type stub_group_size;
|
| @@ -4914,9 +4926,9 @@ elf32_arm_size_stubs (bfd *output_bfd,
|
| if (htab->fix_cortex_a8)
|
| {
|
| a8_fixes = (struct a8_erratum_fix *)
|
| - bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
|
| + bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
|
| a8_relocs = (struct a8_erratum_reloc *)
|
| - bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
|
| + bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
|
| }
|
|
|
| /* Propagate mach to stub bfd, because it may not have been
|
| @@ -4984,6 +4996,9 @@ elf32_arm_size_stubs (bfd *output_bfd,
|
| asection *section;
|
| Elf_Internal_Sym *local_syms = NULL;
|
|
|
| + if (!is_arm_elf (input_bfd))
|
| + continue;
|
| +
|
| num_a8_relocs = 0;
|
|
|
| /* We'll need the symbol table in a second. */
|
| @@ -5255,99 +5270,99 @@ elf32_arm_size_stubs (bfd *output_bfd,
|
| goto error_ret_free_internal;
|
| }
|
|
|
| - stub_entry->target_value = sym_value;
|
| - stub_entry->target_section = sym_sec;
|
| - stub_entry->stub_type = stub_type;
|
| - stub_entry->h = hash;
|
| - stub_entry->branch_type = branch_type;
|
| -
|
| - if (sym_name == NULL)
|
| - sym_name = "unnamed";
|
| - stub_entry->output_name = (char *)
|
| - bfd_alloc (htab->stub_bfd,
|
| - sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
|
| - + strlen (sym_name));
|
| - if (stub_entry->output_name == NULL)
|
| - {
|
| - free (stub_name);
|
| - goto error_ret_free_internal;
|
| - }
|
| -
|
| - /* For historical reasons, use the existing names for
|
| - ARM-to-Thumb and Thumb-to-ARM stubs. */
|
| - if ((r_type == (unsigned int) R_ARM_THM_CALL
|
| + stub_entry->target_value = sym_value;
|
| + stub_entry->target_section = sym_sec;
|
| + stub_entry->stub_type = stub_type;
|
| + stub_entry->h = hash;
|
| + stub_entry->branch_type = branch_type;
|
| +
|
| + if (sym_name == NULL)
|
| + sym_name = "unnamed";
|
| + stub_entry->output_name = (char *)
|
| + bfd_alloc (htab->stub_bfd,
|
| + sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
|
| + + strlen (sym_name));
|
| + if (stub_entry->output_name == NULL)
|
| + {
|
| + free (stub_name);
|
| + goto error_ret_free_internal;
|
| + }
|
| +
|
| + /* For historical reasons, use the existing names for
|
| + ARM-to-Thumb and Thumb-to-ARM stubs. */
|
| + if ((r_type == (unsigned int) R_ARM_THM_CALL
|
| || r_type == (unsigned int) R_ARM_THM_JUMP24)
|
| && branch_type == ST_BRANCH_TO_ARM)
|
| - sprintf (stub_entry->output_name,
|
| - THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
|
| - else if ((r_type == (unsigned int) R_ARM_CALL
|
| + sprintf (stub_entry->output_name,
|
| + THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
|
| + else if ((r_type == (unsigned int) R_ARM_CALL
|
| || r_type == (unsigned int) R_ARM_JUMP24)
|
| && branch_type == ST_BRANCH_TO_THUMB)
|
| - sprintf (stub_entry->output_name,
|
| - ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
|
| - else
|
| - sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
|
| - sym_name);
|
| -
|
| - stub_changed = TRUE;
|
| - }
|
| - while (0);
|
| -
|
| - /* Look for relocations which might trigger Cortex-A8
|
| - erratum. */
|
| - if (htab->fix_cortex_a8
|
| - && (r_type == (unsigned int) R_ARM_THM_JUMP24
|
| - || r_type == (unsigned int) R_ARM_THM_JUMP19
|
| - || r_type == (unsigned int) R_ARM_THM_CALL
|
| - || r_type == (unsigned int) R_ARM_THM_XPC22))
|
| - {
|
| - bfd_vma from = section->output_section->vma
|
| - + section->output_offset
|
| - + irela->r_offset;
|
| -
|
| - if ((from & 0xfff) == 0xffe)
|
| - {
|
| - /* Found a candidate. Note we haven't checked the
|
| - destination is within 4K here: if we do so (and
|
| - don't create an entry in a8_relocs) we can't tell
|
| - that a branch should have been relocated when
|
| - scanning later. */
|
| - if (num_a8_relocs == a8_reloc_table_size)
|
| - {
|
| - a8_reloc_table_size *= 2;
|
| - a8_relocs = (struct a8_erratum_reloc *)
|
| - bfd_realloc (a8_relocs,
|
| - sizeof (struct a8_erratum_reloc)
|
| - * a8_reloc_table_size);
|
| - }
|
| -
|
| - a8_relocs[num_a8_relocs].from = from;
|
| - a8_relocs[num_a8_relocs].destination = destination;
|
| - a8_relocs[num_a8_relocs].r_type = r_type;
|
| - a8_relocs[num_a8_relocs].branch_type = branch_type;
|
| - a8_relocs[num_a8_relocs].sym_name = sym_name;
|
| - a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
|
| - a8_relocs[num_a8_relocs].hash = hash;
|
| -
|
| - num_a8_relocs++;
|
| - }
|
| - }
|
| + sprintf (stub_entry->output_name,
|
| + ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
|
| + else
|
| + sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
|
| + sym_name);
|
| +
|
| + stub_changed = TRUE;
|
| + }
|
| + while (0);
|
| +
|
| + /* Look for relocations which might trigger Cortex-A8
|
| + erratum. */
|
| + if (htab->fix_cortex_a8
|
| + && (r_type == (unsigned int) R_ARM_THM_JUMP24
|
| + || r_type == (unsigned int) R_ARM_THM_JUMP19
|
| + || r_type == (unsigned int) R_ARM_THM_CALL
|
| + || r_type == (unsigned int) R_ARM_THM_XPC22))
|
| + {
|
| + bfd_vma from = section->output_section->vma
|
| + + section->output_offset
|
| + + irela->r_offset;
|
| +
|
| + if ((from & 0xfff) == 0xffe)
|
| + {
|
| + /* Found a candidate. Note we haven't checked the
|
| + destination is within 4K here: if we do so (and
|
| + don't create an entry in a8_relocs) we can't tell
|
| + that a branch should have been relocated when
|
| + scanning later. */
|
| + if (num_a8_relocs == a8_reloc_table_size)
|
| + {
|
| + a8_reloc_table_size *= 2;
|
| + a8_relocs = (struct a8_erratum_reloc *)
|
| + bfd_realloc (a8_relocs,
|
| + sizeof (struct a8_erratum_reloc)
|
| + * a8_reloc_table_size);
|
| + }
|
| +
|
| + a8_relocs[num_a8_relocs].from = from;
|
| + a8_relocs[num_a8_relocs].destination = destination;
|
| + a8_relocs[num_a8_relocs].r_type = r_type;
|
| + a8_relocs[num_a8_relocs].branch_type = branch_type;
|
| + a8_relocs[num_a8_relocs].sym_name = sym_name;
|
| + a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
|
| + a8_relocs[num_a8_relocs].hash = hash;
|
| +
|
| + num_a8_relocs++;
|
| + }
|
| + }
|
| }
|
|
|
| - /* We're done with the internal relocs, free them. */
|
| - if (elf_section_data (section)->relocs == NULL)
|
| - free (internal_relocs);
|
| - }
|
| + /* We're done with the internal relocs, free them. */
|
| + if (elf_section_data (section)->relocs == NULL)
|
| + free (internal_relocs);
|
| + }
|
|
|
| - if (htab->fix_cortex_a8)
|
| + if (htab->fix_cortex_a8)
|
| {
|
| - /* Sort relocs which might apply to Cortex-A8 erratum. */
|
| - qsort (a8_relocs, num_a8_relocs,
|
| + /* Sort relocs which might apply to Cortex-A8 erratum. */
|
| + qsort (a8_relocs, num_a8_relocs,
|
| sizeof (struct a8_erratum_reloc),
|
| - &a8_reloc_compare);
|
| + &a8_reloc_compare);
|
|
|
| - /* Scan for branches which might trigger Cortex-A8 erratum. */
|
| - if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
|
| + /* Scan for branches which might trigger Cortex-A8 erratum. */
|
| + if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
|
| &num_a8_fixes, &a8_fix_table_size,
|
| a8_relocs, num_a8_relocs,
|
| prev_num_a8_fixes, &stub_changed)
|
| @@ -5357,7 +5372,7 @@ elf32_arm_size_stubs (bfd *output_bfd,
|
| }
|
|
|
| if (prev_num_a8_fixes != num_a8_fixes)
|
| - stub_changed = TRUE;
|
| + stub_changed = TRUE;
|
|
|
| if (!stub_changed)
|
| break;
|
| @@ -5379,18 +5394,18 @@ elf32_arm_size_stubs (bfd *output_bfd,
|
|
|
| /* Add Cortex-A8 erratum veneers to stub section sizes too. */
|
| if (htab->fix_cortex_a8)
|
| - for (i = 0; i < num_a8_fixes; i++)
|
| - {
|
| + for (i = 0; i < num_a8_fixes; i++)
|
| + {
|
| stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
|
| a8_fixes[i].section, htab);
|
|
|
| if (stub_sec == NULL)
|
| goto error_ret_free_local;
|
|
|
| - stub_sec->size
|
| - += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
|
| - NULL);
|
| - }
|
| + stub_sec->size
|
| + += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
|
| + NULL);
|
| + }
|
|
|
|
|
| /* Ask the linker to do its stuff. */
|
| @@ -5401,47 +5416,47 @@ elf32_arm_size_stubs (bfd *output_bfd,
|
| if (htab->fix_cortex_a8)
|
| {
|
| for (i = 0; i < num_a8_fixes; i++)
|
| - {
|
| - struct elf32_arm_stub_hash_entry *stub_entry;
|
| - char *stub_name = a8_fixes[i].stub_name;
|
| - asection *section = a8_fixes[i].section;
|
| - unsigned int section_id = a8_fixes[i].section->id;
|
| - asection *link_sec = htab->stub_group[section_id].link_sec;
|
| - asection *stub_sec = htab->stub_group[section_id].stub_sec;
|
| - const insn_sequence *template_sequence;
|
| - int template_size, size = 0;
|
| -
|
| - stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
|
| - TRUE, FALSE);
|
| - if (stub_entry == NULL)
|
| - {
|
| - (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
|
| - section->owner,
|
| - stub_name);
|
| - return FALSE;
|
| - }
|
| -
|
| - stub_entry->stub_sec = stub_sec;
|
| - stub_entry->stub_offset = 0;
|
| - stub_entry->id_sec = link_sec;
|
| - stub_entry->stub_type = a8_fixes[i].stub_type;
|
| - stub_entry->target_section = a8_fixes[i].section;
|
| - stub_entry->target_value = a8_fixes[i].offset;
|
| - stub_entry->target_addend = a8_fixes[i].addend;
|
| - stub_entry->orig_insn = a8_fixes[i].orig_insn;
|
| + {
|
| + struct elf32_arm_stub_hash_entry *stub_entry;
|
| + char *stub_name = a8_fixes[i].stub_name;
|
| + asection *section = a8_fixes[i].section;
|
| + unsigned int section_id = a8_fixes[i].section->id;
|
| + asection *link_sec = htab->stub_group[section_id].link_sec;
|
| + asection *stub_sec = htab->stub_group[section_id].stub_sec;
|
| + const insn_sequence *template_sequence;
|
| + int template_size, size = 0;
|
| +
|
| + stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
|
| + TRUE, FALSE);
|
| + if (stub_entry == NULL)
|
| + {
|
| + (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
|
| + section->owner,
|
| + stub_name);
|
| + return FALSE;
|
| + }
|
| +
|
| + stub_entry->stub_sec = stub_sec;
|
| + stub_entry->stub_offset = 0;
|
| + stub_entry->id_sec = link_sec;
|
| + stub_entry->stub_type = a8_fixes[i].stub_type;
|
| + stub_entry->target_section = a8_fixes[i].section;
|
| + stub_entry->target_value = a8_fixes[i].offset;
|
| + stub_entry->target_addend = a8_fixes[i].addend;
|
| + stub_entry->orig_insn = a8_fixes[i].orig_insn;
|
| stub_entry->branch_type = a8_fixes[i].branch_type;
|
|
|
| - size = find_stub_size_and_template (a8_fixes[i].stub_type,
|
| - &template_sequence,
|
| - &template_size);
|
| + size = find_stub_size_and_template (a8_fixes[i].stub_type,
|
| + &template_sequence,
|
| + &template_size);
|
|
|
| - stub_entry->stub_size = size;
|
| - stub_entry->stub_template = template_sequence;
|
| - stub_entry->stub_template_size = template_size;
|
| - }
|
| + stub_entry->stub_size = size;
|
| + stub_entry->stub_template = template_sequence;
|
| + stub_entry->stub_template_size = template_size;
|
| + }
|
|
|
| /* Stash the Cortex-A8 erratum fix array for use later in
|
| - elf32_arm_write_section(). */
|
| + elf32_arm_write_section(). */
|
| htab->a8_erratum_fixes = a8_fixes;
|
| htab->num_a8_erratum_fixes = num_a8_fixes;
|
| }
|
| @@ -5521,7 +5536,7 @@ find_thumb_glue (struct bfd_link_info *link_info,
|
| return NULL;
|
|
|
| tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
|
| - + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
|
| + + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
|
|
|
| BFD_ASSERT (tmp_name);
|
|
|
| @@ -5557,7 +5572,7 @@ find_arm_glue (struct bfd_link_info *link_info,
|
| return NULL;
|
|
|
| tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
|
| - + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
|
| + + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
|
|
|
| BFD_ASSERT (tmp_name);
|
|
|
| @@ -5624,12 +5639,12 @@ static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
|
| nop ldr r6, __func_addr
|
| .arm mov lr, pc
|
| b func bx r6
|
| - .arm
|
| - ;; back_to_thumb
|
| - ldmia r13! {r6, lr}
|
| - bx lr
|
| - __func_addr:
|
| - .word func */
|
| + .arm
|
| + ;; back_to_thumb
|
| + ldmia r13! {r6, lr}
|
| + bx lr
|
| + __func_addr:
|
| + .word func */
|
|
|
| #define THUMB2ARM_GLUE_SIZE 8
|
| static const insn16 t2a1_bx_pc_insn = 0x4778;
|
| @@ -5726,7 +5741,7 @@ record_arm_to_thumb_glue (struct bfd_link_info * link_info,
|
| BFD_ASSERT (s != NULL);
|
|
|
| tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
|
| - + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
|
| + + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
|
|
|
| BFD_ASSERT (tmp_name);
|
|
|
| @@ -5817,8 +5832,8 @@ record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
|
| bh = NULL;
|
| val = globals->bx_glue_size;
|
| _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
|
| - tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
|
| - NULL, TRUE, FALSE, &bh);
|
| + tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
|
| + NULL, TRUE, FALSE, &bh);
|
|
|
| myh = (struct elf_link_hash_entry *) bh;
|
| myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
|
| @@ -5841,7 +5856,7 @@ elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
|
| if (sec_data->map == NULL)
|
| {
|
| sec_data->map = (elf32_arm_section_map *)
|
| - bfd_malloc (sizeof (elf32_arm_section_map));
|
| + bfd_malloc (sizeof (elf32_arm_section_map));
|
| sec_data->mapcount = 0;
|
| sec_data->mapsize = 1;
|
| }
|
| @@ -5852,8 +5867,8 @@ elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
|
| {
|
| sec_data->mapsize *= 2;
|
| sec_data->map = (elf32_arm_section_map *)
|
| - bfd_realloc_or_free (sec_data->map, sec_data->mapsize
|
| - * sizeof (elf32_arm_section_map));
|
| + bfd_realloc_or_free (sec_data->map, sec_data->mapsize
|
| + * sizeof (elf32_arm_section_map));
|
| }
|
|
|
| if (sec_data->map)
|
| @@ -5869,10 +5884,10 @@ elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
|
|
|
| static bfd_vma
|
| record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
|
| - elf32_vfp11_erratum_list *branch,
|
| - bfd *branch_bfd,
|
| - asection *branch_sec,
|
| - unsigned int offset)
|
| + elf32_vfp11_erratum_list *branch,
|
| + bfd *branch_bfd,
|
| + asection *branch_sec,
|
| + unsigned int offset)
|
| {
|
| asection *s;
|
| struct elf32_arm_link_hash_table *hash_table;
|
| @@ -5895,7 +5910,7 @@ record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
|
| BFD_ASSERT (s != NULL);
|
|
|
| tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
|
| - (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
|
| + (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
|
|
|
| BFD_ASSERT (tmp_name);
|
|
|
| @@ -5910,8 +5925,8 @@ record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
|
| bh = NULL;
|
| val = hash_table->vfp11_erratum_glue_size;
|
| _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
|
| - tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
|
| - NULL, TRUE, FALSE, &bh);
|
| + tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
|
| + NULL, TRUE, FALSE, &bh);
|
|
|
| myh = (struct elf_link_hash_entry *) bh;
|
| myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
|
| @@ -5958,20 +5973,20 @@ record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
|
| {
|
| bh = NULL;
|
| /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
|
| - ever requires this erratum fix. */
|
| + ever requires this erratum fix. */
|
| _bfd_generic_link_add_one_symbol (link_info,
|
| hash_table->bfd_of_glue_owner, "$a",
|
| BSF_LOCAL, s, 0, NULL,
|
| - TRUE, FALSE, &bh);
|
| + TRUE, FALSE, &bh);
|
|
|
| myh = (struct elf_link_hash_entry *) bh;
|
| myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
|
| myh->forced_local = 1;
|
|
|
| /* The elf32_arm_init_maps function only cares about symbols from input
|
| - BFDs. We must make a note of this generated mapping symbol
|
| - ourselves so that code byteswapping works properly in
|
| - elf32_arm_write_section. */
|
| + BFDs. We must make a note of this generated mapping symbol
|
| + ourselves so that code byteswapping works properly in
|
| + elf32_arm_write_section. */
|
| elf32_arm_section_map_add (s, 'a', 0);
|
| }
|
|
|
| @@ -6204,8 +6219,8 @@ bfd_elf32_arm_process_before_allocation (bfd *abfd,
|
| {
|
| case R_ARM_PC24:
|
| /* This one is a call from arm code. We need to look up
|
| - the target of the call. If it is a thumb target, we
|
| - insert glue. */
|
| + the target of the call. If it is a thumb target, we
|
| + insert glue. */
|
| if (h->target_internal == ST_BRANCH_TO_THUMB)
|
| record_arm_to_thumb_glue (link_info, h);
|
| break;
|
| @@ -6277,15 +6292,15 @@ bfd_elf32_arm_init_maps (bfd *abfd)
|
| const char *name;
|
|
|
| if (sec != NULL
|
| - && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
|
| - {
|
| - name = bfd_elf_string_from_elf_section (abfd,
|
| - hdr->sh_link, isym->st_name);
|
| + && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
|
| + {
|
| + name = bfd_elf_string_from_elf_section (abfd,
|
| + hdr->sh_link, isym->st_name);
|
|
|
| - if (bfd_is_arm_special_symbol_name (name,
|
| + if (bfd_is_arm_special_symbol_name (name,
|
| BFD_ARM_SPECIAL_SYM_TYPE_MAP))
|
| - elf32_arm_section_map_add (sec, name[1], isym->st_value);
|
| - }
|
| + elf32_arm_section_map_add (sec, name[1], isym->st_value);
|
| + }
|
| }
|
| }
|
|
|
| @@ -6327,17 +6342,17 @@ bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
|
| if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
|
| {
|
| switch (globals->vfp11_fix)
|
| - {
|
| - case BFD_ARM_VFP11_FIX_DEFAULT:
|
| - case BFD_ARM_VFP11_FIX_NONE:
|
| - globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
|
| - break;
|
| -
|
| - default:
|
| - /* Give a warning, but do as the user requests anyway. */
|
| - (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
|
| - "workaround is not necessary for target architecture"), obfd);
|
| - }
|
| + {
|
| + case BFD_ARM_VFP11_FIX_DEFAULT:
|
| + case BFD_ARM_VFP11_FIX_NONE:
|
| + globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
|
| + break;
|
| +
|
| + default:
|
| + /* Give a warning, but do as the user requests anyway. */
|
| + (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
|
| + "workaround is not necessary for target architecture"), obfd);
|
| + }
|
| }
|
| else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
|
| /* For earlier architectures, we might need the workaround, but do not
|
| @@ -6369,7 +6384,7 @@ enum bfd_arm_vfp11_pipe
|
|
|
| static unsigned int
|
| bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
|
| - unsigned int x)
|
| + unsigned int x)
|
| {
|
| if (is_double)
|
| return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
|
| @@ -6401,15 +6416,15 @@ bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
|
| unsigned int reg = regs[i];
|
|
|
| if (reg < 32 && (wmask & (1 << reg)) != 0)
|
| - return TRUE;
|
| + return TRUE;
|
|
|
| reg -= 32;
|
|
|
| if (reg >= 16)
|
| - continue;
|
| + continue;
|
|
|
| if ((wmask & (3 << (reg * 2))) != 0)
|
| - return TRUE;
|
| + return TRUE;
|
| }
|
|
|
| return FALSE;
|
| @@ -6425,7 +6440,7 @@ bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
|
|
|
| static enum bfd_arm_vfp11_pipe
|
| bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
|
| - int *numregs)
|
| + int *numregs)
|
| {
|
| enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
|
| bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
|
| @@ -6437,96 +6452,96 @@ bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
|
| unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
|
|
|
| pqrs = ((insn & 0x00800000) >> 20)
|
| - | ((insn & 0x00300000) >> 19)
|
| - | ((insn & 0x00000040) >> 6);
|
| + | ((insn & 0x00300000) >> 19)
|
| + | ((insn & 0x00000040) >> 6);
|
|
|
| switch (pqrs)
|
| - {
|
| - case 0: /* fmac[sd]. */
|
| - case 1: /* fnmac[sd]. */
|
| - case 2: /* fmsc[sd]. */
|
| - case 3: /* fnmsc[sd]. */
|
| - vpipe = VFP11_FMAC;
|
| - bfd_arm_vfp11_write_mask (destmask, fd);
|
| - regs[0] = fd;
|
| - regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
|
| - regs[2] = fm;
|
| - *numregs = 3;
|
| - break;
|
| -
|
| - case 4: /* fmul[sd]. */
|
| - case 5: /* fnmul[sd]. */
|
| - case 6: /* fadd[sd]. */
|
| - case 7: /* fsub[sd]. */
|
| - vpipe = VFP11_FMAC;
|
| - goto vfp_binop;
|
| -
|
| - case 8: /* fdiv[sd]. */
|
| - vpipe = VFP11_DS;
|
| - vfp_binop:
|
| - bfd_arm_vfp11_write_mask (destmask, fd);
|
| - regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
|
| - regs[1] = fm;
|
| - *numregs = 2;
|
| - break;
|
| -
|
| - case 15: /* extended opcode. */
|
| - {
|
| - unsigned int extn = ((insn >> 15) & 0x1e)
|
| - | ((insn >> 7) & 1);
|
| -
|
| - switch (extn)
|
| - {
|
| - case 0: /* fcpy[sd]. */
|
| - case 1: /* fabs[sd]. */
|
| - case 2: /* fneg[sd]. */
|
| - case 8: /* fcmp[sd]. */
|
| - case 9: /* fcmpe[sd]. */
|
| - case 10: /* fcmpz[sd]. */
|
| - case 11: /* fcmpez[sd]. */
|
| - case 16: /* fuito[sd]. */
|
| - case 17: /* fsito[sd]. */
|
| - case 24: /* ftoui[sd]. */
|
| - case 25: /* ftouiz[sd]. */
|
| - case 26: /* ftosi[sd]. */
|
| - case 27: /* ftosiz[sd]. */
|
| - /* These instructions will not bounce due to underflow. */
|
| - *numregs = 0;
|
| - vpipe = VFP11_FMAC;
|
| - break;
|
| -
|
| - case 3: /* fsqrt[sd]. */
|
| - /* fsqrt cannot underflow, but it can (perhaps) overwrite
|
| - registers to cause the erratum in previous instructions. */
|
| - bfd_arm_vfp11_write_mask (destmask, fd);
|
| - vpipe = VFP11_DS;
|
| - break;
|
| -
|
| - case 15: /* fcvt{ds,sd}. */
|
| - {
|
| - int rnum = 0;
|
| -
|
| - bfd_arm_vfp11_write_mask (destmask, fd);
|
| + {
|
| + case 0: /* fmac[sd]. */
|
| + case 1: /* fnmac[sd]. */
|
| + case 2: /* fmsc[sd]. */
|
| + case 3: /* fnmsc[sd]. */
|
| + vpipe = VFP11_FMAC;
|
| + bfd_arm_vfp11_write_mask (destmask, fd);
|
| + regs[0] = fd;
|
| + regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
|
| + regs[2] = fm;
|
| + *numregs = 3;
|
| + break;
|
| +
|
| + case 4: /* fmul[sd]. */
|
| + case 5: /* fnmul[sd]. */
|
| + case 6: /* fadd[sd]. */
|
| + case 7: /* fsub[sd]. */
|
| + vpipe = VFP11_FMAC;
|
| + goto vfp_binop;
|
| +
|
| + case 8: /* fdiv[sd]. */
|
| + vpipe = VFP11_DS;
|
| + vfp_binop:
|
| + bfd_arm_vfp11_write_mask (destmask, fd);
|
| + regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
|
| + regs[1] = fm;
|
| + *numregs = 2;
|
| + break;
|
| +
|
| + case 15: /* extended opcode. */
|
| + {
|
| + unsigned int extn = ((insn >> 15) & 0x1e)
|
| + | ((insn >> 7) & 1);
|
| +
|
| + switch (extn)
|
| + {
|
| + case 0: /* fcpy[sd]. */
|
| + case 1: /* fabs[sd]. */
|
| + case 2: /* fneg[sd]. */
|
| + case 8: /* fcmp[sd]. */
|
| + case 9: /* fcmpe[sd]. */
|
| + case 10: /* fcmpz[sd]. */
|
| + case 11: /* fcmpez[sd]. */
|
| + case 16: /* fuito[sd]. */
|
| + case 17: /* fsito[sd]. */
|
| + case 24: /* ftoui[sd]. */
|
| + case 25: /* ftouiz[sd]. */
|
| + case 26: /* ftosi[sd]. */
|
| + case 27: /* ftosiz[sd]. */
|
| + /* These instructions will not bounce due to underflow. */
|
| + *numregs = 0;
|
| + vpipe = VFP11_FMAC;
|
| + break;
|
| +
|
| + case 3: /* fsqrt[sd]. */
|
| + /* fsqrt cannot underflow, but it can (perhaps) overwrite
|
| + registers to cause the erratum in previous instructions. */
|
| + bfd_arm_vfp11_write_mask (destmask, fd);
|
| + vpipe = VFP11_DS;
|
| + break;
|
| +
|
| + case 15: /* fcvt{ds,sd}. */
|
| + {
|
| + int rnum = 0;
|
| +
|
| + bfd_arm_vfp11_write_mask (destmask, fd);
|
|
|
| /* Only FCVTSD can underflow. */
|
| - if ((insn & 0x100) != 0)
|
| - regs[rnum++] = fm;
|
| + if ((insn & 0x100) != 0)
|
| + regs[rnum++] = fm;
|
|
|
| - *numregs = rnum;
|
| + *numregs = rnum;
|
|
|
| - vpipe = VFP11_FMAC;
|
| - }
|
| - break;
|
| + vpipe = VFP11_FMAC;
|
| + }
|
| + break;
|
|
|
| - default:
|
| - return VFP11_BAD;
|
| - }
|
| - }
|
| - break;
|
| + default:
|
| + return VFP11_BAD;
|
| + }
|
| + }
|
| + break;
|
|
|
| - default:
|
| - return VFP11_BAD;
|
| - }
|
| + default:
|
| + return VFP11_BAD;
|
| + }
|
| }
|
| /* Two-register transfer. */
|
| else if ((insn & 0x0fe00ed0) == 0x0c400a10)
|
| @@ -6535,13 +6550,13 @@ bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
|
|
|
| if ((insn & 0x100000) == 0)
|
| {
|
| - if (is_double)
|
| - bfd_arm_vfp11_write_mask (destmask, fm);
|
| - else
|
| - {
|
| - bfd_arm_vfp11_write_mask (destmask, fm);
|
| - bfd_arm_vfp11_write_mask (destmask, fm + 1);
|
| - }
|
| + if (is_double)
|
| + bfd_arm_vfp11_write_mask (destmask, fm);
|
| + else
|
| + {
|
| + bfd_arm_vfp11_write_mask (destmask, fm);
|
| + bfd_arm_vfp11_write_mask (destmask, fm + 1);
|
| + }
|
| }
|
|
|
| vpipe = VFP11_LS;
|
| @@ -6552,32 +6567,32 @@ bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
|
| unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
|
|
|
| switch (puw)
|
| - {
|
| - case 0: /* Two-reg transfer. We should catch these above. */
|
| - abort ();
|
| + {
|
| + case 0: /* Two-reg transfer. We should catch these above. */
|
| + abort ();
|
|
|
| - case 2: /* fldm[sdx]. */
|
| - case 3:
|
| - case 5:
|
| - {
|
| - unsigned int i, offset = insn & 0xff;
|
| + case 2: /* fldm[sdx]. */
|
| + case 3:
|
| + case 5:
|
| + {
|
| + unsigned int i, offset = insn & 0xff;
|
|
|
| - if (is_double)
|
| - offset >>= 1;
|
| + if (is_double)
|
| + offset >>= 1;
|
|
|
| - for (i = fd; i < fd + offset; i++)
|
| - bfd_arm_vfp11_write_mask (destmask, i);
|
| - }
|
| - break;
|
| + for (i = fd; i < fd + offset; i++)
|
| + bfd_arm_vfp11_write_mask (destmask, i);
|
| + }
|
| + break;
|
|
|
| - case 4: /* fld[sd]. */
|
| - case 6:
|
| - bfd_arm_vfp11_write_mask (destmask, fd);
|
| - break;
|
| + case 4: /* fld[sd]. */
|
| + case 6:
|
| + bfd_arm_vfp11_write_mask (destmask, fd);
|
| + break;
|
|
|
| - default:
|
| - return VFP11_BAD;
|
| - }
|
| + default:
|
| + return VFP11_BAD;
|
| + }
|
|
|
| vpipe = VFP11_LS;
|
| }
|
| @@ -6588,18 +6603,18 @@ bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
|
| unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
|
|
|
| switch (opcode)
|
| - {
|
| - case 0: /* fmsr/fmdlr. */
|
| - case 1: /* fmdhr. */
|
| - /* Mark fmdhr and fmdlr as writing to the whole of the DP
|
| - destination register. I don't know if this is exactly right,
|
| - but it is the conservative choice. */
|
| - bfd_arm_vfp11_write_mask (destmask, fn);
|
| - break;
|
| -
|
| - case 7: /* fmxr. */
|
| - break;
|
| - }
|
| + {
|
| + case 0: /* fmsr/fmdlr. */
|
| + case 1: /* fmdhr. */
|
| + /* Mark fmdhr and fmdlr as writing to the whole of the DP
|
| + destination register. I don't know if this is exactly right,
|
| + but it is the conservative choice. */
|
| + bfd_arm_vfp11_write_mask (destmask, fn);
|
| + break;
|
| +
|
| + case 7: /* fmxr. */
|
| + break;
|
| + }
|
|
|
| vpipe = VFP11_LS;
|
| }
|
| @@ -6633,23 +6648,23 @@ bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
|
| The states transition as follows:
|
|
|
| 0 -> 1 (vector) or 0 -> 2 (scalar)
|
| - A VFP FMAC-pipeline instruction has been seen. Fill
|
| - regs[0]..regs[numregs-1] with its input operands. Remember this
|
| - instruction in 'first_fmac'.
|
| + A VFP FMAC-pipeline instruction has been seen. Fill
|
| + regs[0]..regs[numregs-1] with its input operands. Remember this
|
| + instruction in 'first_fmac'.
|
|
|
| 1 -> 2
|
| - Any instruction, except for a VFP instruction which overwrites
|
| - regs[*].
|
| + Any instruction, except for a VFP instruction which overwrites
|
| + regs[*].
|
|
|
| 1 -> 3 [ -> 0 ] or
|
| 2 -> 3 [ -> 0 ]
|
| - A VFP instruction has been seen which overwrites any of regs[*].
|
| - We must make a veneer! Reset state to 0 before examining next
|
| - instruction.
|
| + A VFP instruction has been seen which overwrites any of regs[*].
|
| + We must make a veneer! Reset state to 0 before examining next
|
| + instruction.
|
|
|
| 2 -> 0
|
| - If we fail to match anything in state 2, reset to state 0 and reset
|
| - the instruction pointer to the instruction after 'first_fmac'.
|
| + If we fail to match anything in state 2, reset to state 0 and reset
|
| + the instruction pointer to the instruction after 'first_fmac'.
|
|
|
| If the VFP11 vector mode is in use, there must be at least two unrelated
|
| instructions between anti-dependent VFP11 instructions to properly avoid
|
| @@ -6680,19 +6695,19 @@ bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
|
| struct _arm_elf_section_data *sec_data;
|
|
|
| /* If we don't have executable progbits, we're not interested in this
|
| - section. Also skip if section is to be excluded. */
|
| + section. Also skip if section is to be excluded. */
|
| if (elf_section_type (sec) != SHT_PROGBITS
|
| - || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
|
| - || (sec->flags & SEC_EXCLUDE) != 0
|
| + || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
|
| + || (sec->flags & SEC_EXCLUDE) != 0
|
| || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
|
| || sec->output_section == bfd_abs_section_ptr
|
| - || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
|
| - continue;
|
| + || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
|
| + continue;
|
|
|
| sec_data = elf32_arm_section_data (sec);
|
|
|
| if (sec_data->mapcount == 0)
|
| - continue;
|
| + continue;
|
|
|
| if (elf_section_data (sec)->this_hdr.contents != NULL)
|
| contents = elf_section_data (sec)->this_hdr.contents;
|
| @@ -6703,122 +6718,122 @@ bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
|
| elf32_arm_compare_mapping);
|
|
|
| for (span = 0; span < sec_data->mapcount; span++)
|
| - {
|
| - unsigned int span_start = sec_data->map[span].vma;
|
| - unsigned int span_end = (span == sec_data->mapcount - 1)
|
| + {
|
| + unsigned int span_start = sec_data->map[span].vma;
|
| + unsigned int span_end = (span == sec_data->mapcount - 1)
|
| ? sec->size : sec_data->map[span + 1].vma;
|
| - char span_type = sec_data->map[span].type;
|
| -
|
| - /* FIXME: Only ARM mode is supported at present. We may need to
|
| - support Thumb-2 mode also at some point. */
|
| - if (span_type != 'a')
|
| - continue;
|
| -
|
| - for (i = span_start; i < span_end;)
|
| - {
|
| - unsigned int next_i = i + 4;
|
| - unsigned int insn = bfd_big_endian (abfd)
|
| - ? (contents[i] << 24)
|
| - | (contents[i + 1] << 16)
|
| - | (contents[i + 2] << 8)
|
| - | contents[i + 3]
|
| - : (contents[i + 3] << 24)
|
| - | (contents[i + 2] << 16)
|
| - | (contents[i + 1] << 8)
|
| - | contents[i];
|
| - unsigned int writemask = 0;
|
| - enum bfd_arm_vfp11_pipe vpipe;
|
| -
|
| - switch (state)
|
| - {
|
| - case 0:
|
| - vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
|
| - &numregs);
|
| - /* I'm assuming the VFP11 erratum can trigger with denorm
|
| - operands on either the FMAC or the DS pipeline. This might
|
| - lead to slightly overenthusiastic veneer insertion. */
|
| - if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
|
| - {
|
| - state = use_vector ? 1 : 2;
|
| - first_fmac = i;
|
| - veneer_of_insn = insn;
|
| - }
|
| - break;
|
| -
|
| - case 1:
|
| - {
|
| - int other_regs[3], other_numregs;
|
| - vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
|
| + char span_type = sec_data->map[span].type;
|
| +
|
| + /* FIXME: Only ARM mode is supported at present. We may need to
|
| + support Thumb-2 mode also at some point. */
|
| + if (span_type != 'a')
|
| + continue;
|
| +
|
| + for (i = span_start; i < span_end;)
|
| + {
|
| + unsigned int next_i = i + 4;
|
| + unsigned int insn = bfd_big_endian (abfd)
|
| + ? (contents[i] << 24)
|
| + | (contents[i + 1] << 16)
|
| + | (contents[i + 2] << 8)
|
| + | contents[i + 3]
|
| + : (contents[i + 3] << 24)
|
| + | (contents[i + 2] << 16)
|
| + | (contents[i + 1] << 8)
|
| + | contents[i];
|
| + unsigned int writemask = 0;
|
| + enum bfd_arm_vfp11_pipe vpipe;
|
| +
|
| + switch (state)
|
| + {
|
| + case 0:
|
| + vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
|
| + &numregs);
|
| + /* I'm assuming the VFP11 erratum can trigger with denorm
|
| + operands on either the FMAC or the DS pipeline. This might
|
| + lead to slightly overenthusiastic veneer insertion. */
|
| + if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
|
| + {
|
| + state = use_vector ? 1 : 2;
|
| + first_fmac = i;
|
| + veneer_of_insn = insn;
|
| + }
|
| + break;
|
| +
|
| + case 1:
|
| + {
|
| + int other_regs[3], other_numregs;
|
| + vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
|
| other_regs,
|
| - &other_numregs);
|
| - if (vpipe != VFP11_BAD
|
| - && bfd_arm_vfp11_antidependency (writemask, regs,
|
| + &other_numregs);
|
| + if (vpipe != VFP11_BAD
|
| + && bfd_arm_vfp11_antidependency (writemask, regs,
|
| numregs))
|
| - state = 3;
|
| - else
|
| - state = 2;
|
| - }
|
| - break;
|
| -
|
| - case 2:
|
| - {
|
| - int other_regs[3], other_numregs;
|
| - vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
|
| + state = 3;
|
| + else
|
| + state = 2;
|
| + }
|
| + break;
|
| +
|
| + case 2:
|
| + {
|
| + int other_regs[3], other_numregs;
|
| + vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
|
| other_regs,
|
| - &other_numregs);
|
| - if (vpipe != VFP11_BAD
|
| - && bfd_arm_vfp11_antidependency (writemask, regs,
|
| + &other_numregs);
|
| + if (vpipe != VFP11_BAD
|
| + && bfd_arm_vfp11_antidependency (writemask, regs,
|
| numregs))
|
| - state = 3;
|
| - else
|
| - {
|
| - state = 0;
|
| - next_i = first_fmac + 4;
|
| - }
|
| - }
|
| - break;
|
| -
|
| - case 3:
|
| - abort (); /* Should be unreachable. */
|
| - }
|
| -
|
| - if (state == 3)
|
| - {
|
| - elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
|
| - bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
|
| -
|
| - elf32_arm_section_data (sec)->erratumcount += 1;
|
| -
|
| - newerr->u.b.vfp_insn = veneer_of_insn;
|
| -
|
| - switch (span_type)
|
| - {
|
| - case 'a':
|
| - newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
|
| - break;
|
| -
|
| - default:
|
| - abort ();
|
| - }
|
| -
|
| - record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
|
| + state = 3;
|
| + else
|
| + {
|
| + state = 0;
|
| + next_i = first_fmac + 4;
|
| + }
|
| + }
|
| + break;
|
| +
|
| + case 3:
|
| + abort (); /* Should be unreachable. */
|
| + }
|
| +
|
| + if (state == 3)
|
| + {
|
| + elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
|
| + bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
|
| +
|
| + elf32_arm_section_data (sec)->erratumcount += 1;
|
| +
|
| + newerr->u.b.vfp_insn = veneer_of_insn;
|
| +
|
| + switch (span_type)
|
| + {
|
| + case 'a':
|
| + newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
|
| + break;
|
| +
|
| + default:
|
| + abort ();
|
| + }
|
| +
|
| + record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
|
| first_fmac);
|
|
|
| - newerr->vma = -1;
|
| + newerr->vma = -1;
|
|
|
| - newerr->next = sec_data->erratumlist;
|
| - sec_data->erratumlist = newerr;
|
| + newerr->next = sec_data->erratumlist;
|
| + sec_data->erratumlist = newerr;
|
|
|
| - state = 0;
|
| - }
|
| + state = 0;
|
| + }
|
|
|
| - i = next_i;
|
| - }
|
| - }
|
| + i = next_i;
|
| + }
|
| + }
|
|
|
| if (contents != NULL
|
| - && elf_section_data (sec)->this_hdr.contents != contents)
|
| - free (contents);
|
| + && elf_section_data (sec)->this_hdr.contents != contents)
|
| + free (contents);
|
| contents = NULL;
|
| }
|
|
|
| @@ -6855,7 +6870,7 @@ bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
|
| return;
|
|
|
| tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
|
| - (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
|
| + (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
|
|
|
| for (sec = abfd->sections; sec != NULL; sec = sec->next)
|
| {
|
| @@ -6863,56 +6878,56 @@ bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
|
| elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
|
|
|
| for (; errnode != NULL; errnode = errnode->next)
|
| - {
|
| - struct elf_link_hash_entry *myh;
|
| - bfd_vma vma;
|
| -
|
| - switch (errnode->type)
|
| - {
|
| - case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
|
| - case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
|
| - /* Find veneer symbol. */
|
| - sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
|
| + {
|
| + struct elf_link_hash_entry *myh;
|
| + bfd_vma vma;
|
| +
|
| + switch (errnode->type)
|
| + {
|
| + case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
|
| + case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
|
| + /* Find veneer symbol. */
|
| + sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
|
| errnode->u.b.veneer->u.v.id);
|
|
|
| - myh = elf_link_hash_lookup
|
| - (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
|
| + myh = elf_link_hash_lookup
|
| + (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
|
|
|
| - if (myh == NULL)
|
| - (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
|
| - "`%s'"), abfd, tmp_name);
|
| + if (myh == NULL)
|
| + (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
|
| + "`%s'"), abfd, tmp_name);
|
|
|
| - vma = myh->root.u.def.section->output_section->vma
|
| - + myh->root.u.def.section->output_offset
|
| - + myh->root.u.def.value;
|
| + vma = myh->root.u.def.section->output_section->vma
|
| + + myh->root.u.def.section->output_offset
|
| + + myh->root.u.def.value;
|
|
|
| - errnode->u.b.veneer->vma = vma;
|
| - break;
|
| + errnode->u.b.veneer->vma = vma;
|
| + break;
|
|
|
| case VFP11_ERRATUM_ARM_VENEER:
|
| - case VFP11_ERRATUM_THUMB_VENEER:
|
| - /* Find return location. */
|
| - sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
|
| - errnode->u.v.id);
|
| + case VFP11_ERRATUM_THUMB_VENEER:
|
| + /* Find return location. */
|
| + sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
|
| + errnode->u.v.id);
|
|
|
| - myh = elf_link_hash_lookup
|
| - (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
|
| + myh = elf_link_hash_lookup
|
| + (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
|
|
|
| - if (myh == NULL)
|
| - (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
|
| + if (myh == NULL)
|
| + (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
|
| "`%s'"), abfd, tmp_name);
|
|
|
| - vma = myh->root.u.def.section->output_section->vma
|
| - + myh->root.u.def.section->output_offset
|
| - + myh->root.u.def.value;
|
| + vma = myh->root.u.def.section->output_section->vma
|
| + + myh->root.u.def.section->output_offset
|
| + + myh->root.u.def.value;
|
|
|
| - errnode->u.v.branch->vma = vma;
|
| - break;
|
| + errnode->u.v.branch->vma = vma;
|
| + break;
|
|
|
| - default:
|
| - abort ();
|
| - }
|
| - }
|
| + default:
|
| + abort ();
|
| + }
|
| + }
|
| }
|
|
|
| free (tmp_name);
|
| @@ -6926,9 +6941,9 @@ bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
|
| struct bfd_link_info *link_info,
|
| int target1_is_rel,
|
| char * target2_type,
|
| - int fix_v4bx,
|
| + int fix_v4bx,
|
| int use_blx,
|
| - bfd_arm_vfp11_fix vfp11_fix,
|
| + bfd_arm_vfp11_fix vfp11_fix,
|
| int no_enum_warn, int no_wchar_warn,
|
| int pic_veneer, int fix_cortex_a8,
|
| int fix_arm1176)
|
| @@ -7423,6 +7438,10 @@ elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
|
| splt = htab->root.iplt;
|
| sgotplt = htab->root.igotplt;
|
|
|
| + /* NaCl uses a special first entry in .iplt too. */
|
| + if (htab->nacl_p && splt->size == 0)
|
| + splt->size += htab->plt_header_size;
|
| +
|
| /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
|
| elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
|
| }
|
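Note on the hunk above: besides the re-indentation elsewhere in this patch, real behaviour is added here. On Native Client targets the PLT begins with an oversized header entry, and the .iplt section (used for STT_GNU_IFUNC symbols) now reserves that header the first time an entry is allocated in it, presumably mirroring what the sizing code already does for the ordinary .plt; without the extra room the NaCl .iplt would be laid out as if no header existed.
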
| @@ -7474,9 +7493,11 @@ arm_movt_immediate (bfd_vma value)
|
|
|
| ROOT_PLT points to the offset of the PLT entry from the start of its
|
| section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
|
| - bookkeeping information. */
|
| + bookkeeping information.
|
|
|
| -static void
|
| + Returns FALSE if there was a problem. */
|
| +
|
| +static bfd_boolean
|
| elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
|
| union gotplt_union *root_plt,
|
| struct arm_plt_info *arm_plt,
|
| @@ -7645,7 +7666,7 @@ elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
|
| in the GOT. The offset accounts for the value produced by
|
| adding to pc in the penultimate instruction of the PLT stub. */
|
| got_displacement = (got_address
|
| - - (plt_address + htab->plt_entry_size));
|
| + - (plt_address + htab->plt_entry_size));
|
|
|
| /* NaCl does not support interworking at all. */
|
| BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
|
| @@ -7666,6 +7687,16 @@ elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
|
| | (tail_displacement & 0x00ffffff),
|
| ptr + 12);
|
| }
|
| + else if (using_thumb_only (htab))
|
| + {
|
| + /* PR ld/16017: Do not generate ARM instructions for
|
| + the PLT if compiling for a thumb-only target.
|
| +
|
| + FIXME: We ought to be able to generate thumb PLT instructions... */
|
| + _bfd_error_handler (_("%B: Warning: thumb mode PLT generation not currently supported"),
|
| + output_bfd);
|
| + return FALSE;
|
| + }
|
| else
|
| {
|
| /* Calculate the displacement between the PLT slot and the
|
| @@ -7724,8 +7755,15 @@ elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
|
| sgot->contents + got_offset);
|
| }
|
|
|
| - loc = srel->contents + plt_index * RELOC_SIZE (htab);
|
| - SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
|
| + if (dynindx == -1)
|
| + elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
|
| + else
|
| + {
|
| + loc = srel->contents + plt_index * RELOC_SIZE (htab);
|
| + SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
|
| + }
|
| +
|
| + return TRUE;
|
| }
|
|
|
| /* Some relocations map to different relocations depending on the
|
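Note on the two hunks above: elf32_arm_populate_plt_entry stops being void so that a failure to build a PLT entry can be reported; the relocation code further down checks the new return value. Independently, when the entry belongs to a symbol with no dynamic symbol index (dynindx == -1, e.g. a local STT_GNU_IFUNC), its relocation is now appended through elf32_arm_add_dynreloc, which keeps its own running position in the output relocation section, rather than being written at a slot computed from plt_index, which need not correspond to anything useful for entries that never went through the dynamic symbol table.
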
| @@ -7959,28 +7997,28 @@ calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
|
|
|
| /* Calculate which part of the value to mask. */
|
| if (residual == 0)
|
| - shift = 0;
|
| + shift = 0;
|
| else
|
| - {
|
| - int msb;
|
| -
|
| - /* Determine the most significant bit in the residual and
|
| - align the resulting value to a 2-bit boundary. */
|
| - for (msb = 30; msb >= 0; msb -= 2)
|
| - if (residual & (3 << msb))
|
| - break;
|
| -
|
| - /* The desired shift is now (msb - 6), or zero, whichever
|
| - is the greater. */
|
| - shift = msb - 6;
|
| - if (shift < 0)
|
| - shift = 0;
|
| - }
|
| + {
|
| + int msb;
|
| +
|
| + /* Determine the most significant bit in the residual and
|
| + align the resulting value to a 2-bit boundary. */
|
| + for (msb = 30; msb >= 0; msb -= 2)
|
| + if (residual & (3 << msb))
|
| + break;
|
| +
|
| + /* The desired shift is now (msb - 6), or zero, whichever
|
| + is the greater. */
|
| + shift = msb - 6;
|
| + if (shift < 0)
|
| + shift = 0;
|
| + }
|
|
|
| /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
|
| g_n = residual & (0xff << shift);
|
| encoded_g_n = (g_n >> shift)
|
| - | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
|
| + | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
|
|
|
| /* Calculate the residual for the next time around. */
|
| residual &= ~g_n;
|
| @@ -8105,6 +8143,14 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| else
|
| addend = signed_addend = rel->r_addend;
|
|
|
| + /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
|
| + are resolving a function call relocation. */
|
| + if (using_thumb_only (globals)
|
| + && (r_type == R_ARM_THM_CALL
|
| + || r_type == R_ARM_THM_JUMP24)
|
| + && branch_type == ST_BRANCH_TO_ARM)
|
| + branch_type = ST_BRANCH_TO_THUMB;
|
| +
|
| /* Record the symbol information that should be used in dynamic
|
| relocations. */
|
| dynreloc_st_type = st_type;
|
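Note on the hunk above: on a Thumb-only target (e.g. ARMv7-M) there is no ARM state to interwork with, so a Thumb call relocation whose target is still marked ST_BRANCH_TO_ARM would only trigger pointless BLX conversion or interworking-stub logic. Coercing the branch type to ST_BRANCH_TO_THUMB here, before any of those decisions are made, keeps the plain BL encoding.
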
| @@ -8133,9 +8179,11 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| plt_offset--;
|
| else
|
| {
|
| - elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
|
| - -1, dynreloc_value);
|
| - root_plt->offset |= 1;
|
| + if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
|
| + -1, dynreloc_value))
|
| + root_plt->offset |= 1;
|
| + else
|
| + return bfd_reloc_notsupported;
|
| }
|
|
|
| /* Static relocations always resolve to the .iplt entry. */
|
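Note on the hunk above: this is the caller-side half of the PR ld/16017 change. elf32_arm_populate_plt_entry now refuses to emit its ARM-encoded PLT template on a Thumb-only target (it warns and returns FALSE in the earlier hunk), and the check just above turns that into bfd_reloc_notsupported rather than silently producing instructions the core could never execute. The FIXME in the warning hunk suggests a proper Thumb PLT template is the eventual fix.
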
| @@ -8192,7 +8240,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| branches in this object should go to it, except if the PLT is too
|
| far away, in which case a long branch stub should be inserted. */
|
| if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
|
| - && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
|
| + && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
|
| && r_type != R_ARM_CALL
|
| && r_type != R_ARM_JUMP24
|
| && r_type != R_ARM_PLT32)
|
| @@ -8223,7 +8271,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| ".tls_vars") == 0)
|
| && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
|
| || !SYMBOL_CALLS_LOCAL (info, h))
|
| - && (!strstr (input_section->name, STUB_SUFFIX))
|
| + && !(input_bfd == globals->stub_bfd
|
| + && strstr (input_section->name, STUB_SUFFIX))
|
| && (h == NULL
|
| || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
|
| || h->root.type != bfd_link_hash_undefweak)
|
| @@ -8608,36 +8657,36 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| bfd_signed_vma relocation;
|
|
|
| insn = (bfd_get_16 (input_bfd, hit_data) << 16)
|
| - | bfd_get_16 (input_bfd, hit_data + 2);
|
| + | bfd_get_16 (input_bfd, hit_data + 2);
|
|
|
| - if (globals->use_rel)
|
| - {
|
| - signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
|
| - | ((insn & (1 << 26)) >> 15);
|
| - if (insn & 0xf00000)
|
| - signed_addend = -signed_addend;
|
| - }
|
| + if (globals->use_rel)
|
| + {
|
| + signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
|
| + | ((insn & (1 << 26)) >> 15);
|
| + if (insn & 0xf00000)
|
| + signed_addend = -signed_addend;
|
| + }
|
|
|
| relocation = value + signed_addend;
|
| - relocation -= (input_section->output_section->vma
|
| - + input_section->output_offset
|
| - + rel->r_offset);
|
| + relocation -= Pa (input_section->output_section->vma
|
| + + input_section->output_offset
|
| + + rel->r_offset);
|
|
|
| - value = abs (relocation);
|
| + value = abs (relocation);
|
|
|
| - if (value >= 0x1000)
|
| - return bfd_reloc_overflow;
|
| + if (value >= 0x1000)
|
| + return bfd_reloc_overflow;
|
|
|
| insn = (insn & 0xfb0f8f00) | (value & 0xff)
|
| - | ((value & 0x700) << 4)
|
| - | ((value & 0x800) << 15);
|
| - if (relocation < 0)
|
| - insn |= 0xa00000;
|
| + | ((value & 0x700) << 4)
|
| + | ((value & 0x800) << 15);
|
| + if (relocation < 0)
|
| + insn |= 0xa00000;
|
|
|
| bfd_put_16 (input_bfd, insn >> 16, hit_data);
|
| bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
|
|
|
| - return bfd_reloc_ok;
|
| + return bfd_reloc_ok;
|
| }
|
|
|
| case R_ARM_THM_PC8:
|
| @@ -8650,15 +8699,15 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
|
|
| insn = bfd_get_16 (input_bfd, hit_data);
|
|
|
| - if (globals->use_rel)
|
| - addend = (insn & 0x00ff) << 2;
|
| + if (globals->use_rel)
|
| + addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
|
|
|
| relocation = value + addend;
|
| - relocation -= (input_section->output_section->vma
|
| - + input_section->output_offset
|
| - + rel->r_offset);
|
| + relocation -= Pa (input_section->output_section->vma
|
| + + input_section->output_offset
|
| + + rel->r_offset);
|
|
|
| - value = abs (relocation);
|
| + value = abs (relocation);
|
|
|
| /* We do not check for overflow of this reloc. Although strictly
|
| speaking this is incorrect, it appears to be necessary in order
|
| @@ -8671,7 +8720,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
|
|
| bfd_put_16 (input_bfd, insn, hit_data);
|
|
|
| - return bfd_reloc_ok;
|
| + return bfd_reloc_ok;
|
| }
|
|
|
| case R_ARM_THM_PC12:
|
| @@ -8681,33 +8730,33 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| bfd_signed_vma relocation;
|
|
|
| insn = (bfd_get_16 (input_bfd, hit_data) << 16)
|
| - | bfd_get_16 (input_bfd, hit_data + 2);
|
| + | bfd_get_16 (input_bfd, hit_data + 2);
|
|
|
| - if (globals->use_rel)
|
| - {
|
| - signed_addend = insn & 0xfff;
|
| - if (!(insn & (1 << 23)))
|
| - signed_addend = -signed_addend;
|
| - }
|
| + if (globals->use_rel)
|
| + {
|
| + signed_addend = insn & 0xfff;
|
| + if (!(insn & (1 << 23)))
|
| + signed_addend = -signed_addend;
|
| + }
|
|
|
| relocation = value + signed_addend;
|
| - relocation -= (input_section->output_section->vma
|
| - + input_section->output_offset
|
| - + rel->r_offset);
|
| + relocation -= Pa (input_section->output_section->vma
|
| + + input_section->output_offset
|
| + + rel->r_offset);
|
|
|
| - value = abs (relocation);
|
| + value = abs (relocation);
|
|
|
| - if (value >= 0x1000)
|
| - return bfd_reloc_overflow;
|
| + if (value >= 0x1000)
|
| + return bfd_reloc_overflow;
|
|
|
| insn = (insn & 0xff7ff000) | value;
|
| - if (relocation >= 0)
|
| - insn |= (1 << 23);
|
| + if (relocation >= 0)
|
| + insn |= (1 << 23);
|
|
|
| bfd_put_16 (input_bfd, insn >> 16, hit_data);
|
| bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
|
|
|
| - return bfd_reloc_ok;
|
| + return bfd_reloc_ok;
|
| }
|
|
|
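Note on the three hunks above (R_ARM_THM_ALU_PREL_11_0, R_ARM_THM_PC8, R_ARM_THM_PC12): the place term is now Pa (P), the Adjusted Place, rather than the raw place P, and the R_ARM_THM_PC8 addend is decoded with a matching wrap-around. Per the ABI these relocations resolve against the place rounded down to a word boundary, which is also the word-aligned base that ADR and LDR (literal) actually use, so a relocation sited at a halfword-aligned Thumb instruction is no longer off by two. A small, self-contained illustration; the addresses are made up and pa() merely models the macro used by the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Pa (P): the Adjusted Place, i.e. the address of the place rounded
       down to a word boundary.  Everything here is illustrative only.  */
    static uint32_t
    pa (uint32_t place)
    {
      return place & ~(uint32_t) 3;
    }

    int
    main (void)
    {
      uint32_t place = 0x8002;    /* halfword-aligned Thumb instruction */
      uint32_t target = 0x8040;   /* S + A for the addressed symbol */

      /* Before the patch: offset measured from the raw place.  */
      printf ("S + A - P     = %+d\n", (int) (target - place));      /* +62 */

      /* After the patch: offset measured from the Adjusted Place, so the
         result is the multiple of four that the imm8/imm12 fields need.  */
      printf ("S + A - Pa(P) = %+d\n", (int) (target - pa (place)));  /* +64 */
      return 0;
    }
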
| case R_ARM_THM_XPC22:
|
| @@ -8716,7 +8765,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| /* Thumb BL (branch long instruction). */
|
| {
|
| bfd_vma relocation;
|
| - bfd_vma reloc_sign;
|
| + bfd_vma reloc_sign;
|
| bfd_boolean overflow = FALSE;
|
| bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
|
| bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
|
| @@ -8748,20 +8797,20 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| }
|
|
|
| /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
|
| - with Thumb-1) involving the J1 and J2 bits. */
|
| + with Thumb-1) involving the J1 and J2 bits. */
|
| if (globals->use_rel)
|
| {
|
| - bfd_vma s = (upper_insn & (1 << 10)) >> 10;
|
| - bfd_vma upper = upper_insn & 0x3ff;
|
| - bfd_vma lower = lower_insn & 0x7ff;
|
| + bfd_vma s = (upper_insn & (1 << 10)) >> 10;
|
| + bfd_vma upper = upper_insn & 0x3ff;
|
| + bfd_vma lower = lower_insn & 0x7ff;
|
| bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
|
| bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
|
| - bfd_vma i1 = j1 ^ s ? 0 : 1;
|
| - bfd_vma i2 = j2 ^ s ? 0 : 1;
|
| + bfd_vma i1 = j1 ^ s ? 0 : 1;
|
| + bfd_vma i2 = j2 ^ s ? 0 : 1;
|
|
|
| - addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
|
| - /* Sign extend. */
|
| - addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
|
| + addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
|
| + /* Sign extend. */
|
| + addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
|
|
|
| signed_addend = addend;
|
| }
|
| @@ -8918,14 +8967,14 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| /* Put RELOCATION back into the insn. Assumes two's complement.
|
| We use the Thumb-2 encoding, which is safe even if dealing with
|
| a Thumb-1 instruction by virtue of our overflow check above. */
|
| - reloc_sign = (signed_check < 0) ? 1 : 0;
|
| + reloc_sign = (signed_check < 0) ? 1 : 0;
|
| upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
|
| - | ((relocation >> 12) & 0x3ff)
|
| - | (reloc_sign << 10);
|
| + | ((relocation >> 12) & 0x3ff)
|
| + | (reloc_sign << 10);
|
| lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
|
| - | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
|
| - | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
|
| - | ((relocation >> 1) & 0x7ff);
|
| + | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
|
| + | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
|
| + | ((relocation >> 1) & 0x7ff);
|
|
|
| /* Put the relocated value back in the object file: */
|
| bfd_put_16 (input_bfd, upper_insn, hit_data);
|
| @@ -8979,7 +9028,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| /* ??? Should handle interworking? GCC might someday try to
|
| use this for tail calls. */
|
|
|
| - relocation = value + signed_addend;
|
| + relocation = value + signed_addend;
|
| relocation -= (input_section->output_section->vma
|
| + input_section->output_offset
|
| + rel->r_offset);
|
| @@ -9094,11 +9143,11 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
|
|
| case R_ARM_GOTOFF32:
|
| /* Relocation is relative to the start of the
|
| - global offset table. */
|
| + global offset table. */
|
|
|
| BFD_ASSERT (sgot != NULL);
|
| if (sgot == NULL)
|
| - return bfd_reloc_notsupported;
|
| + return bfd_reloc_notsupported;
|
|
|
| /* If we are addressing a Thumb function, we need to adjust the
|
| address by one, so that attempts to call the function pointer will
|
| @@ -9107,10 +9156,10 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| value += 1;
|
|
|
| /* Note that sgot->output_offset is not involved in this
|
| - calculation. We always want the start of .got. If we
|
| - define _GLOBAL_OFFSET_TABLE in a different way, as is
|
| - permitted by the ABI, we might have to change this
|
| - calculation. */
|
| + calculation. We always want the start of .got. If we
|
| + define _GLOBAL_OFFSET_TABLE in a different way, as is
|
| + permitted by the ABI, we might have to change this
|
| + calculation. */
|
| value -= sgot->output_section->vma;
|
| return _bfd_final_link_relocate (howto, input_bfd, input_section,
|
| contents, rel->r_offset, value,
|
| @@ -9121,7 +9170,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| BFD_ASSERT (sgot != NULL);
|
|
|
| if (sgot == NULL)
|
| - return bfd_reloc_notsupported;
|
| + return bfd_reloc_notsupported;
|
|
|
| *unresolved_reloc_p = FALSE;
|
| value = sgot->output_section->vma;
|
| @@ -9132,7 +9181,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| case R_ARM_GOT32:
|
| case R_ARM_GOT_PREL:
|
| /* Relocation is to the entry for this symbol in the
|
| - global offset table. */
|
| + global offset table. */
|
| if (sgot == NULL)
|
| return bfd_reloc_notsupported;
|
|
|
| @@ -9167,7 +9216,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| {
|
| Elf_Internal_Rela outrel;
|
|
|
| - if (!SYMBOL_REFERENCES_LOCAL (info, h))
|
| + if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
|
| {
|
| /* If the symbol doesn't resolve locally in a static
|
| object, we have an undefined reference. If the
|
| @@ -9185,11 +9234,13 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| else
|
| {
|
| if (dynreloc_st_type == STT_GNU_IFUNC)
|
| - outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
|
| - else if (info->shared)
|
| - outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
|
| - else
|
| - outrel.r_info = 0;
|
| + outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
|
| + else if (info->shared &&
|
| + (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
|
| + || h->root.type != bfd_link_hash_undefweak))
|
| + outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
|
| + else
|
| + outrel.r_info = 0;
|
| outrel.r_addend = dynreloc_value;
|
| }
|
|
|
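Note on the two hunks above: GOT handling for this symbol is tightened in two ways. A symbol-based dynamic relocation is only emitted if the symbol actually has a dynamic symbol table index (h->dynindx != -1) in addition to not resolving locally; and in the fallback branch an R_ARM_RELATIVE relocation is only produced for shared links when the symbol is not an undefined weak with non-default visibility. Such a symbol must resolve to zero at run time, and rebasing that zero by the load address would otherwise break the usual if (&weak_sym != 0) test.
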
| @@ -9242,7 +9293,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| + sgot->output_offset
|
| + off);
|
| if (dynreloc_st_type == STT_GNU_IFUNC)
|
| - outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
|
| + outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
|
| else
|
| outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
|
| elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
|
| @@ -9388,7 +9439,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
|
| || info->shared);
|
| BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
|
| - <= globals->root.sgotplt->size);
|
| + <= globals->root.sgotplt->size);
|
|
|
| outrel.r_addend = 0;
|
| outrel.r_offset = (globals->root.sgotplt->output_section->vma
|
| @@ -9401,7 +9452,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| loc = sreloc->contents;
|
| loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
|
| BFD_ASSERT (loc + RELOC_SIZE (globals)
|
| - <= sreloc->contents + sreloc->size);
|
| + <= sreloc->contents + sreloc->size);
|
|
|
| SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
|
|
|
| @@ -9409,7 +9460,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| the relocation index and the top bit set, or zero,
|
| if we're binding now. For locals, it gets the
|
| symbol's offset in the tls section. */
|
| - bfd_put_32 (output_bfd,
|
| + bfd_put_32 (output_bfd,
|
| !h ? value - elf_hash_table (info)->tls_sec->vma
|
| : info->flags & DF_BIND_NOW ? 0
|
| : 0x80000000 | ELF32_R_SYM (outrel.r_info),
|
| @@ -9417,7 +9468,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| + globals->sgotplt_jump_table_size);
|
|
|
| /* Second word in the relocation is always zero. */
|
| - bfd_put_32 (output_bfd, 0,
|
| + bfd_put_32 (output_bfd, 0,
|
| globals->root.sgotplt->contents + offplt
|
| + globals->sgotplt_jump_table_size + 4);
|
| }
|
| @@ -9733,13 +9784,13 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| + input_section->output_offset + rel->r_offset);
|
|
|
| if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
|
| - return bfd_reloc_overflow;
|
| + return bfd_reloc_overflow;
|
|
|
| if (branch_type == ST_BRANCH_TO_THUMB)
|
| value |= 1;
|
|
|
| if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
|
| - || r_type == R_ARM_MOVT_BREL)
|
| + || r_type == R_ARM_MOVT_BREL)
|
| value >>= 16;
|
|
|
| insn &= 0xfff0f000;
|
| @@ -9783,13 +9834,13 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| + input_section->output_offset + rel->r_offset);
|
|
|
| if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
|
| - return bfd_reloc_overflow;
|
| + return bfd_reloc_overflow;
|
|
|
| if (branch_type == ST_BRANCH_TO_THUMB)
|
| value |= 1;
|
|
|
| if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
|
| - || r_type == R_ARM_THM_MOVT_BREL)
|
| + || r_type == R_ARM_THM_MOVT_BREL)
|
| value >>= 16;
|
|
|
| insn &= 0xfbf08f00;
|
| @@ -9815,129 +9866,127 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| case R_ARM_ALU_SB_G2:
|
| {
|
| bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
|
| - bfd_vma pc = input_section->output_section->vma
|
| + bfd_vma pc = input_section->output_section->vma
|
| + input_section->output_offset + rel->r_offset;
|
| - /* sb should be the origin of the *segment* containing the symbol.
|
| - It is not clear how to obtain this OS-dependent value, so we
|
| - make an arbitrary choice of zero. */
|
| - bfd_vma sb = 0;
|
| - bfd_vma residual;
|
| - bfd_vma g_n;
|
| + /* sb is the origin of the *segment* containing the symbol. */
|
| + bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
|
| + bfd_vma residual;
|
| + bfd_vma g_n;
|
| bfd_signed_vma signed_value;
|
| - int group = 0;
|
| -
|
| - /* Determine which group of bits to select. */
|
| - switch (r_type)
|
| - {
|
| - case R_ARM_ALU_PC_G0_NC:
|
| - case R_ARM_ALU_PC_G0:
|
| - case R_ARM_ALU_SB_G0_NC:
|
| - case R_ARM_ALU_SB_G0:
|
| - group = 0;
|
| - break;
|
| -
|
| - case R_ARM_ALU_PC_G1_NC:
|
| - case R_ARM_ALU_PC_G1:
|
| - case R_ARM_ALU_SB_G1_NC:
|
| - case R_ARM_ALU_SB_G1:
|
| - group = 1;
|
| - break;
|
| -
|
| - case R_ARM_ALU_PC_G2:
|
| - case R_ARM_ALU_SB_G2:
|
| - group = 2;
|
| - break;
|
| -
|
| - default:
|
| - abort ();
|
| - }
|
| -
|
| - /* If REL, extract the addend from the insn. If RELA, it will
|
| - have already been fetched for us. */
|
| + int group = 0;
|
| +
|
| + /* Determine which group of bits to select. */
|
| + switch (r_type)
|
| + {
|
| + case R_ARM_ALU_PC_G0_NC:
|
| + case R_ARM_ALU_PC_G0:
|
| + case R_ARM_ALU_SB_G0_NC:
|
| + case R_ARM_ALU_SB_G0:
|
| + group = 0;
|
| + break;
|
| +
|
| + case R_ARM_ALU_PC_G1_NC:
|
| + case R_ARM_ALU_PC_G1:
|
| + case R_ARM_ALU_SB_G1_NC:
|
| + case R_ARM_ALU_SB_G1:
|
| + group = 1;
|
| + break;
|
| +
|
| + case R_ARM_ALU_PC_G2:
|
| + case R_ARM_ALU_SB_G2:
|
| + group = 2;
|
| + break;
|
| +
|
| + default:
|
| + abort ();
|
| + }
|
| +
|
| + /* If REL, extract the addend from the insn. If RELA, it will
|
| + have already been fetched for us. */
|
| if (globals->use_rel)
|
| - {
|
| - int negative;
|
| - bfd_vma constant = insn & 0xff;
|
| - bfd_vma rotation = (insn & 0xf00) >> 8;
|
| -
|
| - if (rotation == 0)
|
| - signed_addend = constant;
|
| - else
|
| - {
|
| - /* Compensate for the fact that in the instruction, the
|
| - rotation is stored in multiples of 2 bits. */
|
| - rotation *= 2;
|
| -
|
| - /* Rotate "constant" right by "rotation" bits. */
|
| - signed_addend = (constant >> rotation) |
|
| - (constant << (8 * sizeof (bfd_vma) - rotation));
|
| - }
|
| -
|
| - /* Determine if the instruction is an ADD or a SUB.
|
| - (For REL, this determines the sign of the addend.) */
|
| - negative = identify_add_or_sub (insn);
|
| - if (negative == 0)
|
| - {
|
| - (*_bfd_error_handler)
|
| - (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
|
| - input_bfd, input_section,
|
| - (long) rel->r_offset, howto->name);
|
| - return bfd_reloc_overflow;
|
| - }
|
| -
|
| - signed_addend *= negative;
|
| - }
|
| + {
|
| + int negative;
|
| + bfd_vma constant = insn & 0xff;
|
| + bfd_vma rotation = (insn & 0xf00) >> 8;
|
| +
|
| + if (rotation == 0)
|
| + signed_addend = constant;
|
| + else
|
| + {
|
| + /* Compensate for the fact that in the instruction, the
|
| + rotation is stored in multiples of 2 bits. */
|
| + rotation *= 2;
|
| +
|
| + /* Rotate "constant" right by "rotation" bits. */
|
| + signed_addend = (constant >> rotation) |
|
| + (constant << (8 * sizeof (bfd_vma) - rotation));
|
| + }
|
| +
|
| + /* Determine if the instruction is an ADD or a SUB.
|
| + (For REL, this determines the sign of the addend.) */
|
| + negative = identify_add_or_sub (insn);
|
| + if (negative == 0)
|
| + {
|
| + (*_bfd_error_handler)
|
| + (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
|
| + input_bfd, input_section,
|
| + (long) rel->r_offset, howto->name);
|
| + return bfd_reloc_overflow;
|
| + }
|
| +
|
| + signed_addend *= negative;
|
| + }
|
|
|
| /* Compute the value (X) to go in the place. */
|
| - if (r_type == R_ARM_ALU_PC_G0_NC
|
| - || r_type == R_ARM_ALU_PC_G1_NC
|
| - || r_type == R_ARM_ALU_PC_G0
|
| - || r_type == R_ARM_ALU_PC_G1
|
| - || r_type == R_ARM_ALU_PC_G2)
|
| - /* PC relative. */
|
| - signed_value = value - pc + signed_addend;
|
| - else
|
| - /* Section base relative. */
|
| - signed_value = value - sb + signed_addend;
|
| -
|
| - /* If the target symbol is a Thumb function, then set the
|
| - Thumb bit in the address. */
|
| + if (r_type == R_ARM_ALU_PC_G0_NC
|
| + || r_type == R_ARM_ALU_PC_G1_NC
|
| + || r_type == R_ARM_ALU_PC_G0
|
| + || r_type == R_ARM_ALU_PC_G1
|
| + || r_type == R_ARM_ALU_PC_G2)
|
| + /* PC relative. */
|
| + signed_value = value - pc + signed_addend;
|
| + else
|
| + /* Section base relative. */
|
| + signed_value = value - sb + signed_addend;
|
| +
|
| + /* If the target symbol is a Thumb function, then set the
|
| + Thumb bit in the address. */
|
| if (branch_type == ST_BRANCH_TO_THUMB)
|
| signed_value |= 1;
|
|
|
| - /* Calculate the value of the relevant G_n, in encoded
|
| - constant-with-rotation format. */
|
| - g_n = calculate_group_reloc_mask (abs (signed_value), group,
|
| - &residual);
|
| -
|
| - /* Check for overflow if required. */
|
| - if ((r_type == R_ARM_ALU_PC_G0
|
| - || r_type == R_ARM_ALU_PC_G1
|
| - || r_type == R_ARM_ALU_PC_G2
|
| - || r_type == R_ARM_ALU_SB_G0
|
| - || r_type == R_ARM_ALU_SB_G1
|
| - || r_type == R_ARM_ALU_SB_G2) && residual != 0)
|
| - {
|
| - (*_bfd_error_handler)
|
| - (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
|
| - input_bfd, input_section,
|
| - (long) rel->r_offset, abs (signed_value), howto->name);
|
| - return bfd_reloc_overflow;
|
| - }
|
| -
|
| - /* Mask out the value and the ADD/SUB part of the opcode; take care
|
| - not to destroy the S bit. */
|
| - insn &= 0xff1ff000;
|
| -
|
| - /* Set the opcode according to whether the value to go in the
|
| - place is negative. */
|
| - if (signed_value < 0)
|
| - insn |= 1 << 22;
|
| - else
|
| - insn |= 1 << 23;
|
| -
|
| - /* Encode the offset. */
|
| - insn |= g_n;
|
| + /* Calculate the value of the relevant G_n, in encoded
|
| + constant-with-rotation format. */
|
| + g_n = calculate_group_reloc_mask (abs (signed_value), group,
|
| + &residual);
|
| +
|
| + /* Check for overflow if required. */
|
| + if ((r_type == R_ARM_ALU_PC_G0
|
| + || r_type == R_ARM_ALU_PC_G1
|
| + || r_type == R_ARM_ALU_PC_G2
|
| + || r_type == R_ARM_ALU_SB_G0
|
| + || r_type == R_ARM_ALU_SB_G1
|
| + || r_type == R_ARM_ALU_SB_G2) && residual != 0)
|
| + {
|
| + (*_bfd_error_handler)
|
| + (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
|
| + input_bfd, input_section,
|
| + (long) rel->r_offset, abs (signed_value), howto->name);
|
| + return bfd_reloc_overflow;
|
| + }
|
| +
|
| + /* Mask out the value and the ADD/SUB part of the opcode; take care
|
| + not to destroy the S bit. */
|
| + insn &= 0xff1ff000;
|
| +
|
| + /* Set the opcode according to whether the value to go in the
|
| + place is negative. */
|
| + if (signed_value < 0)
|
| + insn |= 1 << 22;
|
| + else
|
| + insn |= 1 << 23;
|
| +
|
| + /* Encode the offset. */
|
| + insn |= g_n;
|
|
|
| bfd_put_32 (input_bfd, insn, hit_data);
|
| }
|
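Note on the hunk above (and the matching LDR/LDRS hunks that follow): the SB-relative group relocations stop hard-coding sb = 0; the static base is now taken from the VMA of the output section containing the symbol, which stands in for the segment origin the ABI actually asks for. The mechanics of the groups are unchanged: the offset is split into successive chunks, each representable as an ARM 8-bit immediate with an even rotation. A self-contained sketch of that splitting (names and the sample offset are illustrative, not the BFD code itself):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the G_n splitting used by the ALU/LDR group relocations:
       each step peels the most significant 8-bit chunk, aligned to an even
       bit position, off what remains of the offset.  (The real
       calculate_group_reloc_mask additionally returns the chunk in encoded
       constant+rotation form.)  */
    static uint32_t
    group_reloc_chunk (uint32_t value, int n, uint32_t *final_residual)
    {
      uint32_t g_n = 0;
      int step;

      for (step = 0; step <= n; step++)
        {
          int shift = 0;

          if (value != 0)
            {
              int msb;

              /* Most significant pair of set bits; ARM immediate rotations
                 work in steps of two, so chunks start on even positions.  */
              for (msb = 30; msb >= 0; msb -= 2)
                if (value & (3u << msb))
                  break;
              shift = msb - 6 > 0 ? msb - 6 : 0;
            }

          g_n = value & (0xffu << shift);   /* chunk selected for this group */
          value &= ~g_n;                    /* left over for later groups */
        }

      *final_residual = value;
      return g_n;
    }

    int
    main (void)
    {
      uint32_t residual;
      uint32_t offset = 0x12345;   /* made-up sb- or pc-relative offset */

      printf ("G0 = %#x\n", (unsigned) group_reloc_chunk (offset, 0, &residual));
      printf ("G1 = %#x\n", (unsigned) group_reloc_chunk (offset, 1, &residual));
      printf ("G2 = %#x\n", (unsigned) group_reloc_chunk (offset, 2, &residual));
      printf ("residual after G2 = %#x\n", (unsigned) residual);
      return 0;
    }

For the sample offset 0x12345 the chunks come out as 0x12000, 0x344 and 0x1 with nothing left over, each expressible as an 8-bit constant with an even rotation, which is exactly the property the G0/G1/G2 relocations rely on.
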
| @@ -9951,76 +10000,77 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| case R_ARM_LDR_SB_G2:
|
| {
|
| bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
|
| - bfd_vma pc = input_section->output_section->vma
|
| + bfd_vma pc = input_section->output_section->vma
|
| + input_section->output_offset + rel->r_offset;
|
| - bfd_vma sb = 0; /* See note above. */
|
| - bfd_vma residual;
|
| + /* sb is the origin of the *segment* containing the symbol. */
|
| + bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
|
| + bfd_vma residual;
|
| bfd_signed_vma signed_value;
|
| - int group = 0;
|
| -
|
| - /* Determine which groups of bits to calculate. */
|
| - switch (r_type)
|
| - {
|
| - case R_ARM_LDR_PC_G0:
|
| - case R_ARM_LDR_SB_G0:
|
| - group = 0;
|
| - break;
|
| -
|
| - case R_ARM_LDR_PC_G1:
|
| - case R_ARM_LDR_SB_G1:
|
| - group = 1;
|
| - break;
|
| -
|
| - case R_ARM_LDR_PC_G2:
|
| - case R_ARM_LDR_SB_G2:
|
| - group = 2;
|
| - break;
|
| -
|
| - default:
|
| - abort ();
|
| - }
|
| -
|
| - /* If REL, extract the addend from the insn. If RELA, it will
|
| - have already been fetched for us. */
|
| + int group = 0;
|
| +
|
| + /* Determine which groups of bits to calculate. */
|
| + switch (r_type)
|
| + {
|
| + case R_ARM_LDR_PC_G0:
|
| + case R_ARM_LDR_SB_G0:
|
| + group = 0;
|
| + break;
|
| +
|
| + case R_ARM_LDR_PC_G1:
|
| + case R_ARM_LDR_SB_G1:
|
| + group = 1;
|
| + break;
|
| +
|
| + case R_ARM_LDR_PC_G2:
|
| + case R_ARM_LDR_SB_G2:
|
| + group = 2;
|
| + break;
|
| +
|
| + default:
|
| + abort ();
|
| + }
|
| +
|
| + /* If REL, extract the addend from the insn. If RELA, it will
|
| + have already been fetched for us. */
|
| if (globals->use_rel)
|
| - {
|
| - int negative = (insn & (1 << 23)) ? 1 : -1;
|
| - signed_addend = negative * (insn & 0xfff);
|
| - }
|
| + {
|
| + int negative = (insn & (1 << 23)) ? 1 : -1;
|
| + signed_addend = negative * (insn & 0xfff);
|
| + }
|
|
|
| /* Compute the value (X) to go in the place. */
|
| - if (r_type == R_ARM_LDR_PC_G0
|
| - || r_type == R_ARM_LDR_PC_G1
|
| - || r_type == R_ARM_LDR_PC_G2)
|
| - /* PC relative. */
|
| - signed_value = value - pc + signed_addend;
|
| - else
|
| - /* Section base relative. */
|
| - signed_value = value - sb + signed_addend;
|
| -
|
| - /* Calculate the value of the relevant G_{n-1} to obtain
|
| - the residual at that stage. */
|
| - calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
|
| -
|
| - /* Check for overflow. */
|
| - if (residual >= 0x1000)
|
| - {
|
| - (*_bfd_error_handler)
|
| - (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
|
| - input_bfd, input_section,
|
| - (long) rel->r_offset, abs (signed_value), howto->name);
|
| - return bfd_reloc_overflow;
|
| - }
|
| -
|
| - /* Mask out the value and U bit. */
|
| - insn &= 0xff7ff000;
|
| -
|
| - /* Set the U bit if the value to go in the place is non-negative. */
|
| - if (signed_value >= 0)
|
| - insn |= 1 << 23;
|
| -
|
| - /* Encode the offset. */
|
| - insn |= residual;
|
| + if (r_type == R_ARM_LDR_PC_G0
|
| + || r_type == R_ARM_LDR_PC_G1
|
| + || r_type == R_ARM_LDR_PC_G2)
|
| + /* PC relative. */
|
| + signed_value = value - pc + signed_addend;
|
| + else
|
| + /* Section base relative. */
|
| + signed_value = value - sb + signed_addend;
|
| +
|
| + /* Calculate the value of the relevant G_{n-1} to obtain
|
| + the residual at that stage. */
|
| + calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
|
| +
|
| + /* Check for overflow. */
|
| + if (residual >= 0x1000)
|
| + {
|
| + (*_bfd_error_handler)
|
| + (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
|
| + input_bfd, input_section,
|
| + (long) rel->r_offset, abs (signed_value), howto->name);
|
| + return bfd_reloc_overflow;
|
| + }
|
| +
|
| + /* Mask out the value and U bit. */
|
| + insn &= 0xff7ff000;
|
| +
|
| + /* Set the U bit if the value to go in the place is non-negative. */
|
| + if (signed_value >= 0)
|
| + insn |= 1 << 23;
|
| +
|
| + /* Encode the offset. */
|
| + insn |= residual;
|
|
|
| bfd_put_32 (input_bfd, insn, hit_data);
|
| }
|
| @@ -10034,76 +10084,77 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| case R_ARM_LDRS_SB_G2:
|
| {
|
| bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
|
| - bfd_vma pc = input_section->output_section->vma
|
| + bfd_vma pc = input_section->output_section->vma
|
| + input_section->output_offset + rel->r_offset;
|
| - bfd_vma sb = 0; /* See note above. */
|
| - bfd_vma residual;
|
| + /* sb is the origin of the *segment* containing the symbol. */
|
| + bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
|
| + bfd_vma residual;
|
| bfd_signed_vma signed_value;
|
| - int group = 0;
|
| -
|
| - /* Determine which groups of bits to calculate. */
|
| - switch (r_type)
|
| - {
|
| - case R_ARM_LDRS_PC_G0:
|
| - case R_ARM_LDRS_SB_G0:
|
| - group = 0;
|
| - break;
|
| -
|
| - case R_ARM_LDRS_PC_G1:
|
| - case R_ARM_LDRS_SB_G1:
|
| - group = 1;
|
| - break;
|
| -
|
| - case R_ARM_LDRS_PC_G2:
|
| - case R_ARM_LDRS_SB_G2:
|
| - group = 2;
|
| - break;
|
| -
|
| - default:
|
| - abort ();
|
| - }
|
| -
|
| - /* If REL, extract the addend from the insn. If RELA, it will
|
| - have already been fetched for us. */
|
| + int group = 0;
|
| +
|
| + /* Determine which groups of bits to calculate. */
|
| + switch (r_type)
|
| + {
|
| + case R_ARM_LDRS_PC_G0:
|
| + case R_ARM_LDRS_SB_G0:
|
| + group = 0;
|
| + break;
|
| +
|
| + case R_ARM_LDRS_PC_G1:
|
| + case R_ARM_LDRS_SB_G1:
|
| + group = 1;
|
| + break;
|
| +
|
| + case R_ARM_LDRS_PC_G2:
|
| + case R_ARM_LDRS_SB_G2:
|
| + group = 2;
|
| + break;
|
| +
|
| + default:
|
| + abort ();
|
| + }
|
| +
|
| + /* If REL, extract the addend from the insn. If RELA, it will
|
| + have already been fetched for us. */
|
| if (globals->use_rel)
|
| - {
|
| - int negative = (insn & (1 << 23)) ? 1 : -1;
|
| - signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
|
| - }
|
| + {
|
| + int negative = (insn & (1 << 23)) ? 1 : -1;
|
| + signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
|
| + }
|
|
|
| /* Compute the value (X) to go in the place. */
|
| - if (r_type == R_ARM_LDRS_PC_G0
|
| - || r_type == R_ARM_LDRS_PC_G1
|
| - || r_type == R_ARM_LDRS_PC_G2)
|
| - /* PC relative. */
|
| - signed_value = value - pc + signed_addend;
|
| - else
|
| - /* Section base relative. */
|
| - signed_value = value - sb + signed_addend;
|
| -
|
| - /* Calculate the value of the relevant G_{n-1} to obtain
|
| - the residual at that stage. */
|
| - calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
|
| -
|
| - /* Check for overflow. */
|
| - if (residual >= 0x100)
|
| - {
|
| - (*_bfd_error_handler)
|
| - (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
|
| - input_bfd, input_section,
|
| - (long) rel->r_offset, abs (signed_value), howto->name);
|
| - return bfd_reloc_overflow;
|
| - }
|
| -
|
| - /* Mask out the value and U bit. */
|
| - insn &= 0xff7ff0f0;
|
| -
|
| - /* Set the U bit if the value to go in the place is non-negative. */
|
| - if (signed_value >= 0)
|
| - insn |= 1 << 23;
|
| -
|
| - /* Encode the offset. */
|
| - insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
|
| + if (r_type == R_ARM_LDRS_PC_G0
|
| + || r_type == R_ARM_LDRS_PC_G1
|
| + || r_type == R_ARM_LDRS_PC_G2)
|
| + /* PC relative. */
|
| + signed_value = value - pc + signed_addend;
|
| + else
|
| + /* Section base relative. */
|
| + signed_value = value - sb + signed_addend;
|
| +
|
| + /* Calculate the value of the relevant G_{n-1} to obtain
|
| + the residual at that stage. */
|
| + calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
|
| +
|
| + /* Check for overflow. */
|
| + if (residual >= 0x100)
|
| + {
|
| + (*_bfd_error_handler)
|
| + (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
|
| + input_bfd, input_section,
|
| + (long) rel->r_offset, abs (signed_value), howto->name);
|
| + return bfd_reloc_overflow;
|
| + }
|
| +
|
| + /* Mask out the value and U bit. */
|
| + insn &= 0xff7ff0f0;
|
| +
|
| + /* Set the U bit if the value to go in the place is non-negative. */
|
| + if (signed_value >= 0)
|
| + insn |= 1 << 23;
|
| +
|
| + /* Encode the offset. */
|
| + insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
|
|
|
| bfd_put_32 (input_bfd, insn, hit_data);
|
| }
|
| @@ -10117,78 +10168,79 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
| case R_ARM_LDC_SB_G2:
|
| {
|
| bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
|
| - bfd_vma pc = input_section->output_section->vma
|
| + bfd_vma pc = input_section->output_section->vma
|
| + input_section->output_offset + rel->r_offset;
|
| - bfd_vma sb = 0; /* See note above. */
|
| - bfd_vma residual;
|
| + /* sb is the origin of the *segment* containing the symbol. */
|
| + bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
|
| + bfd_vma residual;
|
| bfd_signed_vma signed_value;
|
| - int group = 0;
|
| -
|
| - /* Determine which groups of bits to calculate. */
|
| - switch (r_type)
|
| - {
|
| - case R_ARM_LDC_PC_G0:
|
| - case R_ARM_LDC_SB_G0:
|
| - group = 0;
|
| - break;
|
| -
|
| - case R_ARM_LDC_PC_G1:
|
| - case R_ARM_LDC_SB_G1:
|
| - group = 1;
|
| - break;
|
| -
|
| - case R_ARM_LDC_PC_G2:
|
| - case R_ARM_LDC_SB_G2:
|
| - group = 2;
|
| - break;
|
| -
|
| - default:
|
| - abort ();
|
| - }
|
| -
|
| - /* If REL, extract the addend from the insn. If RELA, it will
|
| - have already been fetched for us. */
|
| + int group = 0;
|
| +
|
| + /* Determine which groups of bits to calculate. */
|
| + switch (r_type)
|
| + {
|
| + case R_ARM_LDC_PC_G0:
|
| + case R_ARM_LDC_SB_G0:
|
| + group = 0;
|
| + break;
|
| +
|
| + case R_ARM_LDC_PC_G1:
|
| + case R_ARM_LDC_SB_G1:
|
| + group = 1;
|
| + break;
|
| +
|
| + case R_ARM_LDC_PC_G2:
|
| + case R_ARM_LDC_SB_G2:
|
| + group = 2;
|
| + break;
|
| +
|
| + default:
|
| + abort ();
|
| + }
|
| +
|
| + /* If REL, extract the addend from the insn. If RELA, it will
|
| + have already been fetched for us. */
|
| if (globals->use_rel)
|
| - {
|
| - int negative = (insn & (1 << 23)) ? 1 : -1;
|
| - signed_addend = negative * ((insn & 0xff) << 2);
|
| - }
|
| + {
|
| + int negative = (insn & (1 << 23)) ? 1 : -1;
|
| + signed_addend = negative * ((insn & 0xff) << 2);
|
| + }
|
|
|
| /* Compute the value (X) to go in the place. */
|
| - if (r_type == R_ARM_LDC_PC_G0
|
| - || r_type == R_ARM_LDC_PC_G1
|
| - || r_type == R_ARM_LDC_PC_G2)
|
| - /* PC relative. */
|
| - signed_value = value - pc + signed_addend;
|
| - else
|
| - /* Section base relative. */
|
| - signed_value = value - sb + signed_addend;
|
| -
|
| - /* Calculate the value of the relevant G_{n-1} to obtain
|
| - the residual at that stage. */
|
| - calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
|
| -
|
| - /* Check for overflow. (The absolute value to go in the place must be
|
| - divisible by four and, after having been divided by four, must
|
| - fit in eight bits.) */
|
| - if ((residual & 0x3) != 0 || residual >= 0x400)
|
| - {
|
| - (*_bfd_error_handler)
|
| - (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
|
| - input_bfd, input_section,
|
| - (long) rel->r_offset, abs (signed_value), howto->name);
|
| - return bfd_reloc_overflow;
|
| - }
|
| -
|
| - /* Mask out the value and U bit. */
|
| - insn &= 0xff7fff00;
|
| -
|
| - /* Set the U bit if the value to go in the place is non-negative. */
|
| - if (signed_value >= 0)
|
| - insn |= 1 << 23;
|
| -
|
| - /* Encode the offset. */
|
| - insn |= residual >> 2;
|
| + if (r_type == R_ARM_LDC_PC_G0
|
| + || r_type == R_ARM_LDC_PC_G1
|
| + || r_type == R_ARM_LDC_PC_G2)
|
| + /* PC relative. */
|
| + signed_value = value - pc + signed_addend;
|
| + else
|
| + /* Section base relative. */
|
| + signed_value = value - sb + signed_addend;
|
| +
|
| + /* Calculate the value of the relevant G_{n-1} to obtain
|
| + the residual at that stage. */
|
| + calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
|
| +
|
| + /* Check for overflow. (The absolute value to go in the place must be
|
| + divisible by four and, after having been divided by four, must
|
| + fit in eight bits.) */
|
| + if ((residual & 0x3) != 0 || residual >= 0x400)
|
| + {
|
| + (*_bfd_error_handler)
|
| + (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
|
| + input_bfd, input_section,
|
| + (long) rel->r_offset, abs (signed_value), howto->name);
|
| + return bfd_reloc_overflow;
|
| + }
|
| +
|
| + /* Mask out the value and U bit. */
|
| + insn &= 0xff7fff00;
|
| +
|
| + /* Set the U bit if the value to go in the place is non-negative. */
|
| + if (signed_value >= 0)
|
| + insn |= 1 << 23;
|
| +
|
| + /* Encode the offset. */
|
| + insn |= residual >> 2;
|
|
|
| bfd_put_32 (input_bfd, insn, hit_data);
|
| }
|
| @@ -10340,8 +10392,8 @@ elf32_arm_relocate_section (bfd * output_bfd,
|
| r_type = arm_real_reloc_type (globals, r_type);
|
|
|
| if ( r_type == R_ARM_GNU_VTENTRY
|
| - || r_type == R_ARM_GNU_VTINHERIT)
|
| - continue;
|
| + || r_type == R_ARM_GNU_VTINHERIT)
|
| + continue;
|
|
|
| bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
|
| howto = bfd_reloc.howto;
|
| @@ -10474,12 +10526,12 @@ elf32_arm_relocate_section (bfd * output_bfd,
|
| }
|
| else
|
| {
|
| - bfd_boolean warned;
|
| + bfd_boolean warned, ignored;
|
|
|
| RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
|
| r_symndx, symtab_hdr, sym_hashes,
|
| h, sec, relocation,
|
| - unresolved_reloc, warned);
|
| + unresolved_reloc, warned, ignored);
|
|
|
| sym_type = h->type;
|
| }
|
| @@ -10534,10 +10586,10 @@ elf32_arm_relocate_section (bfd * output_bfd,
|
| }
|
|
|
| /* We call elf32_arm_final_link_relocate unless we're completely
|
| - done, i.e., the relaxation produced the final output we want,
|
| - and we won't let anybody mess with it. Also, we have to do
|
| - addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
|
| - both in relaxed and non-relaxed cases */
|
| + done, i.e., the relaxation produced the final output we want,
|
| + and we won't let anybody mess with it. Also, we have to do
|
| + addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
|
| + both in relaxed and non-relaxed cases */
|
| if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
|
| || (IS_ARM_TLS_GNU_RELOC (r_type)
|
| && !((h ? elf32_arm_hash_entry (h)->tls_type :
|
| @@ -10565,8 +10617,8 @@ elf32_arm_relocate_section (bfd * output_bfd,
|
| because such sections are not SEC_ALLOC and thus ld.so will
|
| not process them. */
|
| if (unresolved_reloc
|
| - && !((input_section->flags & SEC_DEBUGGING) != 0
|
| - && h->def_dynamic)
|
| + && !((input_section->flags & SEC_DEBUGGING) != 0
|
| + && h->def_dynamic)
|
| && _bfd_elf_section_offset (output_bfd, info, input_section,
|
| rel->r_offset) != (bfd_vma) -1)
|
| {
|
| @@ -10713,7 +10765,7 @@ insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
|
|
|
| 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
|
| 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
|
| - codes which have been inlined into the index).
|
| + codes which have been inlined into the index).
|
|
|
| If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
|
|
|
| @@ -10739,7 +10791,7 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order,
|
| asection *sec;
|
|
|
| for (sec = inp->sections; sec != NULL; sec = sec->next)
|
| - {
|
| + {
|
| struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
|
| Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
|
|
|
| @@ -10749,15 +10801,15 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order,
|
| if (elf_sec->linked_to)
|
| {
|
| Elf_Internal_Shdr *linked_hdr
|
| - = &elf_section_data (elf_sec->linked_to)->this_hdr;
|
| + = &elf_section_data (elf_sec->linked_to)->this_hdr;
|
| struct _arm_elf_section_data *linked_sec_arm_data
|
| - = get_arm_elf_section_data (linked_hdr->bfd_section);
|
| + = get_arm_elf_section_data (linked_hdr->bfd_section);
|
|
|
| if (linked_sec_arm_data == NULL)
|
| - continue;
|
| + continue;
|
|
|
| /* Link this .ARM.exidx section back from the text section it
|
| - describes. */
|
| + describes. */
|
| linked_sec_arm_data->u.text.arm_exidx_sec = sec;
|
| }
|
| }
|
| @@ -10782,7 +10834,7 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order,
|
| bfd *ibfd;
|
|
|
| if (arm_data == NULL)
|
| - continue;
|
| + continue;
|
|
|
| exidx_sec = arm_data->u.text.arm_exidx_sec;
|
| if (exidx_sec == NULL)
|
| @@ -10806,11 +10858,11 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order,
|
|
|
| hdr = &elf_section_data (exidx_sec)->this_hdr;
|
| if (hdr->sh_type != SHT_ARM_EXIDX)
|
| - continue;
|
| + continue;
|
|
|
| exidx_arm_data = get_arm_elf_section_data (exidx_sec);
|
| if (exidx_arm_data == NULL)
|
| - continue;
|
| + continue;
|
|
|
| ibfd = exidx_sec->owner;
|
|
|
| @@ -10860,7 +10912,7 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order,
|
|
|
| /* Free contents if we allocated it ourselves. */
|
| if (contents != hdr->contents)
|
| - free (contents);
|
| + free (contents);
|
|
|
| /* Record edits to be applied later (in elf32_arm_write_section). */
|
| exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
|
| @@ -10986,6 +11038,20 @@ bfd_arm_get_mach_from_attributes (bfd * abfd)
|
|
|
| if (strcmp (name, "IWMMXT") == 0)
|
| return bfd_mach_arm_iWMMXt;
|
| +
|
| + if (strcmp (name, "XSCALE") == 0)
|
| + {
|
| + int wmmx;
|
| +
|
| + BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
|
| + wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
|
| + switch (wmmx)
|
| + {
|
| + case 1: return bfd_mach_arm_iWMMXt;
|
| + case 2: return bfd_mach_arm_iWMMXt2;
|
| + default: return bfd_mach_arm_XScale;
|
| + }
|
| + }
|
| }
|
|
|
| return bfd_mach_arm_5TE;
|
| @@ -11073,7 +11139,7 @@ elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
|
| return FALSE;
|
|
|
| /* If the src and dest have different interworking flags
|
| - then turn off the interworking bit. */
|
| + then turn off the interworking bit. */
|
| if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
|
| {
|
| if (out_flags & EF_ARM_INTERWORK)
|
| @@ -11327,6 +11393,24 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
|
| T(V7E_M), /* V6S_M. */
|
| T(V7E_M) /* V7E_M. */
|
| };
|
| + const int v8[] =
|
| + {
|
| + T(V8), /* PRE_V4. */
|
| + T(V8), /* V4. */
|
| + T(V8), /* V4T. */
|
| + T(V8), /* V5T. */
|
| + T(V8), /* V5TE. */
|
| + T(V8), /* V5TEJ. */
|
| + T(V8), /* V6. */
|
| + T(V8), /* V6KZ. */
|
| + T(V8), /* V6T2. */
|
| + T(V8), /* V6K. */
|
| + T(V8), /* V7. */
|
| + T(V8), /* V6_M. */
|
| + T(V8), /* V6S_M. */
|
| + T(V8), /* V7E_M. */
|
| + T(V8) /* V8. */
|
| + };
|
| const int v4t_plus_v6_m[] =
|
| {
|
| -1, /* PRE_V4. */
|
| @@ -11343,6 +11427,7 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
|
| T(V6_M), /* V6_M. */
|
| T(V6S_M), /* V6S_M. */
|
| T(V7E_M), /* V7E_M. */
|
| + T(V8), /* V8. */
|
| T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
|
| };
|
| const int *comb[] =
|
| @@ -11353,6 +11438,7 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
|
| v6_m,
|
| v6s_m,
|
| v7e_m,
|
| + v8,
|
| /* Pseudo-architecture. */
|
| v4t_plus_v6_m
|
| };
|
| @@ -11485,7 +11571,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
|
| {
|
| if (out_attr[Tag_MPextension_use].i != 0
|
| && out_attr[Tag_MPextension_use_legacy].i
|
| - != out_attr[Tag_MPextension_use].i)
|
| + != out_attr[Tag_MPextension_use].i)
|
| {
|
| _bfd_error_handler
|
| (_("Error: %B has both the current and legacy "
|
| @@ -11554,7 +11640,8 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
|
| "ARM v6K",
|
| "ARM v7",
|
| "ARM v6-M",
|
| - "ARM v6S-M"
|
| + "ARM v6S-M",
|
| + "ARM v8"
|
| };
|
|
|
| /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
|
| @@ -11672,7 +11759,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
|
| /* 0 will merge with anything.
|
| 'A' and 'S' merge to 'A'.
|
| 'R' and 'S' merge to 'R'.
|
| - 'M' and 'A|R|S' is an error. */
|
| + 'M' and 'A|R|S' is an error. */
|
| if (out_attr[i].i == 0
|
| || (out_attr[i].i == 'S'
|
| && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
|
| @@ -11699,11 +11786,12 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
|
| when it's 0. It might mean absence of FP hardware if
|
| Tag_FP_arch is zero, otherwise it is effectively SP + DP. */
|
|
|
| +#define VFP_VERSION_COUNT 8
|
| static const struct
|
| {
|
| int ver;
|
| int regs;
|
| - } vfp_versions[7] =
|
| + } vfp_versions[VFP_VERSION_COUNT] =
|
| {
|
| {0, 0},
|
| {1, 16},
|
| @@ -11711,7 +11799,8 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
|
| {3, 32},
|
| {3, 16},
|
| {4, 32},
|
| - {4, 16}
|
| + {4, 16},
|
| + {8, 32}
|
| };
|
| int ver;
|
| int regs;
|
| @@ -11751,9 +11840,10 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
|
|
|
| /* Now we can handle Tag_FP_arch. */
|
|
|
| - /* Values greater than 6 aren't defined, so just pick the
|
| - biggest */
|
| - if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
|
| + /* Values of VFP_VERSION_COUNT or more aren't defined, so just
|
| + pick the biggest. */
|
| + if (in_attr[i].i >= VFP_VERSION_COUNT
|
| + && in_attr[i].i > out_attr[i].i)
|
| {
|
| out_attr[i] = in_attr[i];
|
| break;
|
| @@ -11767,8 +11857,8 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
|
| if (regs < vfp_versions[out_attr[i].i].regs)
|
| regs = vfp_versions[out_attr[i].i].regs;
|
| /* This assumes all possible supersets are also a valid
|
| - options. */
|
| - for (newval = 6; newval > 0; newval--)
|
| + options. */
|
| + for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
|
| {
|
| if (regs == vfp_versions[newval].regs
|
| && ver == vfp_versions[newval].ver)
|
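| With the table above grown to VFP_VERSION_COUNT entries, the merge logic is
| unchanged in shape: map each Tag_FP_arch value to the (version, register
| count) pair it implies, take the larger of each requirement, then find the
| table entry that provides exactly that combination.  A standalone sketch of
| that rule follows; the table values are copied from the vfp_versions
| initialiser above, everything else is an illustrative assumption rather
| than the routine itself.
|
|   #include <stdio.h>
|
|   #define VFP_VERSION_COUNT 8
|
|   /* Per Tag_FP_arch value: architecture version and number of D registers
|      (values as in the vfp_versions initialiser above).  */
|   static const struct { int ver; int regs; } vfp_versions[VFP_VERSION_COUNT] =
|   {
|     {0, 0}, {1, 16}, {2, 16}, {3, 32}, {3, 16}, {4, 32}, {4, 16}, {8, 32}
|   };
|
|   /* Illustrative merge: the result must satisfy the larger version and the
|      larger register count demanded by the two inputs.  */
|   static int
|   merge_fp_arch (int in_tag, int out_tag)
|   {
|     int ver = vfp_versions[in_tag].ver;
|     int regs = vfp_versions[in_tag].regs;
|     int newval;
|
|     if (ver < vfp_versions[out_tag].ver)
|       ver = vfp_versions[out_tag].ver;
|     if (regs < vfp_versions[out_tag].regs)
|       regs = vfp_versions[out_tag].regs;
|
|     /* This relies on every such superset being a valid tag of its own,
|        as the comment above notes.  */
|     for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
|       if (vfp_versions[newval].ver == ver && vfp_versions[newval].regs == regs)
|         break;
|     return newval;
|   }
|
|   int
|   main (void)
|   {
|     /* VFPv3-D16 (tag 4) merged with VFPv4-D16 (tag 6) still needs only
|        16 registers, so tag 6; VFPv3 (tag 3) merged with VFPv4-D16 (tag 6)
|        needs version 4 with 32 registers, i.e. tag 5.  */
|     printf ("merge (4, 6) = %d\n", merge_fp_arch (4, 6));
|     printf ("merge (3, 6) = %d\n", merge_fp_arch (3, 6));
|     return 0;
|   }
|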
| @@ -11783,7 +11873,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
|
| else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
|
| {
|
| /* It's sometimes ok to mix different configs, so this is only
|
| - a warning. */
|
| + a warning. */
|
| _bfd_error_handler
|
| (_("Warning: %B: Conflicting platform configuration"), ibfd);
|
| }
|
| @@ -12086,6 +12176,15 @@ elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
|
|
|
| case EF_ARM_EABI_VER5:
|
| fprintf (file, _(" [Version5 EABI]"));
|
| +
|
| + if (flags & EF_ARM_ABI_FLOAT_SOFT)
|
| + fprintf (file, _(" [soft-float ABI]"));
|
| +
|
| + if (flags & EF_ARM_ABI_FLOAT_HARD)
|
| + fprintf (file, _(" [hard-float ABI]"));
|
| +
|
| + flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
|
| +
|
| eabi:
|
| if (flags & EF_ARM_BE8)
|
| fprintf (file, _(" [BE8]"));
|
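| The two new "[soft-float ABI]" / "[hard-float ABI]" markers above are read
| straight out of e_flags.  As a quick way to inspect the same bits outside
| BFD, here is a standalone sketch that pulls e_flags from a 32-bit
| little-endian ELF file; the flag values 0x200 and 0x400 are the usual
| EF_ARM_ABI_FLOAT_SOFT / EF_ARM_ABI_FLOAT_HARD encodings and are an
| assumption here, not something this hunk defines.
|
|   #include <stdio.h>
|   #include <stdint.h>
|
|   #define ABI_FLOAT_SOFT 0x200u   /* Assumed EF_ARM_ABI_FLOAT_SOFT.  */
|   #define ABI_FLOAT_HARD 0x400u   /* Assumed EF_ARM_ABI_FLOAT_HARD.  */
|
|   int
|   main (int argc, char **argv)
|   {
|     unsigned char ehdr[52];       /* An Elf32_Ehdr is 52 bytes.  */
|     uint32_t e_flags;
|     FILE *f;
|
|     if (argc != 2 || (f = fopen (argv[1], "rb")) == NULL)
|       {
|         fprintf (stderr, "usage: %s <32-bit LE ELF file>\n", argv[0]);
|         return 1;
|       }
|     if (fread (ehdr, 1, sizeof ehdr, f) != sizeof ehdr)
|       {
|         fclose (f);
|         return 1;
|       }
|     fclose (f);
|
|     /* e_flags sits at offset 36 in a 32-bit ELF header.  */
|     e_flags = (uint32_t) ehdr[36] | ((uint32_t) ehdr[37] << 8)
|               | ((uint32_t) ehdr[38] << 16) | ((uint32_t) ehdr[39] << 24);
|
|     if (e_flags & ABI_FLOAT_HARD)
|       puts ("hard-float ABI");
|     else if (e_flags & ABI_FLOAT_SOFT)
|       puts ("soft-float ABI");
|     else
|       puts ("float ABI not recorded in e_flags");
|     return 0;
|   }
|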
| @@ -12444,6 +12543,10 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
|
| while (h->root.type == bfd_link_hash_indirect
|
| || h->root.type == bfd_link_hash_warning)
|
| h = (struct elf_link_hash_entry *) h->root.u.i.link;
|
| +
|
| + /* PR15323, ref flags aren't set for references in the
|
| + same object. */
|
| + h->root.non_ir_ref = 1;
|
| }
|
| }
|
|
|
| @@ -12456,7 +12559,7 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
|
| /* Could be done earlier, if h were already available. */
|
| r_type = elf32_arm_tls_transition (info, r_type, h);
|
| switch (r_type)
|
| - {
|
| + {
|
| case R_ARM_GOT32:
|
| case R_ARM_GOT_PREL:
|
| case R_ARM_TLS_GD32:
|
| @@ -12499,7 +12602,7 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
|
| }
|
|
|
| /* If a variable is accessed with both tls methods, two
|
| - slots may be created. */
|
| + slots may be created. */
|
| if (GOT_TLS_GD_ANY_P (old_tls_type)
|
| && GOT_TLS_GD_ANY_P (tls_type))
|
| tls_type |= old_tls_type;
|
| @@ -12512,9 +12615,9 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
|
| tls_type |= old_tls_type;
|
|
|
| /* If the symbol is accessed in both IE and GDESC
|
| - method, we're able to relax. Turn off the GDESC flag,
|
| - without messing up with any other kind of tls types
|
| - that may be involved */
|
| + method, we're able to relax. Turn off the GDESC flag,
|
| + without messing up with any other kind of tls types
|
| + that may be involved */
|
| if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
|
| tls_type &= ~GOT_TLS_GDESC;
|
|
|
| @@ -12611,22 +12714,22 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
|
| may_need_local_target_p = TRUE;
|
| break;
|
|
|
| - /* This relocation describes the C++ object vtable hierarchy.
|
| - Reconstruct it for later use during GC. */
|
| - case R_ARM_GNU_VTINHERIT:
|
| - if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
|
| - return FALSE;
|
| - break;
|
| -
|
| - /* This relocation describes which C++ vtable entries are actually
|
| - used. Record for later use during GC. */
|
| - case R_ARM_GNU_VTENTRY:
|
| - BFD_ASSERT (h != NULL);
|
| - if (h != NULL
|
| - && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
|
| - return FALSE;
|
| - break;
|
| - }
|
| + /* This relocation describes the C++ object vtable hierarchy.
|
| + Reconstruct it for later use during GC. */
|
| + case R_ARM_GNU_VTINHERIT:
|
| + if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
|
| + return FALSE;
|
| + break;
|
| +
|
| + /* This relocation describes which C++ vtable entries are actually
|
| + used. Record for later use during GC. */
|
| + case R_ARM_GNU_VTENTRY:
|
| + BFD_ASSERT (h != NULL);
|
| + if (h != NULL
|
| + && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
|
| + return FALSE;
|
| + break;
|
| + }
|
|
|
| if (h != NULL)
|
| {
|
| @@ -12884,9 +12987,9 @@ elf32_arm_find_nearest_line (bfd * abfd,
|
| /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
|
|
|
| if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
|
| - section, symbols, offset,
|
| + section, symbols, offset,
|
| filename_ptr, functionname_ptr,
|
| - line_ptr, 0,
|
| + line_ptr, NULL, 0,
|
| & elf_tdata (abfd)->dwarf2_find_line_info))
|
| {
|
| if (!*functionname_ptr)
|
| @@ -13195,18 +13298,18 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
|
| s->size += 4;
|
| else
|
| {
|
| - if (tls_type & GOT_TLS_GDESC)
|
| - {
|
| + if (tls_type & GOT_TLS_GDESC)
|
| + {
|
| /* R_ARM_TLS_DESC needs 2 GOT slots. */
|
| - eh->tlsdesc_got
|
| + eh->tlsdesc_got
|
| = (htab->root.sgotplt->size
|
| - elf32_arm_compute_jump_table_size (htab));
|
| - htab->root.sgotplt->size += 8;
|
| - h->got.offset = (bfd_vma) -2;
|
| + htab->root.sgotplt->size += 8;
|
| + h->got.offset = (bfd_vma) -2;
|
| /* plt.got_offset needs to know there's a TLS_DESC
|
| reloc in the middle of .got.plt. */
|
| - htab->num_tls_desc++;
|
| - }
|
| + htab->num_tls_desc++;
|
| + }
|
|
|
| if (tls_type & GOT_TLS_GD)
|
| {
|
| @@ -13253,7 +13356,7 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
|
| if ((tls_type & GOT_TLS_GD) && indx != 0)
|
| elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
|
| }
|
| - else if (!SYMBOL_REFERENCES_LOCAL (info, h))
|
| + else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
|
| {
|
| if (htab->root.dynamic_sections_created)
|
| /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
|
| @@ -13265,7 +13368,8 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
|
| they all resolve dynamically instead. Reserve room for the
|
| GOT entry's R_ARM_IRELATIVE relocation. */
|
| elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
|
| - else if (info->shared)
|
| + else if (info->shared && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
|
| + || h->root.type != bfd_link_hash_undefweak))
|
| /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
|
| elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
|
| }
|
| @@ -13318,11 +13422,11 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
|
| if (info->shared || htab->root.is_relocatable_executable)
|
| {
|
| /* The only relocs that use pc_count are R_ARM_REL32 and
|
| - R_ARM_REL32_NOI, which will appear on something like
|
| - ".long foo - .". We want calls to protected symbols to resolve
|
| - directly to the function rather than going via the plt. If people
|
| - want function pointer comparisons to work as expected then they
|
| - should avoid writing assembly like ".long foo - .". */
|
| + R_ARM_REL32_NOI, which will appear on something like
|
| + ".long foo - .". We want calls to protected symbols to resolve
|
| + directly to the function rather than going via the plt. If people
|
| + want function pointer comparisons to work as expected then they
|
| + should avoid writing assembly like ".long foo - .". */
|
| if (SYMBOL_CALLS_LOCAL (info, h))
|
| {
|
| struct elf_dyn_relocs **pp;
|
| @@ -13352,7 +13456,7 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
|
| }
|
|
|
| /* Also discard relocs on undefined weak syms with non-default
|
| - visibility. */
|
| + visibility. */
|
| if (eh->dyn_relocs != NULL
|
| && h->root.type == bfd_link_hash_undefweak)
|
| {
|
| @@ -13523,7 +13627,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
|
| struct elf_dyn_relocs *p;
|
|
|
| for (p = (struct elf_dyn_relocs *)
|
| - elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
|
| + elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
|
| {
|
| if (!bfd_is_abs_section (p->sec)
|
| && bfd_is_abs_section (p->sec->output_section))
|
| @@ -13619,7 +13723,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
|
| *local_got = (bfd_vma) -2;
|
| /* plt.got_offset needs to know there's a TLS_DESC
|
| reloc in the middle of .got.plt. */
|
| - htab->num_tls_desc++;
|
| + htab->num_tls_desc++;
|
| }
|
| if (*local_tls_type & GOT_TLS_IE)
|
| s->size += 4;
|
| @@ -13643,14 +13747,18 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
|
| && (local_iplt == NULL
|
| || local_iplt->arm.noncall_refcount == 0))
|
| elf32_arm_allocate_irelocs (info, srel, 1);
|
| - else if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC))
|
| - || *local_tls_type & GOT_TLS_GD)
|
| - elf32_arm_allocate_dynrelocs (info, srel, 1);
|
| -
|
| - if (info->shared && *local_tls_type & GOT_TLS_GDESC)
|
| + else if (info->shared || output_bfd->flags & DYNAMIC)
|
| {
|
| - elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
|
| - htab->tls_trampoline = -1;
|
| + if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC))
|
| + || *local_tls_type & GOT_TLS_GD)
|
| + elf32_arm_allocate_dynrelocs (info, srel, 1);
|
| +
|
| + if (info->shared && *local_tls_type & GOT_TLS_GDESC)
|
| + {
|
| + elf32_arm_allocate_dynrelocs (info,
|
| + htab->root.srelplt, 1);
|
| + htab->tls_trampoline = -1;
|
| + }
|
| }
|
| }
|
| else
|
| @@ -13685,8 +13793,8 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
|
|
|
| if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
|
| || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
|
| - /* xgettext:c-format */
|
| - _bfd_error_handler (_("Errors encountered processing file %s"),
|
| + /* xgettext:c-format */
|
| + _bfd_error_handler (_("Errors encountered processing file %s"),
|
| ibfd->filename);
|
| }
|
|
|
| @@ -13710,7 +13818,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
|
| htab->root.splt->size += htab->plt_entry_size;
|
|
|
| /* If we're not using lazy TLS relocations, don't generate the
|
| - PLT and GOT entries they require. */
|
| + PLT and GOT entries they require. */
|
| if (!(info->flags & DF_BIND_NOW))
|
| {
|
| htab->dt_tlsdesc_got = htab->root.sgot->size;
|
| @@ -13864,7 +13972,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
|
|
|
| static bfd_boolean
|
| elf32_arm_always_size_sections (bfd *output_bfd,
|
| - struct bfd_link_info *info)
|
| + struct bfd_link_info *info)
|
| {
|
| asection *tls_sec;
|
|
|
| @@ -13881,22 +13989,22 @@ elf32_arm_always_size_sections (bfd *output_bfd,
|
| (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
|
|
|
| if (tlsbase)
|
| - {
|
| - struct bfd_link_hash_entry *bh = NULL;
|
| + {
|
| + struct bfd_link_hash_entry *bh = NULL;
|
| const struct elf_backend_data *bed
|
| - = get_elf_backend_data (output_bfd);
|
| + = get_elf_backend_data (output_bfd);
|
|
|
| - if (!(_bfd_generic_link_add_one_symbol
|
| + if (!(_bfd_generic_link_add_one_symbol
|
| (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
|
| tls_sec, 0, NULL, FALSE,
|
| bed->collect, &bh)))
|
| return FALSE;
|
|
|
| - tlsbase->type = STT_TLS;
|
| - tlsbase = (struct elf_link_hash_entry *)bh;
|
| - tlsbase->def_regular = 1;
|
| - tlsbase->other = STV_HIDDEN;
|
| - (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
|
| + tlsbase->type = STT_TLS;
|
| + tlsbase = (struct elf_link_hash_entry *)bh;
|
| + tlsbase->def_regular = 1;
|
| + tlsbase->other = STV_HIDDEN;
|
| + (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
|
| }
|
| }
|
| return TRUE;
|
| @@ -13925,8 +14033,9 @@ elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
|
| if (!eh->is_iplt)
|
| {
|
| BFD_ASSERT (h->dynindx != -1);
|
| - elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
|
| - h->dynindx, 0);
|
| + if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
|
| + h->dynindx, 0))
|
| + return FALSE;
|
| }
|
|
|
| if (!h->def_regular)
|
| @@ -13979,7 +14088,7 @@ elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
|
| /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
|
| the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
|
| to the ".got" section. */
|
| - if (strcmp (h->root.root.string, "_DYNAMIC") == 0
|
| + if (h == htab->root.hdynamic
|
| || (!htab->vxworks_p && h == htab->root.hgot))
|
| sym->st_shndx = SHN_ABS;
|
|
|
| @@ -14004,6 +14113,38 @@ arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
|
| }
|
| }
|
|
|
| +/* Install the special first PLT entry for elf32-arm-nacl. Unlike
|
| + other variants, NaCl needs this entry in a static executable's
|
| + .iplt too. When we're handling that case, GOT_DISPLACEMENT is
|
| + zero. For .iplt really only the last bundle is useful, and .iplt
|
| + could have a shorter first entry, with each individual PLT entry's
|
| + relative branch calculated differently so it targets the last
|
| + bundle instead of the instruction before it (labelled .Lplt_tail
|
| + above). But it's simpler to keep the size and layout of PLT0
|
| + consistent with the dynamic case, at the cost of some dead code at
|
| + the start of .iplt and the one dead store to the stack at the start
|
| + of .Lplt_tail. */
|
| +static void
|
| +arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
|
| + asection *plt, bfd_vma got_displacement)
|
| +{
|
| + unsigned int i;
|
| +
|
| + put_arm_insn (htab, output_bfd,
|
| + elf32_arm_nacl_plt0_entry[0]
|
| + | arm_movw_immediate (got_displacement),
|
| + plt->contents + 0);
|
| + put_arm_insn (htab, output_bfd,
|
| + elf32_arm_nacl_plt0_entry[1]
|
| + | arm_movt_immediate (got_displacement),
|
| + plt->contents + 4);
|
| +
|
| + for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
|
| + put_arm_insn (htab, output_bfd,
|
| + elf32_arm_nacl_plt0_entry[i],
|
| + plt->contents + (i * 4));
|
| +}
|
| +
|
| /* Finish up the dynamic sections. */
|
|
|
| static bfd_boolean
|
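| arm_nacl_put_plt0 above scatters GOT_DISPLACEMENT across a MOVW/MOVT pair
| through arm_movw_immediate and arm_movt_immediate, neither of which appears
| in this hunk.  The sketch below shows the assumed encoding: the 16-bit
| immediate of MOVW/MOVT is stored as imm4 in instruction bits 16-19 and
| imm12 in bits 0-11, and 0xe300c000 / 0xe340c000 are MOVW ip, #0 and
| MOVT ip, #0 respectively.  Illustrative only, not the BFD helpers.
|
|   #include <stdio.h>
|   #include <stdint.h>
|
|   /* Scatter the low 16 bits of VALUE into the imm4:imm12 fields.  */
|   static uint32_t
|   movw_immediate (uint32_t value)
|   {
|     return (value & 0x00000fffu) | ((value & 0x0000f000u) << 4);
|   }
|
|   /* Scatter the high 16 bits of VALUE into the imm4:imm12 fields.  */
|   static uint32_t
|   movt_immediate (uint32_t value)
|   {
|     return ((value & 0x0fff0000u) >> 16) | ((value & 0xf0000000u) >> 12);
|   }
|
|   int
|   main (void)
|   {
|     /* Hypothetical displacement from the PLT to the GOT.  */
|     uint32_t got_displacement = 0x00123abcu;
|
|     printf ("movw ip, #lo16: 0x%08x\n",
|             0xe300c000u | movw_immediate (got_displacement));
|     printf ("movt ip, #hi16: 0x%08x\n",
|             0xe340c000u | movt_immediate (got_displacement));
|     return 0;
|   }
|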
| @@ -14083,7 +14224,14 @@ elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info
|
| name = RELOC_SECTION (htab, ".plt");
|
| get_vma:
|
| s = bfd_get_section_by_name (output_bfd, name);
|
| - BFD_ASSERT (s != NULL);
|
| + if (s == NULL)
|
| + {
|
| + /* PR ld/14397: Issue an error message if a required section is missing. */
|
| + (*_bfd_error_handler)
|
| + (_("error: required section '%s' not found in the linker script"), name);
|
| + bfd_set_error (bfd_error_invalid_operation);
|
| + return FALSE;
|
| + }
|
| if (!htab->symbian_p)
|
| dyn.d_un.d_ptr = s->vma;
|
| else
|
| @@ -14162,16 +14310,16 @@ elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info
|
| break;
|
|
|
| case DT_TLSDESC_PLT:
|
| - s = htab->root.splt;
|
| + s = htab->root.splt;
|
| dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
|
| + htab->dt_tlsdesc_plt);
|
| bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
|
| break;
|
|
|
| case DT_TLSDESC_GOT:
|
| - s = htab->root.sgot;
|
| + s = htab->root.sgot;
|
| dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
|
| - + htab->dt_tlsdesc_got);
|
| + + htab->dt_tlsdesc_got);
|
| bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
|
| break;
|
|
|
| @@ -14235,24 +14383,8 @@ elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info
|
| htab->srelplt2->contents);
|
| }
|
| else if (htab->nacl_p)
|
| - {
|
| - unsigned int i;
|
| -
|
| - got_displacement = got_address + 8 - (plt_address + 16);
|
| -
|
| - put_arm_insn (htab, output_bfd,
|
| - elf32_arm_nacl_plt0_entry[0]
|
| - | arm_movw_immediate (got_displacement),
|
| - splt->contents + 0);
|
| - put_arm_insn (htab, output_bfd,
|
| - elf32_arm_nacl_plt0_entry[1]
|
| - | arm_movt_immediate (got_displacement),
|
| - splt->contents + 4);
|
| - for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
|
| - put_arm_insn (htab, output_bfd,
|
| - elf32_arm_nacl_plt0_entry[i],
|
| - splt->contents + (i * 4));
|
| - }
|
| + arm_nacl_put_plt0 (htab, output_bfd, splt,
|
| + got_address + 8 - (plt_address + 16));
|
| else
|
| {
|
| got_displacement = got_address - (plt_address + 16);
|
| @@ -14345,6 +14477,10 @@ elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info
|
| }
|
| }
|
|
|
| + if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
|
| + /* NaCl uses a special first entry in .iplt too. */
|
| + arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
|
| +
|
| /* Fill in the first three entries in the global offset table. */
|
| if (sgot)
|
| {
|
| @@ -14386,10 +14522,22 @@ elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATT
|
| if (globals != NULL && globals->byteswap_code)
|
| i_ehdrp->e_flags |= EF_ARM_BE8;
|
| }
|
| +
|
| + if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
|
| + && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
|
| + {
|
| + int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
|
| + if (abi)
|
| + i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
|
| + else
|
| + i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
|
| + }
|
| }
|
|
|
| static enum elf_reloc_type_class
|
| -elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
|
| +elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
|
| + const asection *rel_sec ATTRIBUTE_UNUSED,
|
| + const Elf_Internal_Rela *rela)
|
| {
|
| switch ((int) ELF32_R_TYPE (rela->r_info))
|
| {
|
| @@ -14916,6 +15064,15 @@ elf32_arm_output_arch_local_syms (bfd *output_bfd,
|
| #endif
|
| }
|
| }
|
| + if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
|
| + {
|
| + /* NaCl uses a special first entry in .iplt too. */
|
| + osi.sec = htab->root.iplt;
|
| + osi.sec_shndx = (_bfd_elf_section_from_bfd_section
|
| + (output_bfd, osi.sec->output_section));
|
| + if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
|
| + return FALSE;
|
| + }
|
| if ((htab->root.splt && htab->root.splt->size > 0)
|
| || (htab->root.iplt && htab->root.iplt->size > 0))
|
| {
|
| @@ -15052,7 +15209,7 @@ struct a8_branch_to_stub_data
|
|
|
| static bfd_boolean
|
| make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
|
| - void *in_arg)
|
| + void *in_arg)
|
| {
|
| struct elf32_arm_stub_hash_entry *stub_entry;
|
| struct a8_branch_to_stub_data *data;
|
| @@ -15191,82 +15348,82 @@ elf32_arm_write_section (bfd *output_bfd,
|
| unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
|
|
|
| for (errnode = arm_data->erratumlist; errnode != 0;
|
| - errnode = errnode->next)
|
| - {
|
| - bfd_vma target = errnode->vma - offset;
|
| -
|
| - switch (errnode->type)
|
| - {
|
| - case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
|
| - {
|
| - bfd_vma branch_to_veneer;
|
| - /* Original condition code of instruction, plus bit mask for
|
| - ARM B instruction. */
|
| - unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
|
| - | 0x0a000000;
|
| + errnode = errnode->next)
|
| + {
|
| + bfd_vma target = errnode->vma - offset;
|
| +
|
| + switch (errnode->type)
|
| + {
|
| + case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
|
| + {
|
| + bfd_vma branch_to_veneer;
|
| + /* Original condition code of instruction, plus bit mask for
|
| + ARM B instruction. */
|
| + unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
|
| + | 0x0a000000;
|
|
|
| /* The instruction is before the label. */
|
| target -= 4;
|
|
|
| /* Above offset included in -4 below. */
|
| branch_to_veneer = errnode->u.b.veneer->vma
|
| - - errnode->vma - 4;
|
| + - errnode->vma - 4;
|
|
|
| if ((signed) branch_to_veneer < -(1 << 25)
|
| || (signed) branch_to_veneer >= (1 << 25))
|
| (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
|
| "range"), output_bfd);
|
|
|
| - insn |= (branch_to_veneer >> 2) & 0xffffff;
|
| - contents[endianflip ^ target] = insn & 0xff;
|
| - contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
|
| - contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
|
| - contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
|
| - }
|
| - break;
|
| + insn |= (branch_to_veneer >> 2) & 0xffffff;
|
| + contents[endianflip ^ target] = insn & 0xff;
|
| + contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
|
| + contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
|
| + contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
|
| + }
|
| + break;
|
|
|
| case VFP11_ERRATUM_ARM_VENEER:
|
| - {
|
| - bfd_vma branch_from_veneer;
|
| - unsigned int insn;
|
| + {
|
| + bfd_vma branch_from_veneer;
|
| + unsigned int insn;
|
|
|
| - /* Take size of veneer into account. */
|
| - branch_from_veneer = errnode->u.v.branch->vma
|
| - - errnode->vma - 12;
|
| + /* Take size of veneer into account. */
|
| + branch_from_veneer = errnode->u.v.branch->vma
|
| + - errnode->vma - 12;
|
|
|
| if ((signed) branch_from_veneer < -(1 << 25)
|
| || (signed) branch_from_veneer >= (1 << 25))
|
| (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
|
| "range"), output_bfd);
|
|
|
| - /* Original instruction. */
|
| - insn = errnode->u.v.branch->u.b.vfp_insn;
|
| - contents[endianflip ^ target] = insn & 0xff;
|
| - contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
|
| - contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
|
| - contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
|
| -
|
| - /* Branch back to insn after original insn. */
|
| - insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
|
| - contents[endianflip ^ (target + 4)] = insn & 0xff;
|
| - contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
|
| - contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
|
| - contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
|
| - }
|
| - break;
|
| + /* Original instruction. */
|
| + insn = errnode->u.v.branch->u.b.vfp_insn;
|
| + contents[endianflip ^ target] = insn & 0xff;
|
| + contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
|
| + contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
|
| + contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
|
| +
|
| + /* Branch back to insn after original insn. */
|
| + insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
|
| + contents[endianflip ^ (target + 4)] = insn & 0xff;
|
| + contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
|
| + contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
|
| + contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
|
| + }
|
| + break;
|
|
|
| - default:
|
| - abort ();
|
| - }
|
| - }
|
| + default:
|
| + abort ();
|
| + }
|
| + }
|
| }
|
|
|
| if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
|
| {
|
| arm_unwind_table_edit *edit_node
|
| - = arm_data->u.exidx.unwind_edit_list;
|
| + = arm_data->u.exidx.unwind_edit_list;
|
| /* Now, sec->size is the size of the section we will write. The original
|
| - size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
|
| + size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
|
| markers) was sec->rawsize. (This isn't the case if we perform no
|
| edits, then rawsize will be zero and we should use size). */
|
| bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
|
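| Stepping back to the VFP11 erratum fix-ups re-indented above: both
| directions splice an ARM B<cond> instruction together from the original
| condition code, the 0x0a000000 opcode bits and a 24-bit word offset, and
| reject anything outside the +/-32MB reach checked against (1 << 25).  A
| standalone sketch of that encoding follows; the 8-byte pipeline bias and
| the field layout are standard ARM branch encoding, not something taken
| from this hunk.
|
|   #include <stdio.h>
|   #include <stdint.h>
|
|   /* Illustrative: build "B<cond> target" at address PLACE.  COND occupies
|      bits 28-31 and the offset field holds (target - place - 8) >> 2.  */
|   static int
|   encode_arm_branch (uint32_t place, uint32_t target, uint32_t cond,
|                      uint32_t *insn)
|   {
|     int32_t offset = (int32_t) (target - place - 8);
|
|     if (offset < -(1 << 25) || offset >= (1 << 25))
|       return 0;                 /* Out of range; the code above errors out.  */
|
|     *insn = (cond & 0xf0000000u) | 0x0a000000u
|             | (((uint32_t) offset >> 2) & 0x00ffffffu);
|     return 1;
|   }
|
|   int
|   main (void)
|   {
|     uint32_t insn;
|
|     /* Hypothetical erratum site branching forward to its veneer.  */
|     if (encode_arm_branch (0x00010000u, 0x00012344u, 0xe0000000u, &insn))
|       printf ("B veneer: 0x%08x\n", insn);
|     return 0;
|   }
|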
| @@ -15275,13 +15432,13 @@ elf32_arm_write_section (bfd *output_bfd,
|
| bfd_vma add_to_offsets = 0;
|
|
|
| for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
|
| - {
|
| + {
|
| if (edit_node)
|
| {
|
| unsigned int edit_index = edit_node->index;
|
|
|
| if (in_index < edit_index && in_index * 8 < input_size)
|
| - {
|
| + {
|
| copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
|
| contents + in_index * 8, add_to_offsets);
|
| out_index++;
|
| @@ -15290,7 +15447,7 @@ elf32_arm_write_section (bfd *output_bfd,
|
| else if (in_index == edit_index
|
| || (in_index * 8 >= input_size
|
| && edit_index == UINT_MAX))
|
| - {
|
| + {
|
| switch (edit_node->type)
|
| {
|
| case DELETE_EXIDX_ENTRY:
|
| @@ -15300,12 +15457,12 @@ elf32_arm_write_section (bfd *output_bfd,
|
|
|
| case INSERT_EXIDX_CANTUNWIND_AT_END:
|
| {
|
| - asection *text_sec = edit_node->linked_section;
|
| + asection *text_sec = edit_node->linked_section;
|
| bfd_vma text_offset = text_sec->output_section->vma
|
| + text_sec->output_offset
|
| + text_sec->size;
|
| bfd_vma exidx_offset = offset + out_index * 8;
|
| - unsigned long prel31_offset;
|
| + unsigned long prel31_offset;
|
|
|
| /* Note: this is meant to be equivalent to an
|
| R_ARM_PREL31 relocation. These synthetic
|
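| The INSERT_EXIDX_CANTUNWIND_AT_END case above builds what its comment calls
| a synthetic R_ARM_PREL31 value: a 31-bit place-relative offset kept in the
| low bits of the table word.  A minimal sketch of that encoding, assuming
| only the standard prel31 layout and none of the surrounding bookkeeping:
|
|   #include <stdio.h>
|   #include <stdint.h>
|
|   /* Encode TARGET relative to PLACE in the low 31 bits of a word.  */
|   static uint32_t
|   encode_prel31 (uint32_t target, uint32_t place)
|   {
|     return (target - place) & 0x7fffffffu;
|   }
|
|   /* Decode again: sign-extend the 31-bit field (using unsigned wrap-around
|      so the arithmetic stays well defined) and add the place back in.  */
|   static uint32_t
|   decode_prel31 (uint32_t word, uint32_t place)
|   {
|     return place + ((word ^ 0x40000000u) - 0x40000000u);
|   }
|
|   int
|   main (void)
|   {
|     /* Hypothetical: an .ARM.exidx entry pointing back at earlier text.  */
|     uint32_t place = 0x00020000u, target = 0x00010abcu;
|     uint32_t word = encode_prel31 (target, place);
|
|     printf ("prel31 word 0x%08x decodes to 0x%08x\n",
|             word, decode_prel31 (word, place));
|     return 0;
|   }
|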
| @@ -15370,45 +15527,45 @@ elf32_arm_write_section (bfd *output_bfd,
|
|
|
| ptr = map[0].vma;
|
| for (i = 0; i < mapcount; i++)
|
| - {
|
| - if (i == mapcount - 1)
|
| + {
|
| + if (i == mapcount - 1)
|
| end = sec->size;
|
| - else
|
| - end = map[i + 1].vma;
|
| + else
|
| + end = map[i + 1].vma;
|
|
|
| - switch (map[i].type)
|
| + switch (map[i].type)
|
| {
|
| case 'a':
|
| /* Byte swap code words. */
|
| while (ptr + 3 < end)
|
| - {
|
| - tmp = contents[ptr];
|
| - contents[ptr] = contents[ptr + 3];
|
| - contents[ptr + 3] = tmp;
|
| - tmp = contents[ptr + 1];
|
| - contents[ptr + 1] = contents[ptr + 2];
|
| - contents[ptr + 2] = tmp;
|
| - ptr += 4;
|
| - }
|
| + {
|
| + tmp = contents[ptr];
|
| + contents[ptr] = contents[ptr + 3];
|
| + contents[ptr + 3] = tmp;
|
| + tmp = contents[ptr + 1];
|
| + contents[ptr + 1] = contents[ptr + 2];
|
| + contents[ptr + 2] = tmp;
|
| + ptr += 4;
|
| + }
|
| break;
|
|
|
| case 't':
|
| /* Byte swap code halfwords. */
|
| while (ptr + 1 < end)
|
| - {
|
| - tmp = contents[ptr];
|
| - contents[ptr] = contents[ptr + 1];
|
| - contents[ptr + 1] = tmp;
|
| - ptr += 2;
|
| - }
|
| + {
|
| + tmp = contents[ptr];
|
| + contents[ptr] = contents[ptr + 1];
|
| + contents[ptr + 1] = tmp;
|
| + ptr += 2;
|
| + }
|
| break;
|
|
|
| case 'd':
|
| /* Leave data alone. */
|
| break;
|
| }
|
| - ptr = end;
|
| - }
|
| + ptr = end;
|
| + }
|
| }
|
|
|
| free (map);
|
| @@ -15477,17 +15634,17 @@ elf32_arm_swap_symbol_out (bfd *abfd,
|
| if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
|
| newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
|
| if (newsym.st_shndx != SHN_UNDEF)
|
| - {
|
| - /* Do this only for defined symbols. At link type, the static
|
| - linker will simulate the work of dynamic linker of resolving
|
| - symbols and will carry over the thumbness of found symbols to
|
| - the output symbol table. It's not clear how it happens, but
|
| - the thumbness of undefined symbols can well be different at
|
| - runtime, and writing '1' for them will be confusing for users
|
| - and possibly for dynamic linker itself.
|
| - */
|
| - newsym.st_value |= 1;
|
| - }
|
| + {
|
| + /* Do this only for defined symbols. At link time, the static
|
| + linker will simulate the work of the dynamic linker in resolving
|
| + symbols and will carry over the thumbness of found symbols to
|
| + the output symbol table. It's not clear how it happens, but
|
| + the thumbness of undefined symbols can well be different at
|
| + runtime, and writing '1' for them will be confusing for users
|
| + and possibly for the dynamic linker itself.
|
| + */
|
| + newsym.st_value |= 1;
|
| + }
|
|
|
| src = &newsym;
|
| }
|
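| The swapped-out symbols above keep bit 0 of st_value set for defined Thumb
| functions, so consumers have to treat that bit as instruction-set state
| rather than as part of the address.  A minimal sketch of that convention
| (illustrative only, not BFD code):
|
|   #include <stdio.h>
|   #include <stdint.h>
|
|   /* Bit 0 of a Thumb function symbol's value flags the instruction set;
|      the real entry address has that bit cleared.  */
|   static uint32_t
|   entry_address (uint32_t st_value, int *is_thumb)
|   {
|     *is_thumb = (st_value & 1u) != 0;
|     return st_value & ~1u;
|   }
|
|   int
|   main (void)
|   {
|     /* Hypothetical defined Thumb function at 0x8000.  */
|     uint32_t st_value = 0x8000u | 1u;
|     int is_thumb;
|     uint32_t addr = entry_address (st_value, &is_thumb);
|
|     printf ("entry 0x%08x (%s)\n", addr, is_thumb ? "Thumb" : "ARM");
|     return 0;
|   }
|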
| @@ -15509,21 +15666,21 @@ elf32_arm_modify_segment_map (bfd *abfd,
|
| /* If there is already a PT_ARM_EXIDX header, then we do not
|
| want to add another one. This situation arises when running
|
| "strip"; the input binary already has the header. */
|
| - m = elf_tdata (abfd)->segment_map;
|
| + m = elf_seg_map (abfd);
|
| while (m && m->p_type != PT_ARM_EXIDX)
|
| m = m->next;
|
| if (!m)
|
| {
|
| m = (struct elf_segment_map *)
|
| - bfd_zalloc (abfd, sizeof (struct elf_segment_map));
|
| + bfd_zalloc (abfd, sizeof (struct elf_segment_map));
|
| if (m == NULL)
|
| return FALSE;
|
| m->p_type = PT_ARM_EXIDX;
|
| m->count = 1;
|
| m->sections[0] = sec;
|
|
|
| - m->next = elf_tdata (abfd)->segment_map;
|
| - elf_tdata (abfd)->segment_map = m;
|
| + m->next = elf_seg_map (abfd);
|
| + elf_seg_map (abfd) = m;
|
| }
|
| }
|
|
|
| @@ -15720,6 +15877,14 @@ elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
|
| && nacl_modify_segment_map (abfd, info));
|
| }
|
|
|
| +static void
|
| +elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
|
| +{
|
| + elf32_arm_final_write_processing (abfd, linker);
|
| + nacl_final_write_processing (abfd, linker);
|
| +}
|
| +
|
| +
|
| #undef elf32_bed
|
| #define elf32_bed elf32_arm_nacl_bed
|
| #undef bfd_elf32_bfd_link_hash_table_create
|
| @@ -15731,9 +15896,14 @@ elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
|
| #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
|
| #undef elf_backend_modify_program_headers
|
| #define elf_backend_modify_program_headers nacl_modify_program_headers
|
| +#undef elf_backend_final_write_processing
|
| +#define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
|
|
|
| #undef ELF_MAXPAGESIZE
|
| #define ELF_MAXPAGESIZE 0x10000
|
| +#undef ELF_MINPAGESIZE
|
| +#undef ELF_COMMONPAGESIZE
|
| +
|
|
|
| #include "elf32-target.h"
|
|
|
| @@ -15742,6 +15912,13 @@ elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
|
| #undef elf_backend_modify_segment_map
|
| #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
|
| #undef elf_backend_modify_program_headers
|
| +#undef elf_backend_final_write_processing
|
| +#define elf_backend_final_write_processing elf32_arm_final_write_processing
|
| +#undef ELF_MINPAGESIZE
|
| +#define ELF_MINPAGESIZE 0x1000
|
| +#undef ELF_COMMONPAGESIZE
|
| +#define ELF_COMMONPAGESIZE 0x1000
|
| +
|
|
|
| /* VxWorks Targets. */
|
|
|
| @@ -15852,8 +16029,8 @@ elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
|
| flags then do not bother setting the flags for the output
|
| architecture, instead allow future merges to do this. If no
|
| future merges ever set these flags then they will retain their
|
| - uninitialised values, which surprise surprise, correspond
|
| - to the default values. */
|
| + uninitialised values, which surprise surprise, correspond
|
| + to the default values. */
|
| if (bfd_get_arch_info (ibfd)->the_default
|
| && elf_elfheader (ibfd)->e_flags == 0)
|
| return TRUE;
|
| @@ -15901,7 +16078,7 @@ elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
|
| if ((bfd_get_section_flags (ibfd, sec)
|
| & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
|
| == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
|
| - only_data_sections = FALSE;
|
| + only_data_sections = FALSE;
|
|
|
| null_input_bfd = FALSE;
|
| break;
|
| @@ -16116,15 +16293,15 @@ elf32_arm_symbian_modify_segment_map (bfd *abfd,
|
| dynsec = bfd_get_section_by_name (abfd, ".dynamic");
|
| if (dynsec)
|
| {
|
| - for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
|
| + for (m = elf_seg_map (abfd); m != NULL; m = m->next)
|
| if (m->p_type == PT_DYNAMIC)
|
| break;
|
|
|
| if (m == NULL)
|
| {
|
| m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
|
| - m->next = elf_tdata (abfd)->segment_map;
|
| - elf_tdata (abfd)->segment_map = m;
|
| + m->next = elf_seg_map (abfd);
|
| + elf_seg_map (abfd) = m;
|
| }
|
| }
|
|
|
|
|