Index: gdb/amd64-tdep.c |
diff --git a/gdb/amd64-tdep.c b/gdb/amd64-tdep.c |
index ea65d0d55bc8ba795f514d802b89e1eaf212cabe..8ae1142c47e35bde014af73938adc83aa6c12595 100644 |
--- a/gdb/amd64-tdep.c |
+++ b/gdb/amd64-tdep.c |
@@ -43,6 +43,8 @@ |
#include "features/i386/amd64.c" |
#include "features/i386/amd64-avx.c" |
+#include "features/i386/x32.c" |
+#include "features/i386/x32-avx.c" |
#include "ax.h" |
#include "ax-gdb.h" |
@@ -256,7 +258,8 @@ static const char *amd64_word_names[] = |
static const char *amd64_dword_names[] = |
{ |
"eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp", |
- "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d" |
+ "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d", |
+ "eip" |
}; |
/* Return the name of register REGNUM. */ |
@@ -592,7 +595,7 @@ amd64_classify (struct type *type, enum amd64_reg_class class[2]) |
} |
static enum return_value_convention |
-amd64_return_value (struct gdbarch *gdbarch, struct type *func_type, |
+amd64_return_value (struct gdbarch *gdbarch, struct value *function, |
struct type *type, struct regcache *regcache, |
gdb_byte *readbuf, const gdb_byte *writebuf) |
{ |
@@ -1258,7 +1261,7 @@ amd64_displaced_step_copy_insn (struct gdbarch *gdbarch, |
struct regcache *regs) |
{ |
int len = gdbarch_max_insn_length (gdbarch); |
- /* Extra space for sentinels so fixup_{riprel,displaced_copy don't have to |
+ /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to |
continually watch for running off the end of the buffer. */ |
int fixup_sentinel_space = len; |
struct displaced_step_closure *dsc = |
@@ -1858,6 +1861,188 @@ amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc, |
return min (pc + offset + 2, current_pc); |
} |
+/* Similar to amd64_analyze_stack_align for x32. */ |
+ |
+static CORE_ADDR |
+amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc, |
+ struct amd64_frame_cache *cache) |
+{ |
+ /* There are 2 code sequences to re-align stack before the frame |
+ gets set up: |
+ |
+   1. Use a caller-saved register: |
+ |
+ leaq 8(%rsp), %reg |
+ andq $-XXX, %rsp |
+ pushq -8(%reg) |
+ |
+ or |
+ |
+ [addr32] leal 8(%rsp), %reg |
+ andl $-XXX, %esp |
+ [addr32] pushq -8(%reg) |
+ |
+   2. Use a callee-saved register: |
+ |
+ pushq %reg |
+ leaq 16(%rsp), %reg |
+ andq $-XXX, %rsp |
+ pushq -8(%reg) |
+ |
+ or |
+ |
+ pushq %reg |
+ [addr32] leal 16(%rsp), %reg |
+ andl $-XXX, %esp |
+ [addr32] pushq -8(%reg) |
+ |
+ "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes: |
+ |
+ 0x48 0x83 0xe4 0xf0 andq $-16, %rsp |
+ 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp |
+ |
+ "andl $-XXX, %esp" can be either 3 bytes or 6 bytes: |
+ |
+ 0x83 0xe4 0xf0 andl $-16, %esp |
+ 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp |
+ */ |
+ |
+ gdb_byte buf[19]; |
+ int reg, r; |
+ int offset, offset_and; |
+ |
+ if (target_read_memory (pc, buf, sizeof buf)) |
+ return pc; |
+ |
+ /* Skip optional addr32 prefix. */ |
+ offset = buf[0] == 0x67 ? 1 : 0; |
+ |
+  /* Check caller-saved register.  The first instruction has |
+ to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */ |
+ if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40) |
+ && buf[offset + 1] == 0x8d |
+ && buf[offset + 3] == 0x24 |
+ && buf[offset + 4] == 0x8) |
+ { |
+ /* MOD must be binary 10 and R/M must be binary 100. */ |
+ if ((buf[offset + 2] & 0xc7) != 0x44) |
+ return pc; |
+ |
+ /* REG has register number. */ |
+ reg = (buf[offset + 2] >> 3) & 7; |
+ |
+ /* Check the REX.R bit. */ |
+ if ((buf[offset] & 0x4) != 0) |
+ reg += 8; |
+ |
+ offset += 5; |
+ } |
+ else |
+ { |
+      /* Check callee-saved register.  The first instruction |
+ has to be "pushq %reg". */ |
+ reg = 0; |
+ if ((buf[offset] & 0xf6) == 0x40 |
+ && (buf[offset + 1] & 0xf8) == 0x50) |
+ { |
+ /* Check the REX.B bit. */ |
+ if ((buf[offset] & 1) != 0) |
+ reg = 8; |
+ |
+ offset += 1; |
+ } |
+ else if ((buf[offset] & 0xf8) != 0x50) |
+ return pc; |
+ |
+ /* Get register. */ |
+ reg += buf[offset] & 0x7; |
+ |
+ offset++; |
+ |
+ /* Skip optional addr32 prefix. */ |
+ if (buf[offset] == 0x67) |
+ offset++; |
+ |
+ /* The next instruction has to be "leaq 16(%rsp), %reg" or |
+ "leal 16(%rsp), %reg". */ |
+ if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40) |
+ || buf[offset + 1] != 0x8d |
+ || buf[offset + 3] != 0x24 |
+ || buf[offset + 4] != 0x10) |
+ return pc; |
+ |
+ /* MOD must be binary 10 and R/M must be binary 100. */ |
+ if ((buf[offset + 2] & 0xc7) != 0x44) |
+ return pc; |
+ |
+ /* REG has register number. */ |
+ r = (buf[offset + 2] >> 3) & 7; |
+ |
+ /* Check the REX.R bit. */ |
+ if ((buf[offset] & 0x4) != 0) |
+ r += 8; |
+ |
+ /* Registers in pushq and leaq have to be the same. */ |
+ if (reg != r) |
+ return pc; |
+ |
+ offset += 5; |
+ } |
+ |
+  /* Register can't be %rsp nor %rbp. */ |
+ if (reg == 4 || reg == 5) |
+ return pc; |
+ |
+ /* The next instruction may be "andq $-XXX, %rsp" or |
+ "andl $-XXX, %esp". */ |
+ if (buf[offset] != 0x48) |
+ offset--; |
+ |
+ if (buf[offset + 2] != 0xe4 |
+ || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83)) |
+ return pc; |
+ |
+ offset_and = offset; |
+ offset += buf[offset + 1] == 0x81 ? 7 : 4; |
+ |
+ /* Skip optional addr32 prefix. */ |
+ if (buf[offset] == 0x67) |
+ offset++; |
+ |
+ /* The next instruction has to be "pushq -8(%reg)". */ |
+ r = 0; |
+ if (buf[offset] == 0xff) |
+ offset++; |
+ else if ((buf[offset] & 0xf6) == 0x40 |
+ && buf[offset + 1] == 0xff) |
+ { |
+ /* Check the REX.B bit. */ |
+ if ((buf[offset] & 0x1) != 0) |
+ r = 8; |
+ offset += 2; |
+ } |
+ else |
+ return pc; |
+ |
+ /* 8bit -8 is 0xf8. REG must be binary 110 and MOD must be binary |
+ 01. */ |
+ if (buf[offset + 1] != 0xf8 |
+ || (buf[offset] & 0xf8) != 0x70) |
+ return pc; |
+ |
+ /* R/M has register. */ |
+ r += buf[offset] & 7; |
+ |
+ /* Registers in leaq and pushq have to be the same. */ |
+ if (reg != r) |
+ return pc; |
+ |
+ if (current_pc > pc + offset_and) |
+ cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg); |
+ |
+ return min (pc + offset + 2, current_pc); |
+} |
+ |
/* Do a limited analysis of the prologue at PC and update CACHE |
accordingly. Bail out early if CURRENT_PC is reached. Return the |
address where the analysis stopped. |
@@ -1865,10 +2050,16 @@ amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc, |
We will handle only functions beginning with: |
pushq %rbp 0x55 |
- movq %rsp, %rbp 0x48 0x89 0xe5 |
+ movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec) |
+ |
+ or (for the X32 ABI): |
- Any function that doesn't start with this sequence will be assumed |
- to have no prologue and thus no valid frame pointer in %rbp. */ |
+ pushq %rbp 0x55 |
+ movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec) |
+ |
+ Any function that doesn't start with one of these sequences will be |
+ assumed to have no prologue and thus no valid frame pointer in |
+ %rbp. */ |
static CORE_ADDR |
amd64_analyze_prologue (struct gdbarch *gdbarch, |
@@ -1876,14 +2067,23 @@ amd64_analyze_prologue (struct gdbarch *gdbarch, |
struct amd64_frame_cache *cache) |
{ |
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); |
- static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */ |
+ /* There are two variations of movq %rsp, %rbp. */ |
+ static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 }; |
+ static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec }; |
+ /* Ditto for movl %esp, %ebp. */ |
+ static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 }; |
+ static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec }; |
+ |
gdb_byte buf[3]; |
gdb_byte op; |
if (current_pc <= pc) |
return current_pc; |
- pc = amd64_analyze_stack_align (pc, current_pc, cache); |
+ if (gdbarch_ptr_bit (gdbarch) == 32) |
+ pc = amd64_x32_analyze_stack_align (pc, current_pc, cache); |
+ else |
+ pc = amd64_analyze_stack_align (pc, current_pc, cache); |
op = read_memory_unsigned_integer (pc, 1, byte_order); |
@@ -1898,14 +2098,30 @@ amd64_analyze_prologue (struct gdbarch *gdbarch, |
if (current_pc <= pc + 1) |
return current_pc; |
- /* Check for `movq %rsp, %rbp'. */ |
read_memory (pc + 1, buf, 3); |
- if (memcmp (buf, proto, 3) != 0) |
- return pc + 1; |
- /* OK, we actually have a frame. */ |
- cache->frameless_p = 0; |
- return pc + 4; |
+ /* Check for `movq %rsp, %rbp'. */ |
+ if (memcmp (buf, mov_rsp_rbp_1, 3) == 0 |
+ || memcmp (buf, mov_rsp_rbp_2, 3) == 0) |
+ { |
+ /* OK, we actually have a frame. */ |
+ cache->frameless_p = 0; |
+ return pc + 4; |
+ } |
+ |
+  /* For X32, also check for `movl %esp, %ebp'. */ |
+ if (gdbarch_ptr_bit (gdbarch) == 32) |
+ { |
+ if (memcmp (buf, mov_esp_ebp_1, 2) == 0 |
+ || memcmp (buf, mov_esp_ebp_2, 2) == 0) |
+ { |
+ /* OK, we actually have a frame. */ |
+ cache->frameless_p = 0; |
+ return pc + 3; |
+ } |
+ } |
+ |
+ return pc + 1; |
} |
return pc; |
@@ -2688,6 +2904,53 @@ amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch) |
set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction); |
set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address); |
+ |
+ /* SystemTap variables and functions. */ |
+ set_gdbarch_stap_integer_prefix (gdbarch, "$"); |
+ set_gdbarch_stap_register_prefix (gdbarch, "%"); |
+ set_gdbarch_stap_register_indirection_prefix (gdbarch, "("); |
+ set_gdbarch_stap_register_indirection_suffix (gdbarch, ")"); |
+ set_gdbarch_stap_is_single_operand (gdbarch, |
+ i386_stap_is_single_operand); |
+ set_gdbarch_stap_parse_special_token (gdbarch, |
+ i386_stap_parse_special_token); |
+} |
+ |
+ |
+static struct type * |
+amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum) |
+{ |
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); |
+ |
+ switch (regnum - tdep->eax_regnum) |
+ { |
+ case AMD64_RBP_REGNUM: /* %ebp */ |
+ case AMD64_RSP_REGNUM: /* %esp */ |
+ return builtin_type (gdbarch)->builtin_data_ptr; |
+ case AMD64_RIP_REGNUM: /* %eip */ |
+ return builtin_type (gdbarch)->builtin_func_ptr; |
+ } |
+ |
+ return i386_pseudo_register_type (gdbarch, regnum); |
+} |
+ |
+void |
+amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch) |
+{ |
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); |
+ const struct target_desc *tdesc = info.target_desc; |
+ |
+ amd64_init_abi (info, gdbarch); |
+ |
+ if (! tdesc_has_registers (tdesc)) |
+ tdesc = tdesc_x32; |
+ tdep->tdesc = tdesc; |
+ |
+ tdep->num_dword_regs = 17; |
+ set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type); |
+ |
+ set_gdbarch_long_bit (gdbarch, 32); |
+ set_gdbarch_ptr_bit (gdbarch, 32); |
} |
/* Provide a prototype to silence -Wmissing-prototypes. */ |
@@ -2698,6 +2961,8 @@ _initialize_amd64_tdep (void) |
{ |
initialize_tdesc_amd64 (); |
initialize_tdesc_amd64_avx (); |
+ initialize_tdesc_x32 (); |
+ initialize_tdesc_x32_avx (); |
} |
@@ -2722,7 +2987,8 @@ amd64_supply_fxsave (struct regcache *regcache, int regnum, |
i387_supply_fxsave (regcache, regnum, fxsave); |
- if (fxsave && gdbarch_ptr_bit (gdbarch) == 64) |
+ if (fxsave |
+ && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64) |
{ |
const gdb_byte *regs = fxsave; |
@@ -2744,7 +3010,8 @@ amd64_supply_xsave (struct regcache *regcache, int regnum, |
i387_supply_xsave (regcache, regnum, xsave); |
- if (xsave && gdbarch_ptr_bit (gdbarch) == 64) |
+ if (xsave |
+ && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64) |
{ |
const gdb_byte *regs = xsave; |
@@ -2772,7 +3039,7 @@ amd64_collect_fxsave (const struct regcache *regcache, int regnum, |
i387_collect_fxsave (regcache, regnum, fxsave); |
- if (gdbarch_ptr_bit (gdbarch) == 64) |
+ if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64) |
{ |
if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep)) |
regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12); |
@@ -2793,7 +3060,7 @@ amd64_collect_xsave (const struct regcache *regcache, int regnum, |
i387_collect_xsave (regcache, regnum, xsave, gcore); |
- if (gdbarch_ptr_bit (gdbarch) == 64) |
+ if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64) |
{ |
if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep)) |
regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), |