Chromium Code Reviews
Side by Side Diff: gdb/aarch64-tdep.c

Issue 124383005: GDB 7.6.50 (Closed) Base URL: http://git.chromium.org/native_client/nacl-gdb.git@upstream
Patch Set: Created 6 years, 11 months ago
1 /* Common target dependent code for GDB on AArch64 systems.
2
3 Copyright (C) 2009-2013 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "frame.h"
24 #include "inferior.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include <string.h>
28 #include "dis-asm.h"
29 #include "regcache.h"
30 #include "reggroups.h"
31 #include "doublest.h"
32 #include "value.h"
33 #include "arch-utils.h"
34 #include "osabi.h"
35 #include "frame-unwind.h"
36 #include "frame-base.h"
37 #include "trad-frame.h"
38 #include "objfiles.h"
39 #include "dwarf2-frame.h"
40 #include "gdbtypes.h"
41 #include "prologue-value.h"
42 #include "target-descriptions.h"
43 #include "user-regs.h"
44 #include "language.h"
45 #include "infcall.h"
46
47 #include "aarch64-tdep.h"
48
49 #include "elf-bfd.h"
50 #include "elf/aarch64.h"
51
52 #include "gdb_assert.h"
53 #include "vec.h"
54
55 #include "features/aarch64.c"
56
57 /* Pseudo register base numbers. */
58 #define AARCH64_Q0_REGNUM 0
59 #define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
60 #define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
61 #define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
62 #define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
63
64 /* The standard register names, and all the valid aliases for them. */
65 static const struct
66 {
67 const char *const name;
68 int regnum;
69 } aarch64_register_aliases[] =
70 {
71 /* 64-bit register names. */
72 {"fp", AARCH64_FP_REGNUM},
73 {"lr", AARCH64_LR_REGNUM},
74 {"sp", AARCH64_SP_REGNUM},
75
76 /* 32-bit register names. */
77 {"w0", AARCH64_X0_REGNUM + 0},
78 {"w1", AARCH64_X0_REGNUM + 1},
79 {"w2", AARCH64_X0_REGNUM + 2},
80 {"w3", AARCH64_X0_REGNUM + 3},
81 {"w4", AARCH64_X0_REGNUM + 4},
82 {"w5", AARCH64_X0_REGNUM + 5},
83 {"w6", AARCH64_X0_REGNUM + 6},
84 {"w7", AARCH64_X0_REGNUM + 7},
85 {"w8", AARCH64_X0_REGNUM + 8},
86 {"w9", AARCH64_X0_REGNUM + 9},
87 {"w10", AARCH64_X0_REGNUM + 10},
88 {"w11", AARCH64_X0_REGNUM + 11},
89 {"w12", AARCH64_X0_REGNUM + 12},
90 {"w13", AARCH64_X0_REGNUM + 13},
91 {"w14", AARCH64_X0_REGNUM + 14},
92 {"w15", AARCH64_X0_REGNUM + 15},
93 {"w16", AARCH64_X0_REGNUM + 16},
94 {"w17", AARCH64_X0_REGNUM + 17},
95 {"w18", AARCH64_X0_REGNUM + 18},
96 {"w19", AARCH64_X0_REGNUM + 19},
97 {"w20", AARCH64_X0_REGNUM + 20},
98 {"w21", AARCH64_X0_REGNUM + 21},
99 {"w22", AARCH64_X0_REGNUM + 22},
100 {"w23", AARCH64_X0_REGNUM + 23},
101 {"w24", AARCH64_X0_REGNUM + 24},
102 {"w25", AARCH64_X0_REGNUM + 25},
103 {"w26", AARCH64_X0_REGNUM + 26},
104 {"w27", AARCH64_X0_REGNUM + 27},
105 {"w28", AARCH64_X0_REGNUM + 28},
106 {"w29", AARCH64_X0_REGNUM + 29},
107 {"w30", AARCH64_X0_REGNUM + 30},
108
109 /* specials */
110 {"ip0", AARCH64_X0_REGNUM + 16},
111 {"ip1", AARCH64_X0_REGNUM + 17}
112 };
113
114 /* The required core 'R' registers. */
115 static const char *const aarch64_r_register_names[] =
116 {
117 /* These registers must appear in consecutive RAW register number
118 order and they must begin with AARCH64_X0_REGNUM! */
119 "x0", "x1", "x2", "x3",
120 "x4", "x5", "x6", "x7",
121 "x8", "x9", "x10", "x11",
122 "x12", "x13", "x14", "x15",
123 "x16", "x17", "x18", "x19",
124 "x20", "x21", "x22", "x23",
125 "x24", "x25", "x26", "x27",
126 "x28", "x29", "x30", "sp",
127 "pc", "cpsr"
128 };
129
130 /* The FP/SIMD 'V' registers. */
131 static const char *const aarch64_v_register_names[] =
132 {
133 /* These registers must appear in consecutive RAW register number
134 order and they must begin with AARCH64_V0_REGNUM! */
135 "v0", "v1", "v2", "v3",
136 "v4", "v5", "v6", "v7",
137 "v8", "v9", "v10", "v11",
138 "v12", "v13", "v14", "v15",
139 "v16", "v17", "v18", "v19",
140 "v20", "v21", "v22", "v23",
141 "v24", "v25", "v26", "v27",
142 "v28", "v29", "v30", "v31",
143 "fpsr",
144 "fpcr"
145 };
146
147 /* AArch64 prologue cache structure. */
148 struct aarch64_prologue_cache
149 {
150 /* The stack pointer at the time this frame was created; i.e. the
151 caller's stack pointer when this function was called. It is used
152 to identify this frame. */
153 CORE_ADDR prev_sp;
154
155 /* The frame base for this frame is just prev_sp - frame size.
156 FRAMESIZE is the distance from the frame pointer to the
157 initial stack pointer. */
158 int framesize;
159
160 /* The register used to hold the frame pointer for this frame. */
161 int framereg;
162
163 /* Saved register offsets. */
164 struct trad_frame_saved_reg *saved_regs;
165 };
166
167 /* Toggle this file's internal debugging dump. */
168 static int aarch64_debug;
169
170 static void
171 show_aarch64_debug (struct ui_file *file, int from_tty,
172 struct cmd_list_element *c, const char *value)
173 {
174 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
175 }
176
177 /* Extract a signed value from a bit field within an instruction
178 encoding.
179
180 INSN is the instruction opcode.
181
182 WIDTH specifies the width of the bit field to extract (in bits).
183
184 OFFSET specifies the least significant bit of the field where bits
185 are numbered zero counting from least to most significant. */
186
187 static int32_t
188 extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
189 {
190 unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
191 unsigned shift_r = sizeof (int32_t) * 8 - width;
192
193 return ((int32_t) insn << shift_l) >> shift_r;
194 }
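The double shift above is the usual sign-extension idiom: move the field up against the sign bit, then arithmetically shift it back down. A small stand-alone sketch (hand-assembled opcode, illustrative only) shows the effect for a b.eq instruction whose imm19 field (bits 23..5) is all ones, i.e. -1:

#include <assert.h>
#include <stdint.h>

/* Local copy of the helper above, for illustration only.  */
static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
  unsigned shift_r = sizeof (int32_t) * 8 - width;

  return ((int32_t) insn << shift_l) >> shift_r;
}

int
main (void)
{
  /* 0x54ffffe0 is "b.eq" with imm19 == 0x7ffff: shift_l == 8 moves the
     field up against bit 31, the arithmetic shift_r == 13 brings it
     back down sign-extended, giving -1.  decode_bcond then scales the
     result by 4, i.e. a branch offset of -4 bytes.  */
  assert (extract_signed_bitfield (0x54ffffe0, 19, 5) == -1);
  return 0;
}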
195
196 /* Determine if specified bits within an instruction opcode match a
197 specific pattern.
198
199 INSN is the instruction opcode.
200
201 MASK specifies the bits within the opcode that are to be tested
202 against PATTERN for a match. */
203
204 static int
205 decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
206 {
207 return (insn & mask) == pattern;
208 }
209
210 /* Decode an opcode if it represents an immediate ADD or SUB instruction.
211
212 ADDR specifies the address of the opcode.
213 INSN specifies the opcode to test.
214 RD receives the 'rd' field from the decoded instruction.
215 RN receives the 'rn' field from the decoded instruction.
216
217 Return 1 if the opcode matches and is decoded, otherwise 0. */
218 static int
219 decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd, unsigned *rn,
220 int32_t *imm)
221 {
222 if ((insn & 0x9f000000) == 0x91000000)
223 {
224 unsigned shift;
225 unsigned op_is_sub;
226
227 *rd = (insn >> 0) & 0x1f;
228 *rn = (insn >> 5) & 0x1f;
229 *imm = (insn >> 10) & 0xfff;
230 shift = (insn >> 22) & 0x3;
231 op_is_sub = (insn >> 30) & 0x1;
232
233 switch (shift)
234 {
235 case 0:
236 break;
237 case 1:
238 *imm <<= 12;
239 break;
240 default:
241 /* UNDEFINED */
242 return 0;
243 }
244
245 if (op_is_sub)
246 *imm = -*imm;
247
248 if (aarch64_debug)
249 fprintf_unfiltered (gdb_stdlog,
250 "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
251 core_addr_to_string_nz (addr), insn, *rd, *rn,
252 *imm);
253 return 1;
254 }
255 return 0;
256 }
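A worked decode (opcode assembled by hand for illustration): the common prologue instruction sub sp, sp, #0x30 is 0xd100c3ff. It passes the (insn & 0x9f000000) == 0x91000000 test and yields rd = rn = 31 (sp), imm = 0x30, shift = 0 and op_is_sub = 1, so *imm is negated to -0x30; the prologue scanner below then records the adjustment as pv_add_constant (regs[rn], -48).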
257
258 /* Decode an opcode if it represents an ADRP instruction.
259
260 ADDR specifies the address of the opcode.
261 INSN specifies the opcode to test.
262 RD receives the 'rd' field from the decoded instruction.
263
264 Return 1 if the opcode matches and is decoded, otherwise 0. */
265
266 static int
267 decode_adrp (CORE_ADDR addr, uint32_t insn, unsigned *rd)
268 {
269 if (decode_masked_match (insn, 0x9f000000, 0x90000000))
270 {
271 *rd = (insn >> 0) & 0x1f;
272
273 if (aarch64_debug)
274 fprintf_unfiltered (gdb_stdlog,
275 "decode: 0x%s 0x%x adrp x%u, #?\n",
276 core_addr_to_string_nz (addr), insn, *rd);
277 return 1;
278 }
279 return 0;
280 }
281
282 /* Decode an opcode if it represents a branch immediate or branch
283 and link immediate instruction.
284
285 ADDR specifies the address of the opcode.
286 INSN specifies the opcode to test.
287 LINK receives the 'link' bit from the decoded instruction.
288 OFFSET receives the immediate offset from the decoded instruction.
289
290 Return 1 if the opcode matches and is decoded, otherwise 0. */
291
292 static int
293 decode_b (CORE_ADDR addr, uint32_t insn, unsigned *link, int32_t *offset)
294 {
295 /* b 0001 01ii iiii iiii iiii iiii iiii iiii */
296 /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
297 if (decode_masked_match (insn, 0x7c000000, 0x14000000))
298 {
299 *link = insn >> 31;
300 *offset = extract_signed_bitfield (insn, 26, 0) << 2;
301
302 if (aarch64_debug)
303 fprintf_unfiltered (gdb_stdlog,
304 "decode: 0x%s 0x%x %s 0x%s\n",
305 core_addr_to_string_nz (addr), insn,
306 *link ? "bl" : "b",
307 core_addr_to_string_nz (addr + *offset));
308
309 return 1;
310 }
311 return 0;
312 }
313
314 /* Decode an opcode if it represents a conditional branch instruction.
315
316 ADDR specifies the address of the opcode.
317 INSN specifies the opcode to test.
318 COND receives the branch condition field from the decoded
319 instruction.
320 OFFSET receives the immediate offset from the decoded instruction.
321
322 Return 1 if the opcode matches and is decoded, otherwise 0. */
323
324 static int
325 decode_bcond (CORE_ADDR addr, uint32_t insn, unsigned *cond, int32_t *offset)
326 {
327 if (decode_masked_match (insn, 0xfe000000, 0x54000000))
328 {
329 *cond = (insn >> 0) & 0xf;
330 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
331
332 if (aarch64_debug)
333 fprintf_unfiltered (gdb_stdlog,
334 "decode: 0x%s 0x%x b<%u> 0x%s\n",
335 core_addr_to_string_nz (addr), insn, *cond,
336 core_addr_to_string_nz (addr + *offset));
337 return 1;
338 }
339 return 0;
340 }
341
342 /* Decode an opcode if it represents a branch via register instruction.
343
344 ADDR specifies the address of the opcode.
345 INSN specifies the opcode to test.
346 LINK receives the 'link' bit from the decoded instruction.
347 RN receives the 'rn' field from the decoded instruction.
348
349 Return 1 if the opcode matches and is decoded, otherwise 0. */
350
351 static int
352 decode_br (CORE_ADDR addr, uint32_t insn, unsigned *link, unsigned *rn)
353 {
354 /* 8 4 0 6 2 8 4 0 */
355 /* blr 110101100011111100000000000rrrrr */
356 /* br 110101100001111100000000000rrrrr */
357 if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
358 {
359 *link = (insn >> 21) & 1;
360 *rn = (insn >> 5) & 0x1f;
361
362 if (aarch64_debug)
363 fprintf_unfiltered (gdb_stdlog,
364 "decode: 0x%s 0x%x %s 0x%x\n",
365 core_addr_to_string_nz (addr), insn,
366 *link ? "blr" : "br", *rn);
367
368 return 1;
369 }
370 return 0;
371 }
372
373 /* Decode an opcode if it represents a CBZ or CBNZ instruction.
374
375 ADDR specifies the address of the opcode.
376 INSN specifies the opcode to test.
377 IS64 receives the 'sf' field from the decoded instruction.
378 OP receives the 'op' field from the decoded instruction.
379 RN receives the 'rn' field from the decoded instruction.
380 OFFSET receives the 'imm19' field from the decoded instruction.
381
382 Return 1 if the opcode matches and is decoded, otherwise 0. */
383
384 static int
385 decode_cb (CORE_ADDR addr,
386 uint32_t insn, int *is64, unsigned *op, unsigned *rn,
387 int32_t *offset)
388 {
389 if (decode_masked_match (insn, 0x7e000000, 0x34000000))
390 {
391 /* cbz T011 010o iiii iiii iiii iiii iiir rrrr */
392 /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */
393
394 *rn = (insn >> 0) & 0x1f;
395 *is64 = (insn >> 31) & 0x1;
396 *op = (insn >> 24) & 0x1;
397 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
398
399 if (aarch64_debug)
400 fprintf_unfiltered (gdb_stdlog,
401 "decode: 0x%s 0x%x %s 0x%s\n",
402 core_addr_to_string_nz (addr), insn,
403 *op ? "cbnz" : "cbz",
404 core_addr_to_string_nz (addr + *offset));
405 return 1;
406 }
407 return 0;
408 }
409
410 /* Decode an opcode if it represents an ERET instruction.
411
412 ADDR specifies the address of the opcode.
413 INSN specifies the opcode to test.
414
415 Return 1 if the opcode matches and is decoded, otherwise 0. */
416
417 static int
418 decode_eret (CORE_ADDR addr, uint32_t insn)
419 {
420 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
421 if (insn == 0xd69f03e0)
422 {
423 if (aarch64_debug)
424 fprintf_unfiltered (gdb_stdlog, "decode: 0x%s 0x%x eret\n",
425 core_addr_to_string_nz (addr), insn);
426 return 1;
427 }
428 return 0;
429 }
430
431 /* Decode an opcode if it represents a MOVZ instruction.
432
433 ADDR specifies the address of the opcode.
434 INSN specifies the opcode to test.
435 RD receives the 'rd' field from the decoded instruction.
436
437 Return 1 if the opcode matches and is decoded, otherwise 0. */
438
439 static int
440 decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
441 {
442 if (decode_masked_match (insn, 0xff800000, 0x52800000))
443 {
444 *rd = (insn >> 0) & 0x1f;
445
446 if (aarch64_debug)
447 fprintf_unfiltered (gdb_stdlog,
448 "decode: 0x%s 0x%x movz x%u, #?\n",
449 core_addr_to_string_nz (addr), insn, *rd);
450 return 1;
451 }
452 return 0;
453 }
454
455 /* Decode an opcode if it represents an ORR (shifted register)
456 instruction.
457
458 ADDR specifies the address of the opcode.
459 INSN specifies the opcode to test.
460 RD receives the 'rd' field from the decoded instruction.
461 RN receives the 'rn' field from the decoded instruction.
462 RM receives the 'rm' field from the decoded instruction.
463 IMM receives the 'imm6' field from the decoded instruction.
464
465 Return 1 if the opcode matches and is decoded, otherwise 0. */
466
467 static int
468 decode_orr_shifted_register_x (CORE_ADDR addr,
469 uint32_t insn, unsigned *rd, unsigned *rn,
470 unsigned *rm, int32_t *imm)
471 {
472 if (decode_masked_match (insn, 0xff200000, 0xaa000000))
473 {
474 *rd = (insn >> 0) & 0x1f;
475 *rn = (insn >> 5) & 0x1f;
476 *rm = (insn >> 16) & 0x1f;
477 *imm = (insn >> 10) & 0x3f;
478
479 if (aarch64_debug)
480 fprintf_unfiltered (gdb_stdlog,
481 "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
482 core_addr_to_string_nz (addr), insn, *rd,
483 *rn, *rm, *imm);
484 return 1;
485 }
486 return 0;
487 }
488
489 /* Decode an opcode if it represents a RET instruction.
490
491 ADDR specifies the address of the opcode.
492 INSN specifies the opcode to test.
493 RN receives the 'rn' field from the decoded instruction.
494
495 Return 1 if the opcode matches and is decoded, otherwise 0. */
496
497 static int
498 decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
499 {
500 if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
501 {
502 *rn = (insn >> 5) & 0x1f;
503 if (aarch64_debug)
504 fprintf_unfiltered (gdb_stdlog,
505 "decode: 0x%s 0x%x ret x%u\n",
506 core_addr_to_string_nz (addr), insn, *rn);
507 return 1;
508 }
509 return 0;
510 }
511
512 /* Decode an opcode if it represents the following instruction:
513 STP rt, rt2, [rn, #imm]
514
515 ADDR specifies the address of the opcode.
516 INSN specifies the opcode to test.
517 RT1 receives the 'rt' field from the decoded instruction.
518 RT2 receives the 'rt2' field from the decoded instruction.
519 RN receives the 'rn' field from the decoded instruction.
520 IMM receives the 'imm' field from the decoded instruction.
521
522 Return 1 if the opcode matches and is decoded, otherwise 0. */
523
524 static int
525 decode_stp_offset (CORE_ADDR addr,
526 uint32_t insn,
527 unsigned *rt1, unsigned *rt2, unsigned *rn, int32_t *imm)
528 {
529 if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
530 {
531 *rt1 = (insn >> 0) & 0x1f;
532 *rn = (insn >> 5) & 0x1f;
533 *rt2 = (insn >> 10) & 0x1f;
534 *imm = extract_signed_bitfield (insn, 7, 15);
535 *imm <<= 3;
536
537 if (aarch64_debug)
538 fprintf_unfiltered (gdb_stdlog,
539 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
540 core_addr_to_string_nz (addr), insn,
541 *rt1, *rt2, *rn, *imm);
542 return 1;
543 }
544 return 0;
545 }
546
547 /* Decode an opcode if it represents the following instruction:
548 STP rt, rt2, [rn, #imm]!
549
550 ADDR specifies the address of the opcode.
551 INSN specifies the opcode to test.
552 RT1 receives the 'rt' field from the decoded instruction.
553 RT2 receives the 'rt2' field from the decoded instruction.
554 RN receives the 'rn' field from the decoded instruction.
555 IMM receives the 'imm' field from the decoded instruction.
556
557 Return 1 if the opcode matches and is decoded, otherwise 0. */
558
559 static int
560 decode_stp_offset_wb (CORE_ADDR addr,
561 uint32_t insn,
562 unsigned *rt1, unsigned *rt2, unsigned *rn,
563 int32_t *imm)
564 {
565 if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
566 {
567 *rt1 = (insn >> 0) & 0x1f;
568 *rn = (insn >> 5) & 0x1f;
569 *rt2 = (insn >> 10) & 0x1f;
570 *imm = extract_signed_bitfield (insn, 7, 15);
571 *imm <<= 3;
572
573 if (aarch64_debug)
574 fprintf_unfiltered (gdb_stdlog,
575 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
576 core_addr_to_string_nz (addr), insn,
577 *rt1, *rt2, *rn, *imm);
578 return 1;
579 }
580 return 0;
581 }
582
583 /* Decode an opcode if it represents the following instruction:
584 STUR rt, [rn, #imm]
585
586 ADDR specifies the address of the opcode.
587 INSN specifies the opcode to test.
588 IS64 receives size field from the decoded instruction.
589 RT receives the 'rt' field from the decoded instruction.
590 RN receives the 'rn' field from the decoded instruction.
591 IMM receives the 'imm' field from the decoded instruction.
592
593 Return 1 if the opcode matches and is decoded, otherwise 0. */
594
595 static int
596 decode_stur (CORE_ADDR addr, uint32_t insn, int *is64, unsigned *rt,
597 unsigned *rn, int32_t *imm)
598 {
599 if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
600 {
601 *is64 = (insn >> 30) & 1;
602 *rt = (insn >> 0) & 0x1f;
603 *rn = (insn >> 5) & 0x1f;
604 *imm = extract_signed_bitfield (insn, 9, 12);
605
606 if (aarch64_debug)
607 fprintf_unfiltered (gdb_stdlog,
608 "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
609 core_addr_to_string_nz (addr), insn,
610 *is64 ? 'x' : 'w', *rt, *rn, *imm);
611 return 1;
612 }
613 return 0;
614 }
615
616 /* Decode an opcode if it represents a TBZ or TBNZ instruction.
617
618 ADDR specifies the address of the opcode.
619 INSN specifies the opcode to test.
620 OP receives the 'op' field from the decoded instruction.
621 BIT receives the bit position field from the decoded instruction.
622 RT receives 'rt' field from the decoded instruction.
623 IMM receives 'imm' field from the decoded instruction.
624
625 Return 1 if the opcode matches and is decoded, otherwise 0. */
626
627 static int
628 decode_tb (CORE_ADDR addr,
629 uint32_t insn, unsigned *op, unsigned *bit, unsigned *rt,
630 int32_t *imm)
631 {
632 if (decode_masked_match (insn, 0x7e000000, 0x36000000))
633 {
634 /* tbz b011 0110 bbbb biii iiii iiii iiir rrrr */
635 /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */
636
637 *rt = (insn >> 0) & 0x1f;
638 *op = insn & (1 << 24);
639 *bit = ((insn >> (31 - 5)) & 0x20) | ((insn >> 19) & 0x1f);
640 *imm = extract_signed_bitfield (insn, 14, 5) << 2;
641
642 if (aarch64_debug)
643 fprintf_unfiltered (gdb_stdlog,
644 "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
645 core_addr_to_string_nz (addr), insn,
646 *op ? "tbnz" : "tbz", *rt, *bit,
647 core_addr_to_string_nz (addr + *imm));
648 return 1;
649 }
650 return 0;
651 }
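A sketch with an assumed operand: for tbnz x3, #40, <label> the tested bit number 40 (binary 101000) is stored split in the opcode, with b5 in bit 31 and b40 in bits 23:19. The two ORed terms above are meant to reassemble those halves, the low five bits coming from (insn >> 19) & 0x1f and bit 5 of the result from the opcode's top bit.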
652
653 /* Analyze a prologue, looking for a recognizable stack frame
654 and frame pointer. Scan until we encounter a store that could
655 clobber the stack frame unexpectedly, or an unknown instruction. */
656
657 static CORE_ADDR
658 aarch64_analyze_prologue (struct gdbarch *gdbarch,
659 CORE_ADDR start, CORE_ADDR limit,
660 struct aarch64_prologue_cache *cache)
661 {
662 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
663 int i;
664 pv_t regs[AARCH64_X_REGISTER_COUNT];
665 struct pv_area *stack;
666 struct cleanup *back_to;
667
668 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
669 regs[i] = pv_register (i, 0);
670 stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
671 back_to = make_cleanup_free_pv_area (stack);
672
673 for (; start < limit; start += 4)
674 {
675 uint32_t insn;
676 unsigned rd;
677 unsigned rn;
678 unsigned rm;
679 unsigned rt;
680 unsigned rt1;
681 unsigned rt2;
682 int op_is_sub;
683 int32_t imm;
684 unsigned cond;
685 int is64;
686 unsigned is_link;
687 unsigned op;
688 unsigned bit;
689 int32_t offset;
690
691 insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);
692
693 if (decode_add_sub_imm (start, insn, &rd, &rn, &imm))
694 regs[rd] = pv_add_constant (regs[rn], imm);
695 else if (decode_adrp (start, insn, &rd))
696 regs[rd] = pv_unknown ();
697 else if (decode_b (start, insn, &is_link, &offset))
698 {
699 /* Stop analysis on branch. */
700 break;
701 }
702 else if (decode_bcond (start, insn, &cond, &offset))
703 {
704 /* Stop analysis on branch. */
705 break;
706 }
707 else if (decode_br (start, insn, &is_link, &rn))
708 {
709 /* Stop analysis on branch. */
710 break;
711 }
712 else if (decode_cb (start, insn, &is64, &op, &rn, &offset))
713 {
714 /* Stop analysis on branch. */
715 break;
716 }
717 else if (decode_eret (start, insn))
718 {
719 /* Stop analysis on branch. */
720 break;
721 }
722 else if (decode_movz (start, insn, &rd))
723 regs[rd] = pv_unknown ();
724 else
725 if (decode_orr_shifted_register_x (start, insn, &rd, &rn, &rm, &imm))
726 {
727 if (imm == 0 && rn == 31)
728 regs[rd] = regs[rm];
729 else
730 {
731 if (aarch64_debug)
732 fprintf_unfiltered
733 (gdb_stdlog,
734 "aarch64: prologue analysis gave up addr=0x%s "
735 "opcode=0x%x (orr x register)\n",
736 core_addr_to_string_nz (start),
737 insn);
738 break;
739 }
740 }
741 else if (decode_ret (start, insn, &rn))
742 {
743 /* Stop analysis on branch. */
744 break;
745 }
746 else if (decode_stur (start, insn, &is64, &rt, &rn, &offset))
747 {
748 pv_area_store (stack, pv_add_constant (regs[rn], offset),
749 is64 ? 8 : 4, regs[rt]);
750 }
751 else if (decode_stp_offset (start, insn, &rt1, &rt2, &rn, &imm))
752 {
753 /* If recording this store would invalidate the store area
754 (perhaps because rn is not known) then we should abandon
755 further prologue analysis. */
756 if (pv_area_store_would_trash (stack,
757 pv_add_constant (regs[rn], imm)))
758 break;
759
760 if (pv_area_store_would_trash (stack,
761 pv_add_constant (regs[rn], imm + 8)))
762 break;
763
764 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
765 regs[rt1]);
766 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
767 regs[rt2]);
768 }
769 else if (decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn, &imm))
770 {
771 /* If recording this store would invalidate the store area
772 (perhaps because rn is not known) then we should abandon
773 further prologue analysis. */
774 if (pv_area_store_would_trash (stack,
775 pv_add_constant (regs[rn], imm)))
776 break;
777
778 if (pv_area_store_would_trash (stack,
779 pv_add_constant (regs[rn], imm + 8)))
780 break;
781
782 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
783 regs[rt1]);
784 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
785 regs[rt2]);
786 regs[rn] = pv_add_constant (regs[rn], imm);
787 }
788 else if (decode_tb (start, insn, &op, &bit, &rn, &offset))
789 {
790 /* Stop analysis on branch. */
791 break;
792 }
793 else
794 {
795 if (aarch64_debug)
796 fprintf_unfiltered (gdb_stdlog,
797 "aarch64: prologue analysis gave up addr=0x%s"
798 " opcode=0x%x\n",
799 core_addr_to_string_nz (start), insn);
800 break;
801 }
802 }
803
804 if (cache == NULL)
805 {
806 do_cleanups (back_to);
807 return start;
808 }
809
810 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
811 {
812 /* Frame pointer is fp. Frame size is constant. */
813 cache->framereg = AARCH64_FP_REGNUM;
814 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
815 }
816 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
817 {
818 /* Try the stack pointer. */
819 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
820 cache->framereg = AARCH64_SP_REGNUM;
821 }
822 else
823 {
824 /* We're just out of luck. We don't know where the frame is. */
825 cache->framereg = -1;
826 cache->framesize = 0;
827 }
828
829 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
830 {
831 CORE_ADDR offset;
832
833 if (pv_area_find_reg (stack, gdbarch, i, &offset))
834 cache->saved_regs[i].addr = offset;
835 }
836
837 do_cleanups (back_to);
838 return start;
839 }
840
841 /* Implement the "skip_prologue" gdbarch method. */
842
843 static CORE_ADDR
844 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
845 {
846 unsigned long inst;
847 CORE_ADDR skip_pc;
848 CORE_ADDR func_addr, limit_pc;
849 struct symtab_and_line sal;
850
851 /* See if we can determine the end of the prologue via the symbol
852 table. If so, then return either PC, or the PC after the
853 prologue, whichever is greater. */
854 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
855 {
856 CORE_ADDR post_prologue_pc
857 = skip_prologue_using_sal (gdbarch, func_addr);
858
859 if (post_prologue_pc != 0)
860 return max (pc, post_prologue_pc);
861 }
862
863 /* Can't determine prologue from the symbol table, need to examine
864 instructions. */
865
866 /* Find an upper limit on the function prologue using the debug
867 information. If the debug information could not be used to
868 provide that bound, then use an arbitrary large number as the
869 upper bound. */
870 limit_pc = skip_prologue_using_sal (gdbarch, pc);
871 if (limit_pc == 0)
872 limit_pc = pc + 128; /* Magic. */
873
874 /* Try disassembling prologue. */
875 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
876 }
877
878 /* Scan the function prologue for THIS_FRAME and populate the prologue
879 cache CACHE. */
880
881 static void
882 aarch64_scan_prologue (struct frame_info *this_frame,
883 struct aarch64_prologue_cache *cache)
884 {
885 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
886 CORE_ADDR prologue_start;
887 CORE_ADDR prologue_end;
888 CORE_ADDR prev_pc = get_frame_pc (this_frame);
889 struct gdbarch *gdbarch = get_frame_arch (this_frame);
890
891 /* Assume we do not find a frame. */
892 cache->framereg = -1;
893 cache->framesize = 0;
894
895 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
896 &prologue_end))
897 {
898 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
899
900 if (sal.line == 0)
901 {
902 /* No line info so use the current PC. */
903 prologue_end = prev_pc;
904 }
905 else if (sal.end < prologue_end)
906 {
907 /* The next line begins after the function end. */
908 prologue_end = sal.end;
909 }
910
911 prologue_end = min (prologue_end, prev_pc);
912 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
913 }
914 else
915 {
916 CORE_ADDR frame_loc;
917 LONGEST saved_fp;
918 LONGEST saved_lr;
919 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
920
921 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
922 if (frame_loc == 0)
923 return;
924
925 cache->framereg = AARCH64_FP_REGNUM;
926 cache->framesize = 16;
927 cache->saved_regs[29].addr = 0;
928 cache->saved_regs[30].addr = 8;
929 }
930 }
931
932 /* Allocate an aarch64_prologue_cache and fill it with information
933 about the prologue of *THIS_FRAME. */
934
935 static struct aarch64_prologue_cache *
936 aarch64_make_prologue_cache (struct frame_info *this_frame)
937 {
938 struct aarch64_prologue_cache *cache;
939 CORE_ADDR unwound_fp;
940 int reg;
941
942 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
943 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
944
945 aarch64_scan_prologue (this_frame, cache);
946
947 if (cache->framereg == -1)
948 return cache;
949
950 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
951 if (unwound_fp == 0)
952 return cache;
953
954 cache->prev_sp = unwound_fp + cache->framesize;
955
956 /* Calculate actual addresses of saved registers using offsets
957 determined by aarch64_analyze_prologue. */
958 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
959 if (trad_frame_addr_p (cache->saved_regs, reg))
960 cache->saved_regs[reg].addr += cache->prev_sp;
961
962 return cache;
963 }
964
965 /* Our frame ID for a normal frame is the current function's starting
966 PC and the caller's SP when we were called. */
967
968 static void
969 aarch64_prologue_this_id (struct frame_info *this_frame,
970 void **this_cache, struct frame_id *this_id)
971 {
972 struct aarch64_prologue_cache *cache;
973 struct frame_id id;
974 CORE_ADDR pc, func;
975
976 if (*this_cache == NULL)
977 *this_cache = aarch64_make_prologue_cache (this_frame);
978 cache = *this_cache;
979
980 /* This is meant to halt the backtrace at "_start". */
981 pc = get_frame_pc (this_frame);
982 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
983 return;
984
985 /* If we've hit a wall, stop. */
986 if (cache->prev_sp == 0)
987 return;
988
989 func = get_frame_func (this_frame);
990 id = frame_id_build (cache->prev_sp, func);
991 *this_id = id;
992 }
993
994 /* Implement the "prev_register" frame_unwind method. */
995
996 static struct value *
997 aarch64_prologue_prev_register (struct frame_info *this_frame,
998 void **this_cache, int prev_regnum)
999 {
1000 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1001 struct aarch64_prologue_cache *cache;
1002
1003 if (*this_cache == NULL)
1004 *this_cache = aarch64_make_prologue_cache (this_frame);
1005 cache = *this_cache;
1006
1007 /* If we are asked to unwind the PC, then we need to return the LR
1008 instead. The prologue may save PC, but it will point into this
1009 frame's prologue, not the next frame's resume location. */
1010 if (prev_regnum == AARCH64_PC_REGNUM)
1011 {
1012 CORE_ADDR lr;
1013
1014 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1015 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
1016 }
1017
1018 /* SP is generally not saved to the stack, but this frame is
1019 identified by the next frame's stack pointer at the time of the
1020 call. The value was already reconstructed into PREV_SP. */
1021 /*
1022 +----------+ ^
1023 | saved lr | |
1024 +->| saved fp |--+
1025 | | |
1026 | | | <- Previous SP
1027 | +----------+
1028 | | saved lr |
1029 +--| saved fp |<- FP
1030 | |
1031 | |<- SP
1032 +----------+ */
1033 if (prev_regnum == AARCH64_SP_REGNUM)
1034 return frame_unwind_got_constant (this_frame, prev_regnum,
1035 cache->prev_sp);
1036
1037 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1038 prev_regnum);
1039 }
1040
1041 /* AArch64 prologue unwinder. */
1042 struct frame_unwind aarch64_prologue_unwind =
1043 {
1044 NORMAL_FRAME,
1045 default_frame_unwind_stop_reason,
1046 aarch64_prologue_this_id,
1047 aarch64_prologue_prev_register,
1048 NULL,
1049 default_frame_sniffer
1050 };
1051
1052 /* Allocate an aarch64_prologue_cache for a stub frame and record the
1053 current SP of *THIS_FRAME as the previous frame's SP. */
1054
1055 static struct aarch64_prologue_cache *
1056 aarch64_make_stub_cache (struct frame_info *this_frame)
1057 {
1058 int reg;
1059 struct aarch64_prologue_cache *cache;
1060 CORE_ADDR unwound_fp;
1061
1062 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1063 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1064
1065 cache->prev_sp
1066 = get_frame_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1067
1068 return cache;
1069 }
1070
1071 /* Our frame ID for a stub frame is the current SP and PC. */
1072
1073 static void
1074 aarch64_stub_this_id (struct frame_info *this_frame,
1075 void **this_cache, struct frame_id *this_id)
1076 {
1077 struct aarch64_prologue_cache *cache;
1078
1079 if (*this_cache == NULL)
1080 *this_cache = aarch64_make_stub_cache (this_frame);
1081 cache = *this_cache;
1082
1083 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
1084 }
1085
1086 /* Implement the "sniffer" frame_unwind method. */
1087
1088 static int
1089 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1090 struct frame_info *this_frame,
1091 void **this_prologue_cache)
1092 {
1093 CORE_ADDR addr_in_block;
1094 gdb_byte dummy[4];
1095
1096 addr_in_block = get_frame_address_in_block (this_frame);
1097 if (in_plt_section (addr_in_block)
1098 /* We also use the stub unwinder if the target memory is unreadable
1099 to avoid having the prologue unwinder trying to read it. */
1100 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1101 return 1;
1102
1103 return 0;
1104 }
1105
1106 /* AArch64 stub unwinder. */
1107 struct frame_unwind aarch64_stub_unwind =
1108 {
1109 NORMAL_FRAME,
1110 default_frame_unwind_stop_reason,
1111 aarch64_stub_this_id,
1112 aarch64_prologue_prev_register,
1113 NULL,
1114 aarch64_stub_unwind_sniffer
1115 };
1116
1117 /* Return the frame base address of *THIS_FRAME. */
1118
1119 static CORE_ADDR
1120 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1121 {
1122 struct aarch64_prologue_cache *cache;
1123
1124 if (*this_cache == NULL)
1125 *this_cache = aarch64_make_prologue_cache (this_frame);
1126 cache = *this_cache;
1127
1128 return cache->prev_sp - cache->framesize;
1129 }
1130
1131 /* AArch64 default frame base information. */
1132 struct frame_base aarch64_normal_base =
1133 {
1134 &aarch64_prologue_unwind,
1135 aarch64_normal_frame_base,
1136 aarch64_normal_frame_base,
1137 aarch64_normal_frame_base
1138 };
1139
1140 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1141 dummy frame. The frame ID's base needs to match the TOS value
1142 saved by save_dummy_frame_tos () and returned from
1143 aarch64_push_dummy_call, and the PC needs to match the dummy
1144 frame's breakpoint. */
1145
1146 static struct frame_id
1147 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1148 {
1149 return frame_id_build (get_frame_register_unsigned (this_frame,
1150 AARCH64_SP_REGNUM),
1151 get_frame_pc (this_frame));
1152 }
1153
1154 /* Implement the "unwind_pc" gdbarch method. */
1155
1156 static CORE_ADDR
1157 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1158 {
1159 CORE_ADDR pc
1160 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1161
1162 return pc;
1163 }
1164
1165 /* Implement the "unwind_sp" gdbarch method. */
1166
1167 static CORE_ADDR
1168 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1169 {
1170 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1171 }
1172
1173 /* Return the value of the REGNUM register in the previous frame of
1174 *THIS_FRAME. */
1175
1176 static struct value *
1177 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1178 void **this_cache, int regnum)
1179 {
1180 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1181 CORE_ADDR lr;
1182
1183 switch (regnum)
1184 {
1185 case AARCH64_PC_REGNUM:
1186 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1187 return frame_unwind_got_constant (this_frame, regnum, lr);
1188
1189 default:
1190 internal_error (__FILE__, __LINE__,
1191 _("Unexpected register %d"), regnum);
1192 }
1193 }
1194
1195 /* Implement the "init_reg" dwarf2_frame_ops method. */
1196
1197 static void
1198 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1199 struct dwarf2_frame_state_reg *reg,
1200 struct frame_info *this_frame)
1201 {
1202 switch (regnum)
1203 {
1204 case AARCH64_PC_REGNUM:
1205 reg->how = DWARF2_FRAME_REG_FN;
1206 reg->loc.fn = aarch64_dwarf2_prev_register;
1207 break;
1208 case AARCH64_SP_REGNUM:
1209 reg->how = DWARF2_FRAME_REG_CFA;
1210 break;
1211 }
1212 }
1213
1214 /* When arguments must be pushed onto the stack, they go on in reverse
1215 order. The code below implements a FILO (stack) to do this. */
1216
1217 typedef struct
1218 {
1219 /* Value to pass on stack. */
1220 const void *data;
1221
1222 /* Size in bytes of value to pass on stack. */
1223 int len;
1224 } stack_item_t;
1225
1226 DEF_VEC_O (stack_item_t);
1227
1228 /* Return the alignment (in bytes) of the given type. */
1229
1230 static int
1231 aarch64_type_align (struct type *t)
1232 {
1233 int n;
1234 int align;
1235 int falign;
1236
1237 t = check_typedef (t);
1238 switch (TYPE_CODE (t))
1239 {
1240 default:
1241 /* Should never happen. */
1242 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1243 return 4;
1244
1245 case TYPE_CODE_PTR:
1246 case TYPE_CODE_ENUM:
1247 case TYPE_CODE_INT:
1248 case TYPE_CODE_FLT:
1249 case TYPE_CODE_SET:
1250 case TYPE_CODE_RANGE:
1251 case TYPE_CODE_BITSTRING:
1252 case TYPE_CODE_REF:
1253 case TYPE_CODE_CHAR:
1254 case TYPE_CODE_BOOL:
1255 return TYPE_LENGTH (t);
1256
1257 case TYPE_CODE_ARRAY:
1258 case TYPE_CODE_COMPLEX:
1259 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1260
1261 case TYPE_CODE_STRUCT:
1262 case TYPE_CODE_UNION:
1263 align = 1;
1264 for (n = 0; n < TYPE_NFIELDS (t); n++)
1265 {
1266 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1267 if (falign > align)
1268 align = falign;
1269 }
1270 return align;
1271 }
1272 }
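A small worked case (types assumed for illustration): for struct { char c; double d; } the loop above visits both fields, computes member alignments of 1 and 8, and returns 8; an array of that struct falls into the TYPE_CODE_ARRAY case and returns its element's alignment, also 8.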
1273
1274 /* Return 1 if *TY is a homogeneous floating-point aggregate as
1275 defined in the AAPCS64 ABI document; otherwise return 0. */
1276
1277 static int
1278 is_hfa (struct type *ty)
1279 {
1280 switch (TYPE_CODE (ty))
1281 {
1282 case TYPE_CODE_ARRAY:
1283 {
1284 struct type *target_ty = TYPE_TARGET_TYPE (ty);
1285 if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
1286 return 1;
1287 break;
1288 }
1289
1290 case TYPE_CODE_UNION:
1291 case TYPE_CODE_STRUCT:
1292 {
1293 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
1294 {
1295 struct type *member0_type;
1296
1297 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
1298 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
1299 {
1300 int i;
1301
1302 for (i = 0; i < TYPE_NFIELDS (ty); i++)
1303 {
1304 struct type *member1_type;
1305
1306 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
1307 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
1308 || (TYPE_LENGTH (member0_type)
1309 != TYPE_LENGTH (member1_type)))
1310 return 0;
1311 }
1312 return 1;
1313 }
1314 }
1315 return 0;
1316 }
1317
1318 default:
1319 break;
1320 }
1321
1322 return 0;
1323 }
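Two assumed C types make the classification concrete; the names below are purely illustrative:

/* Accepted: at most four members, all of the same floating-point type.  */
struct hfa_example { float x, y, z; };

/* Rejected: both members are TYPE_CODE_FLT, but their lengths differ.  */
struct not_an_hfa { float x; double y; };

Arguments of the first kind are passed in consecutive V registers (see pass_in_v_or_stack below); the second kind falls back to the ordinary integer-register and stack rules.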
1324
1325 /* AArch64 function call information structure. */
1326 struct aarch64_call_info
1327 {
1328 /* The current argument number. */
1329 unsigned argnum;
1330
1331 /* The next general purpose register number, equivalent to NGRN as
1332 described in the AArch64 Procedure Call Standard. */
1333 unsigned ngrn;
1334
1335 /* The next SIMD and floating point register number, equivalent to
1336 NSRN as described in the AArch64 Procedure Call Standard. */
1337 unsigned nsrn;
1338
1339 /* The next stacked argument address, equivalent to NSAA as
1340 described in the AArch64 Procedure Call Standard. */
1341 unsigned nsaa;
1342
1343 /* Stack item vector. */
1344 VEC(stack_item_t) *si;
1345 };
1346
1347 /* Pass a value in a sequence of consecutive X registers. The caller
1348 is responsible for ensuring sufficient registers are available. */
1349
1350 static void
1351 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1352 struct aarch64_call_info *info, struct type *type,
1353 const bfd_byte *buf)
1354 {
1355 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1356 int len = TYPE_LENGTH (type);
1357 enum type_code typecode = TYPE_CODE (type);
1358 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1359
1360 info->argnum++;
1361
1362 while (len > 0)
1363 {
1364 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1365 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1366 byte_order);
1367
1368
1369 /* Adjust sub-word struct/union args when big-endian. */
1370 if (byte_order == BFD_ENDIAN_BIG
1371 && partial_len < X_REGISTER_SIZE
1372 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1373 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1374
1375 if (aarch64_debug)
1376 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
1377 info->argnum,
1378 gdbarch_register_name (gdbarch, regnum),
1379 phex (regval, X_REGISTER_SIZE));
1380 regcache_cooked_write_unsigned (regcache, regnum, regval);
1381 len -= partial_len;
1382 buf += partial_len;
1383 regnum++;
1384 }
1385 }
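For a sense of the splitting behaviour, an assumed case: a 12-byte structure passed when info->ngrn == 0 takes two iterations of the loop above, writing its first 8 bytes to x0 and the remaining 4 bytes to x1; on a big-endian target those trailing 4 bytes are additionally shifted into the most significant half of x1 by the sub-word adjustment.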
1386
1387 /* Attempt to marshall a value in a V register. Return 1 if
1388 successful, or 0 if insufficient registers are available. This
1389 function, unlike the equivalent pass_in_x() function, does not
1390 handle arguments spread across multiple registers. */
1391
1392 static int
1393 pass_in_v (struct gdbarch *gdbarch,
1394 struct regcache *regcache,
1395 struct aarch64_call_info *info,
1396 const bfd_byte *buf)
1397 {
1398 if (info->nsrn < 8)
1399 {
1400 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1401 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1402
1403 info->argnum++;
1404 info->nsrn++;
1405
1406 regcache_cooked_write (regcache, regnum, buf);
1407 if (aarch64_debug)
1408 fprintf_unfiltered (gdb_stdlog, "arg %d in %s\n",
1409 info->argnum,
1410 gdbarch_register_name (gdbarch, regnum));
1411 return 1;
1412 }
1413 info->nsrn = 8;
1414 return 0;
1415 }
1416
1417 /* Marshall an argument onto the stack. */
1418
1419 static void
1420 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1421 const bfd_byte *buf)
1422 {
1423 int len = TYPE_LENGTH (type);
1424 int align;
1425 stack_item_t item;
1426
1427 info->argnum++;
1428
1429 align = aarch64_type_align (type);
1430
1431 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1432 natural alignment of the argument's type. */
1433 align = align_up (align, 8);
1434
1435 /* The AArch64 PCS requires at most doubleword alignment. */
1436 if (align > 16)
1437 align = 16;
1438
1439 if (aarch64_debug)
1440 fprintf_unfiltered (gdb_stdlog, "arg %d len=%d @ sp + %d\n",
1441 info->argnum, len, info->nsaa);
1442
1443 item.len = len;
1444 item.data = buf;
1445 VEC_safe_push (stack_item_t, info->si, &item);
1446
1447 info->nsaa += len;
1448 if (info->nsaa & (align - 1))
1449 {
1450 /* Push stack alignment padding. */
1451 int pad = align - (info->nsaa & (align - 1));
1452
1453 item.len = pad;
1454 item.data = buf;
1455
1456 VEC_safe_push (stack_item_t, info->si, &item);
1457 info->nsaa += pad;
1458 }
1459 }
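A worked case (sizes assumed): pushing a 4-byte int when info->nsaa == 0 gives align == 8 (4 rounded up by align_up), one 4-byte data item and then a 4-byte padding item, leaving nsaa == 8; each small stack argument therefore occupies a full 8-byte slot.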
1460
1461 /* Marshall an argument into a sequence of one or more consecutive X
1462 registers or, if insufficient X registers are available, onto
1463 the stack. */
1464
1465 static void
1466 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1467 struct aarch64_call_info *info, struct type *type,
1468 const bfd_byte *buf)
1469 {
1470 int len = TYPE_LENGTH (type);
1471 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1472
1473 /* PCS C.13 - Pass in registers if we have enough spare. */
1474 if (info->ngrn + nregs <= 8)
1475 {
1476 pass_in_x (gdbarch, regcache, info, type, buf);
1477 info->ngrn += nregs;
1478 }
1479 else
1480 {
1481 info->ngrn = 8;
1482 pass_on_stack (info, type, buf);
1483 }
1484 }
1485
1486 /* Pass a value in a V register, or on the stack if insufficient
1487 registers are available. */
1488
1489 static void
1490 pass_in_v_or_stack (struct gdbarch *gdbarch,
1491 struct regcache *regcache,
1492 struct aarch64_call_info *info,
1493 struct type *type,
1494 const bfd_byte *buf)
1495 {
1496 if (!pass_in_v (gdbarch, regcache, info, buf))
1497 pass_on_stack (info, type, buf);
1498 }
1499
1500 /* Implement the "push_dummy_call" gdbarch method. */
1501
1502 static CORE_ADDR
1503 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1504 struct regcache *regcache, CORE_ADDR bp_addr,
1505 int nargs,
1506 struct value **args, CORE_ADDR sp, int struct_return,
1507 CORE_ADDR struct_addr)
1508 {
1509 int nstack = 0;
1510 int argnum;
1511 int x_argreg;
1512 int v_argreg;
1513 struct aarch64_call_info info;
1514 struct type *func_type;
1515 struct type *return_type;
1516 int lang_struct_return;
1517
1518 memset (&info, 0, sizeof (info));
1519
1520 /* We need to know what the type of the called function is in order
1521 to determine the number of named/anonymous arguments for the
1522 actual argument placement, and the return type in order to handle
1523 return value correctly.
1524
1525 The generic code above us views the decision of return in memory
1526 or return in registers as a two-stage process. The language
1527 handler is consulted first and may decide to return in memory (e.g.
1528 a class with a copy constructor returned by value); this will cause
1529 the generic code to allocate space AND insert an initial leading
1530 argument.
1531
1532 If the language code does not decide to pass in memory then the
1533 target code is consulted.
1534
1535 If the language code decides to pass in memory we want to move
1536 the pointer inserted as the initial argument from the argument
1537 list and into X8, the conventional AArch64 struct return pointer
1538 register.
1539
1540 This is slightly awkward; ideally the flag "lang_struct_return"
1541 would be passed to the target's implementation of push_dummy_call.
1542 Rather than change the target interface, we call the language code
1543 directly ourselves. */
1544
1545 func_type = check_typedef (value_type (function));
1546
1547 /* Dereference function pointer types. */
1548 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1549 func_type = TYPE_TARGET_TYPE (func_type);
1550
1551 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1552 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1553
1554 /* If language_pass_by_reference () returned true we will have been
1555 given an additional initial argument, a hidden pointer to the
1556 return slot in memory. */
1557 return_type = TYPE_TARGET_TYPE (func_type);
1558 lang_struct_return = language_pass_by_reference (return_type);
1559
1560 /* Set the return address. For the AArch64, the return breakpoint
1561 is always at BP_ADDR. */
1562 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1563
1564 /* If we were given an initial argument for the return slot because
1565 lang_struct_return was true, lose it. */
1566 if (lang_struct_return)
1567 {
1568 args++;
1569 nargs--;
1570 }
1571
1572 /* The struct_return pointer occupies X8. */
1573 if (struct_return || lang_struct_return)
1574 {
1575 if (aarch64_debug)
1576 fprintf_unfiltered (gdb_stdlog, "struct return in %s = 0x%s\n",
1577 gdbarch_register_name
1578 (gdbarch,
1579 AARCH64_STRUCT_RETURN_REGNUM),
1580 paddress (gdbarch, struct_addr));
1581 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1582 struct_addr);
1583 }
1584
1585 for (argnum = 0; argnum < nargs; argnum++)
1586 {
1587 struct value *arg = args[argnum];
1588 struct type *arg_type;
1589 int len;
1590
1591 arg_type = check_typedef (value_type (arg));
1592 len = TYPE_LENGTH (arg_type);
1593
1594 switch (TYPE_CODE (arg_type))
1595 {
1596 case TYPE_CODE_INT:
1597 case TYPE_CODE_BOOL:
1598 case TYPE_CODE_CHAR:
1599 case TYPE_CODE_RANGE:
1600 case TYPE_CODE_ENUM:
1601 if (len < 4)
1602 {
1603 /* Promote to 32 bit integer. */
1604 if (TYPE_UNSIGNED (arg_type))
1605 arg_type = builtin_type (gdbarch)->builtin_uint32;
1606 else
1607 arg_type = builtin_type (gdbarch)->builtin_int32;
1608 arg = value_cast (arg_type, arg);
1609 }
1610 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1611 value_contents (arg));
1612 break;
1613
1614 case TYPE_CODE_COMPLEX:
1615 if (info.nsrn <= 6)
1616 {
1617 const bfd_byte *buf = value_contents (arg);
1618 struct type *target_type =
1619 check_typedef (TYPE_TARGET_TYPE (arg_type));
1620
1621 pass_in_v (gdbarch, regcache, &info, buf);
1622 pass_in_v (gdbarch, regcache, &info,
1623 buf + TYPE_LENGTH (target_type));
1624 }
1625 else
1626 {
1627 info.nsrn = 8;
1628 pass_on_stack (&info, arg_type, value_contents (arg));
1629 }
1630 break;
1631 case TYPE_CODE_FLT:
1632 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
1633 value_contents (arg));
1634 break;
1635
1636 case TYPE_CODE_STRUCT:
1637 case TYPE_CODE_ARRAY:
1638 case TYPE_CODE_UNION:
1639 if (is_hfa (arg_type))
1640 {
1641 int elements = TYPE_NFIELDS (arg_type);
1642
1643 /* Homogeneous Aggregates */
1644 if (info.nsrn + elements < 8)
1645 {
1646 int i;
1647
1648 for (i = 0; i < elements; i++)
1649 {
1650 /* We know that we have sufficient registers
1651 available therefore this will never fallback
1652 to the stack. */
1653 struct value *field =
1654 value_primitive_field (arg, 0, i, arg_type);
1655 struct type *field_type =
1656 check_typedef (value_type (field));
1657
1658 pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
1659 value_contents_writeable (field));
1660 }
1661 }
1662 else
1663 {
1664 info.nsrn = 8;
1665 pass_on_stack (&info, arg_type, value_contents (arg));
1666 }
1667 }
1668 else if (len > 16)
1669 {
1670 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1671 invisible reference. */
1672
1673 /* Allocate aligned storage. */
1674 sp = align_down (sp - len, 16);
1675
1676 /* Write the real data into the stack. */
1677 write_memory (sp, value_contents (arg), len);
1678
1679 /* Construct the indirection. */
1680 arg_type = lookup_pointer_type (arg_type);
1681 arg = value_from_pointer (arg_type, sp);
1682 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1683 value_contents (arg));
1684 }
1685 else
1686 /* PCS C.15 / C.18 multiple values pass. */
1687 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1688 value_contents (arg));
1689 break;
1690
1691 default:
1692 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1693 value_contents (arg));
1694 break;
1695 }
1696 }
1697
1698 /* Make sure stack retains 16 byte alignment. */
1699 if (info.nsaa & 15)
1700 sp -= 16 - (info.nsaa & 15);
1701
1702 while (!VEC_empty (stack_item_t, info.si))
1703 {
1704 stack_item_t *si = VEC_last (stack_item_t, info.si);
1705
1706 sp -= si->len;
1707 write_memory (sp, si->data, si->len);
1708 VEC_pop (stack_item_t, info.si);
1709 }
1710
1711 VEC_free (stack_item_t, info.si);
1712
1713 /* Finally, update the SP register. */
1714 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1715
1716 return sp;
1717 }
1718
1719 /* Implement the "frame_align" gdbarch method. */
1720
1721 static CORE_ADDR
1722 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1723 {
1724 /* Align the stack to sixteen bytes. */
1725 return sp & ~(CORE_ADDR) 15;
1726 }
1727
1728 /* Return the type for an AdvSIMD Q register. */
1729
1730 static struct type *
1731 aarch64_vnq_type (struct gdbarch *gdbarch)
1732 {
1733 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1734
1735 if (tdep->vnq_type == NULL)
1736 {
1737 struct type *t;
1738 struct type *elem;
1739
1740 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1741 TYPE_CODE_UNION);
1742
1743 elem = builtin_type (gdbarch)->builtin_uint128;
1744 append_composite_type_field (t, "u", elem);
1745
1746 elem = builtin_type (gdbarch)->builtin_int128;
1747 append_composite_type_field (t, "s", elem);
1748
1749 tdep->vnq_type = t;
1750 }
1751
1752 return tdep->vnq_type;
1753 }
1754
1755 /* Return the type for an AdvSIMD D register. */
1756
1757 static struct type *
1758 aarch64_vnd_type (struct gdbarch *gdbarch)
1759 {
1760 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1761
1762 if (tdep->vnd_type == NULL)
1763 {
1764 struct type *t;
1765 struct type *elem;
1766
1767 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1768 TYPE_CODE_UNION);
1769
1770 elem = builtin_type (gdbarch)->builtin_double;
1771 append_composite_type_field (t, "f", elem);
1772
1773 elem = builtin_type (gdbarch)->builtin_uint64;
1774 append_composite_type_field (t, "u", elem);
1775
1776 elem = builtin_type (gdbarch)->builtin_int64;
1777 append_composite_type_field (t, "s", elem);
1778
1779 tdep->vnd_type = t;
1780 }
1781
1782 return tdep->vnd_type;
1783 }
1784
1785 /* Return the type for an AdvSIMD S register. */
1786
1787 static struct type *
1788 aarch64_vns_type (struct gdbarch *gdbarch)
1789 {
1790 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1791
1792 if (tdep->vns_type == NULL)
1793 {
1794 struct type *t;
1795 struct type *elem;
1796
1797 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1798 TYPE_CODE_UNION);
1799
1800 elem = builtin_type (gdbarch)->builtin_float;
1801 append_composite_type_field (t, "f", elem);
1802
1803 elem = builtin_type (gdbarch)->builtin_uint32;
1804 append_composite_type_field (t, "u", elem);
1805
1806 elem = builtin_type (gdbarch)->builtin_int32;
1807 append_composite_type_field (t, "s", elem);
1808
1809 tdep->vns_type = t;
1810 }
1811
1812 return tdep->vns_type;
1813 }
1814
1815 /* Return the type for an AdvSIMD H register. */
1816
1817 static struct type *
1818 aarch64_vnh_type (struct gdbarch *gdbarch)
1819 {
1820 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1821
1822 if (tdep->vnh_type == NULL)
1823 {
1824 struct type *t;
1825 struct type *elem;
1826
1827 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1828 TYPE_CODE_UNION);
1829
1830 elem = builtin_type (gdbarch)->builtin_uint16;
1831 append_composite_type_field (t, "u", elem);
1832
1833 elem = builtin_type (gdbarch)->builtin_int16;
1834 append_composite_type_field (t, "s", elem);
1835
1836 tdep->vnh_type = t;
1837 }
1838
1839 return tdep->vnh_type;
1840 }
1841
1842 /* Return the type for an AdvSIMD B register. */
1843
1844 static struct type *
1845 aarch64_vnb_type (struct gdbarch *gdbarch)
1846 {
1847 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1848
1849 if (tdep->vnb_type == NULL)
1850 {
1851 struct type *t;
1852 struct type *elem;
1853
1854 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1855 TYPE_CODE_UNION);
1856
1857 elem = builtin_type (gdbarch)->builtin_uint8;
1858 append_composite_type_field (t, "u", elem);
1859
1860 elem = builtin_type (gdbarch)->builtin_int8;
1861 append_composite_type_field (t, "s", elem);
1862
1863 tdep->vnb_type = t;
1864 }
1865
1866 return tdep->vnb_type;
1867 }
1868
1869 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1870
1871 static int
1872 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1873 {
1874 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1875 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1876
1877 if (reg == AARCH64_DWARF_SP)
1878 return AARCH64_SP_REGNUM;
1879
1880 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1881 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1882
1883 return -1;
1884 }
1885
1886
1887 /* Implement the "print_insn" gdbarch method. */
1888
1889 static int
1890 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1891 {
1892 info->symbols = NULL;
1893 return print_insn_aarch64 (memaddr, info);
1894 }
1895
1896 /* AArch64 BRK software debug mode instruction.
1897 Note that AArch64 code is always little-endian.
1898 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1899 static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1900
1901 /* Implement the "breakpoint_from_pc" gdbarch method. */
1902
1903 static const gdb_byte *
1904 aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1905 int *lenptr)
1906 {
1907 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1908
1909 *lenptr = sizeof (aarch64_default_breakpoint);
1910 return aarch64_default_breakpoint;
1911 }
1912
1913 /* Extract from an array REGS containing the (raw) register state a
1914 function return value of type TYPE, and copy that, in virtual
1915 format, into VALBUF. */
1916
1917 static void
1918 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1919 gdb_byte *valbuf)
1920 {
1921 struct gdbarch *gdbarch = get_regcache_arch (regs);
1922 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1923
1924 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1925 {
1926 bfd_byte buf[V_REGISTER_SIZE];
1927 int len = TYPE_LENGTH (type);
1928
1929 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1930 memcpy (valbuf, buf, len);
1931 }
1932 else if (TYPE_CODE (type) == TYPE_CODE_INT
1933 || TYPE_CODE (type) == TYPE_CODE_CHAR
1934 || TYPE_CODE (type) == TYPE_CODE_BOOL
1935 || TYPE_CODE (type) == TYPE_CODE_PTR
1936 || TYPE_CODE (type) == TYPE_CODE_REF
1937 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1938 {
1939 /* If the type is a plain integer, then the access is
1940 straightforward. Otherwise we have to play around a bit
1941 more. */
1942 int len = TYPE_LENGTH (type);
1943 int regno = AARCH64_X0_REGNUM;
1944 ULONGEST tmp;
1945
1946 while (len > 0)
1947 {
1948 /* By using store_unsigned_integer we avoid having to do
1949 anything special for small big-endian values. */
1950 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1951 store_unsigned_integer (valbuf,
1952 (len > X_REGISTER_SIZE
1953 ? X_REGISTER_SIZE : len), byte_order, tmp);
1954 len -= X_REGISTER_SIZE;
1955 valbuf += X_REGISTER_SIZE;
1956 }
1957 }
1958 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1959 {
1960 int regno = AARCH64_V0_REGNUM;
1961 bfd_byte buf[V_REGISTER_SIZE];
1962 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1963 int len = TYPE_LENGTH (target_type);
1964
1965 regcache_cooked_read (regs, regno, buf);
1966 memcpy (valbuf, buf, len);
1967 valbuf += len;
1968 regcache_cooked_read (regs, regno + 1, buf);
1969 memcpy (valbuf, buf, len);
1970 valbuf += len;
1971 }
1972 else if (is_hfa (type))
1973 {
1974 int elements = TYPE_NFIELDS (type);
1975 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1976 int len = TYPE_LENGTH (member_type);
1977 int i;
1978
1979 for (i = 0; i < elements; i++)
1980 {
1981 int regno = AARCH64_V0_REGNUM + i;
1982 bfd_byte buf[X_REGISTER_SIZE];
1983
1984 if (aarch64_debug)
1985 fprintf_unfiltered (gdb_stdlog,
1986 "read HFA return value element %d from %s\n",
1987 i + 1,
1988 gdbarch_register_name (gdbarch, regno));
1989 regcache_cooked_read (regs, regno, buf);
1990
1991 memcpy (valbuf, buf, len);
1992 valbuf += len;
1993 }
1994 }
1995 else
1996 {
1997 /* For a structure or union the behaviour is as if the value had
1998 been stored to word-aligned memory and then loaded into
1999 registers with 64-bit load instruction(s). */
2000 int len = TYPE_LENGTH (type);
2001 int regno = AARCH64_X0_REGNUM;
2002 bfd_byte buf[X_REGISTER_SIZE];
2003
2004 while (len > 0)
2005 {
2006 regcache_cooked_read (regs, regno++, buf);
2007 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2008 len -= X_REGISTER_SIZE;
2009 valbuf += X_REGISTER_SIZE;
2010 }
2011 }
2012 }
2013
2014
2015 /* Will a function return an aggregate type in memory or in a
2016 register? Return 0 if an aggregate type can be returned in a
2017 register, 1 if it must be returned in memory. */
2018
2019 static int
2020 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2021 {
2022 int nRc;
2023 enum type_code code;
2024
2025 CHECK_TYPEDEF (type);
2026
2027 /* In the AArch64 ABI, "integer-like" aggregate types are returned
2028 in registers. For an aggregate type to be integer-like, its size
2029 must be less than or equal to 2 * X_REGISTER_SIZE (16 bytes). */
2030
2031 if (is_hfa (type))
2032 {
2033 /* PCS B.5 If the argument is a Named HFA, then the argument is
2034 used unmodified. */
2035 return 0;
2036 }
2037
2038 if (TYPE_LENGTH (type) > 16)
2039 {
2040 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2041 invisible reference. */
2042
2043 return 1;
2044 }
2045
2046 return 0;
2047 }
2048
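/* Illustrative examples of the rule above, assuming the usual AAPCS64
   layouts (these types are not taken from the original change):

     struct s16 { long a; long b; };           16 bytes -> registers
     struct s24 { long a; long b; long c; };   24 bytes -> memory, via an
                                               invisible reference
     struct hfa { double x; double y; double z; };
                                               an HFA -> registers V0-V2,
                                               despite being 24 bytes      */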
2049 /* Write into appropriate registers a function return value of type
2050 TYPE, given in virtual format. */
2051
2052 static void
2053 aarch64_store_return_value (struct type *type, struct regcache *regs,
2054 const gdb_byte *valbuf)
2055 {
2056 struct gdbarch *gdbarch = get_regcache_arch (regs);
2057 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2058
2059 if (TYPE_CODE (type) == TYPE_CODE_FLT)
2060 {
2061 bfd_byte buf[V_REGISTER_SIZE];
2062 int len = TYPE_LENGTH (type);
2063
2064 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2065 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
2066 }
2067 else if (TYPE_CODE (type) == TYPE_CODE_INT
2068 || TYPE_CODE (type) == TYPE_CODE_CHAR
2069 || TYPE_CODE (type) == TYPE_CODE_BOOL
2070 || TYPE_CODE (type) == TYPE_CODE_PTR
2071 || TYPE_CODE (type) == TYPE_CODE_REF
2072 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2073 {
2074 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2075 {
2076 /* Values of one word or less are zero/sign-extended and
2077 returned in X0. */
2078 bfd_byte tmpbuf[X_REGISTER_SIZE];
2079 LONGEST val = unpack_long (type, valbuf);
2080
2081 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2082 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
2083 }
2084 else
2085 {
2086 /* Integral values greater than one word are stored in
2087 consecutive registers starting with X0. This will always
2088 be a multiple of the register size. */
2089 int len = TYPE_LENGTH (type);
2090 int regno = AARCH64_X0_REGNUM;
2091
2092 while (len > 0)
2093 {
2094 regcache_cooked_write (regs, regno++, valbuf);
2095 len -= X_REGISTER_SIZE;
2096 valbuf += X_REGISTER_SIZE;
2097 }
2098 }
2099 }
2100 else if (is_hfa (type))
2101 {
2102 int elements = TYPE_NFIELDS (type);
2103 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2104 int len = TYPE_LENGTH (member_type);
2105 int i;
2106
2107 for (i = 0; i < elements; i++)
2108 {
2109 int regno = AARCH64_V0_REGNUM + i;
2110 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
2111
2112 if (aarch64_debug)
2113 fprintf_unfiltered (gdb_stdlog,
2114 "write HFA return value element %d to %s\n",
2115 i + 1,
2116 gdbarch_register_name (gdbarch, regno));
2117
2118 memcpy (tmpbuf, valbuf, len);
2119 regcache_cooked_write (regs, regno, tmpbuf);
2120 valbuf += len;
2121 }
2122 }
2123 else
2124 {
2125 /* For a structure or union the behaviour is as if the value had
2126 been stored to word-aligned memory and then loaded into
2127 registers with 64-bit load instruction(s). */
2128 int len = TYPE_LENGTH (type);
2129 int regno = AARCH64_X0_REGNUM;
2130 bfd_byte tmpbuf[X_REGISTER_SIZE];
2131
2132 while (len > 0)
2133 {
2134 memcpy (tmpbuf, valbuf,
2135 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2136 regcache_cooked_write (regs, regno++, tmpbuf);
2137 len -= X_REGISTER_SIZE;
2138 valbuf += X_REGISTER_SIZE;
2139 }
2140 }
2141 }
2142
2143 /* Implement the "return_value" gdbarch method. */
2144
2145 static enum return_value_convention
2146 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2147 struct type *valtype, struct regcache *regcache,
2148 gdb_byte *readbuf, const gdb_byte *writebuf)
2149 {
2150 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2151
2152 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2153 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2154 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2155 {
2156 if (aarch64_return_in_memory (gdbarch, valtype))
2157 {
2158 if (aarch64_debug)
2159 fprintf_unfiltered (gdb_stdlog, "return value in memory\n");
2160 return RETURN_VALUE_STRUCT_CONVENTION;
2161 }
2162 }
2163
2164 if (writebuf)
2165 aarch64_store_return_value (valtype, regcache, writebuf);
2166
2167 if (readbuf)
2168 aarch64_extract_return_value (valtype, regcache, readbuf);
2169
2170 if (aarch64_debug)
2171 fprintf_unfiltered (gdb_stdlog, "return value in registers\n");
2172
2173 return RETURN_VALUE_REGISTER_CONVENTION;
2174 }
2175
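/* Usage note (illustrative): for a call such as "print my_func (1, 2)"
   GDB asks aarch64_return_value how the result travels.  A small struct
   of two longs comes back with RETURN_VALUE_REGISTER_CONVENTION and is
   unpacked from X0/X1 by aarch64_extract_return_value; a 32-byte struct
   instead reports RETURN_VALUE_STRUCT_CONVENTION, so generic GDB code
   arranges for the callee to write the result to memory.  The struct
   sizes and the function name here are hypothetical examples.  */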
2176 /* Implement the "get_longjmp_target" gdbarch method. */
2177
2178 static int
2179 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2180 {
2181 CORE_ADDR jb_addr;
2182 gdb_byte buf[X_REGISTER_SIZE];
2183 struct gdbarch *gdbarch = get_frame_arch (frame);
2184 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2185 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2186
2187 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2188
2189 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2190 X_REGISTER_SIZE))
2191 return 0;
2192
2193 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2194 return 1;
2195 }
2196
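/* Sketch of the longjmp lookup above, assuming an OS ABI that sets
   tdep->jb_pc (this generic file leaves it at -1, which disables the
   hook): the target address is read from

     jb_addr + jb_pc * jb_elt_size

   where jb_addr is the jmp_buf pointer passed in X0.  With the 8-byte
   element size set in aarch64_gdbarch_init, a hypothetical jb_pc of 11
   would read the saved PC from offset 88 within the jmp_buf.  */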
2197
2198 /* Return the pseudo register name corresponding to register regnum. */
2199
2200 static const char *
2201 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2202 {
2203 static const char *const q_name[] =
2204 {
2205 "q0", "q1", "q2", "q3",
2206 "q4", "q5", "q6", "q7",
2207 "q8", "q9", "q10", "q11",
2208 "q12", "q13", "q14", "q15",
2209 "q16", "q17", "q18", "q19",
2210 "q20", "q21", "q22", "q23",
2211 "q24", "q25", "q26", "q27",
2212 "q28", "q29", "q30", "q31",
2213 };
2214
2215 static const char *const d_name[] =
2216 {
2217 "d0", "d1", "d2", "d3",
2218 "d4", "d5", "d6", "d7",
2219 "d8", "d9", "d10", "d11",
2220 "d12", "d13", "d14", "d15",
2221 "d16", "d17", "d18", "d19",
2222 "d20", "d21", "d22", "d23",
2223 "d24", "d25", "d26", "d27",
2224 "d28", "d29", "d30", "d31",
2225 };
2226
2227 static const char *const s_name[] =
2228 {
2229 "s0", "s1", "s2", "s3",
2230 "s4", "s5", "s6", "s7",
2231 "s8", "s9", "s10", "s11",
2232 "s12", "s13", "s14", "s15",
2233 "s16", "s17", "s18", "s19",
2234 "s20", "s21", "s22", "s23",
2235 "s24", "s25", "s26", "s27",
2236 "s28", "s29", "s30", "s31",
2237 };
2238
2239 static const char *const h_name[] =
2240 {
2241 "h0", "h1", "h2", "h3",
2242 "h4", "h5", "h6", "h7",
2243 "h8", "h9", "h10", "h11",
2244 "h12", "h13", "h14", "h15",
2245 "h16", "h17", "h18", "h19",
2246 "h20", "h21", "h22", "h23",
2247 "h24", "h25", "h26", "h27",
2248 "h28", "h29", "h30", "h31",
2249 };
2250
2251 static const char *const b_name[] =
2252 {
2253 "b0", "b1", "b2", "b3",
2254 "b4", "b5", "b6", "b7",
2255 "b8", "b9", "b10", "b11",
2256 "b12", "b13", "b14", "b15",
2257 "b16", "b17", "b18", "b19",
2258 "b20", "b21", "b22", "b23",
2259 "b24", "b25", "b26", "b27",
2260 "b28", "b29", "b30", "b31",
2261 };
2262
2263 regnum -= gdbarch_num_regs (gdbarch);
2264
2265 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2266 return q_name[regnum - AARCH64_Q0_REGNUM];
2267
2268 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2269 return d_name[regnum - AARCH64_D0_REGNUM];
2270
2271 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2272 return s_name[regnum - AARCH64_S0_REGNUM];
2273
2274 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2275 return h_name[regnum - AARCH64_H0_REGNUM];
2276
2277 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2278 return b_name[regnum - AARCH64_B0_REGNUM];
2279
2280 internal_error (__FILE__, __LINE__,
2281 _("aarch64_pseudo_register_name: bad register number %d"),
2282 regnum);
2283 }
2284
2285 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2286
2287 static struct type *
2288 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2289 {
2290 regnum -= gdbarch_num_regs (gdbarch);
2291
2292 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2293 return aarch64_vnq_type (gdbarch);
2294
2295 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2296 return aarch64_vnd_type (gdbarch);
2297
2298 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2299 return aarch64_vns_type (gdbarch);
2300
2301 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2302 return aarch64_vnh_type (gdbarch);
2303
2304 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2305 return aarch64_vnb_type (gdbarch);
2306
2307 internal_error (__FILE__, __LINE__,
2308 _("aarch64_pseudo_register_type: bad register number %d"),
2309 regnum);
2310 }
2311
2312 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2313
2314 static int
2315 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2316 struct reggroup *group)
2317 {
2318 regnum -= gdbarch_num_regs (gdbarch);
2319
2320 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2321 return group == all_reggroup || group == vector_reggroup;
2322 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2323 return (group == all_reggroup || group == vector_reggroup
2324 || group == float_reggroup);
2325 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2326 return (group == all_reggroup || group == vector_reggroup
2327 || group == float_reggroup);
2328 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2329 return group == all_reggroup || group == vector_reggroup;
2330 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2331 return group == all_reggroup || group == vector_reggroup;
2332
2333 return group == all_reggroup;
2334 }
2335
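/* Layout note (illustrative): pseudo register numbers are relative to
   gdbarch_num_regs, with the Q registers first and the B registers
   last, 32 of each.  Within that pseudo space, "d5" lives at
   AARCH64_D0_REGNUM + 5 = 37 and aliases the low 8 bytes of the raw
   register V5; "b31" lives at AARCH64_B0_REGNUM + 31 = 159 and aliases
   the low byte of V31.  The arithmetic follows from the *_REGNUM
   #defines near the top of this file.  */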
2336 /* Implement the "pseudo_register_read_value" gdbarch method. */
2337
2338 static struct value *
2339 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2340 struct regcache *regcache,
2341 int regnum)
2342 {
2343 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2344 struct value *result_value;
2345 gdb_byte *buf;
2346
2347 result_value = allocate_value (register_type (gdbarch, regnum));
2348 VALUE_LVAL (result_value) = lval_register;
2349 VALUE_REGNUM (result_value) = regnum;
2350 buf = value_contents_raw (result_value);
2351
2352 regnum -= gdbarch_num_regs (gdbarch);
2353
2354 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2355 {
2356 enum register_status status;
2357 unsigned v_regnum;
2358
2359 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2360 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2361 if (status != REG_VALID)
2362 mark_value_bytes_unavailable (result_value, 0,
2363 TYPE_LENGTH (value_type (result_value)));
2364 else
2365 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2366 return result_value;
2367 }
2368
2369 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2370 {
2371 enum register_status status;
2372 unsigned v_regnum;
2373
2374 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2375 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2376 if (status != REG_VALID)
2377 mark_value_bytes_unavailable (result_value, 0,
2378 TYPE_LENGTH (value_type (result_value)));
2379 else
2380 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2381 return result_value;
2382 }
2383
2384 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2385 {
2386 enum register_status status;
2387 unsigned v_regnum;
2388
2389 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2390 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2391 if (status != REG_VALID)
 mark_value_bytes_unavailable (result_value, 0,
 TYPE_LENGTH (value_type (result_value)));
 else
 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2392 return result_value;
2393 }
2394
2395 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2396 {
2397 enum register_status status;
2398 unsigned v_regnum;
2399
2400 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2401 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2402 if (status != REG_VALID)
2403 mark_value_bytes_unavailable (result_value, 0,
2404 TYPE_LENGTH (value_type (result_value)));
2405 else
2406 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2407 return result_value;
2408 }
2409
2410 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2411 {
2412 enum register_status status;
2413 unsigned v_regnum;
2414
2415 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2416 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2417 if (status != REG_VALID)
2418 mark_value_bytes_unavailable (result_value, 0,
2419 TYPE_LENGTH (value_type (result_value)));
2420 else
2421 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2422 return result_value;
2423 }
2424
2425 gdb_assert_not_reached ("regnum out of bounds");
2426 }
2427
2428 /* Implement the "pseudo_register_write" gdbarch method. */
2429
2430 static void
2431 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2432 int regnum, const gdb_byte *buf)
2433 {
2434 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2435
2436 /* Ensure the register buffer is zero. We want GDB writes to the
2437 various 'scalar' pseudo registers to behave like architectural
2438 writes: register-width bytes are written and the remainder is set
2439 to zero. */
2440 memset (reg_buf, 0, sizeof (reg_buf));
2441
2442 regnum -= gdbarch_num_regs (gdbarch);
2443
2444 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2445 {
2446 /* pseudo Q registers */
2447 unsigned v_regnum;
2448
2449 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2450 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2451 regcache_raw_write (regcache, v_regnum, reg_buf);
2452 return;
2453 }
2454
2455 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2456 {
2457 /* pseudo D registers */
2458 unsigned v_regnum;
2459
2460 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2461 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2462 regcache_raw_write (regcache, v_regnum, reg_buf);
2463 return;
2464 }
2465
2466 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2467 {
2468 unsigned v_regnum;
2469
2470 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2471 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2472 regcache_raw_write (regcache, v_regnum, reg_buf);
2473 return;
2474 }
2475
2476 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2477 {
2478 /* pseudo H registers */
2479 unsigned v_regnum;
2480
2481 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2482 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2483 regcache_raw_write (regcache, v_regnum, reg_buf);
2484 return;
2485 }
2486
2487 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2488 {
2489 /* pseudo B registers */
2490 unsigned v_regnum;
2491
2492 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2493 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2494 regcache_raw_write (regcache, v_regnum, reg_buf);
2495 return;
2496 }
2497
2498 gdb_assert_not_reached ("regnum out of bounds");
2499 }
2500
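/* Behavioural note (illustrative): because reg_buf is zeroed before the
   partial copy, a write such as

     (gdb) set $s0.f = 1.5

   stores 4 bytes into V0 and clears the remaining 12, mirroring the
   architectural rule that a scalar write to an AdvSIMD register zeroes
   the unused upper bits.  The value 1.5 is only an example.  */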
2501 /* Callback function for user_reg_add. */
2502
2503 static struct value *
2504 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2505 {
2506 const int *reg_p = baton;
2507
2508 return value_of_register (*reg_p, frame);
2509 }
2510
2511
2512 /* Initialize the current architecture based on INFO. If possible,
2513 re-use an architecture from ARCHES, which is a list of
2514 architectures already created during this debugging session.
2515
2516 Called e.g. at program startup, when reading a core file, and when
2517 reading a binary file. */
2518
2519 static struct gdbarch *
2520 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2521 {
2522 struct gdbarch_tdep *tdep;
2523 struct gdbarch *gdbarch;
2524 struct gdbarch_list *best_arch;
2525 struct tdesc_arch_data *tdesc_data = NULL;
2526 const struct target_desc *tdesc = info.target_desc;
2527 int i;
2528 int have_fpa_registers = 1;
2529 int valid_p = 1;
2530 const struct tdesc_feature *feature;
2531 int num_regs = 0;
2532 int num_pseudo_regs = 0;
2533
2534 /* Ensure we always have a target descriptor. */
2535 if (!tdesc_has_registers (tdesc))
2536 tdesc = tdesc_aarch64;
2537
2538 gdb_assert (tdesc);
2539
2540 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2541
2542 if (feature == NULL)
2543 return NULL;
2544
2545 tdesc_data = tdesc_data_alloc ();
2546
2547 /* Validate the descriptor provides the mandatory core R registers
2548 and allocate their numbers. */
2549 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2550 valid_p &=
2551 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2552 aarch64_r_register_names[i]);
2553
2554 num_regs = AARCH64_X0_REGNUM + i;
2555
2556 /* Look for the V registers. */
2557 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2558 if (feature)
2559 {
2560 /* Validate the descriptor provides the mandatory V registers
2561 and allocate their numbers. */
2562 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2563 valid_p &=
2564 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2565 aarch64_v_register_names[i]);
2566
2567 num_regs = AARCH64_V0_REGNUM + i;
2568
2569 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2570 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2571 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2572 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2573 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2574 }
2575
2576 if (!valid_p)
2577 {
2578 tdesc_data_cleanup (tdesc_data);
2579 return NULL;
2580 }
2581
2582 /* AArch64 code is always little-endian. */
2583 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2584
2585 /* If there is already a candidate, use it. */
2586 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2587 best_arch != NULL;
2588 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2589 {
2590 /* Found a match. */
2591 break;
2592 }
2593
2594 if (best_arch != NULL)
2595 {
2596 if (tdesc_data != NULL)
2597 tdesc_data_cleanup (tdesc_data);
2598 return best_arch->gdbarch;
2599 }
2600
2601 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
2602 gdbarch = gdbarch_alloc (&info, tdep);
2603
2604 /* This should be low enough for everything. */
2605 tdep->lowest_pc = 0x20;
2606 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2607 tdep->jb_elt_size = 8;
2608
2609 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2610 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2611
2612 /* Frame handling. */
2613 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2614 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2615 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2616
2617 /* Advance PC across function entry code. */
2618 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2619
2620 /* The stack grows downward. */
2621 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2622
2623 /* Breakpoint manipulation. */
2624 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2625 set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
2626 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2627
2628 /* Information about registers, etc. */
2629 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2630 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2631 set_gdbarch_num_regs (gdbarch, num_regs);
2632
2633 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2634 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2635 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2636 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2637 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2638 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2639 aarch64_pseudo_register_reggroup_p);
2640
2641 /* ABI */
2642 set_gdbarch_short_bit (gdbarch, 16);
2643 set_gdbarch_int_bit (gdbarch, 32);
2644 set_gdbarch_float_bit (gdbarch, 32);
2645 set_gdbarch_double_bit (gdbarch, 64);
2646 set_gdbarch_long_double_bit (gdbarch, 128);
2647 set_gdbarch_long_bit (gdbarch, 64);
2648 set_gdbarch_long_long_bit (gdbarch, 64);
2649 set_gdbarch_ptr_bit (gdbarch, 64);
2650 set_gdbarch_char_signed (gdbarch, 0);
2651 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2652 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2653 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2654
2655 /* Internal <-> external register number maps. */
2656 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2657
2658 /* Returning results. */
2659 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2660
2661 /* Disassembly. */
2662 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2663
2664 /* Virtual tables. */
2665 set_gdbarch_vbit_in_delta (gdbarch, 1);
2666
2667 /* Hook in the ABI-specific overrides, if they have been registered. */
2668 info.target_desc = tdesc;
2669 info.tdep_info = (void *) tdesc_data;
2670 gdbarch_init_osabi (info, gdbarch);
2671
2672 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2673
2674 /* Add some default predicates. */
2675 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2676 dwarf2_append_unwinders (gdbarch);
2677 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2678
2679 frame_base_set_default (gdbarch, &aarch64_normal_base);
2680
2681 /* Now that we have tuned the configuration, set a few final things
2682 based on what the OS ABI has told us. */
2683
2684 if (tdep->jb_pc >= 0)
2685 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2686
2687 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2688
2689 /* Add standard register aliases. */
2690 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2691 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2692 value_of_aarch64_user_reg,
2693 &aarch64_register_aliases[i].regnum);
2694
2695 return gdbarch;
2696 }
2697
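/* Integration note (illustrative): an OS ABI handler registered via
   gdbarch_init_osabi can refine the gdbarch built above.  A hypothetical
   Linux handler, for instance, might set tdep->jb_pc to the jmp_buf slot
   holding the saved PC so that the get_longjmp_target hook becomes
   active, and install its own signal-frame unwinder.  Nothing in this
   file depends on which, if any, OS ABI is present.  */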
2698 static void
2699 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2700 {
2701 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2702
2703 if (tdep == NULL)
2704 return;
2705
2706 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2707 paddress (gdbarch, tdep->lowest_pc));
2708 }
2709
2710 /* Suppress warning from -Wmissing-prototypes. */
2711 extern initialize_file_ftype _initialize_aarch64_tdep;
2712
2713 void
2714 _initialize_aarch64_tdep (void)
2715 {
2716 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2717 aarch64_dump_tdep);
2718
2719 initialize_tdesc_aarch64 ();
2720
2721 /* Debug this file's internals. */
2722 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2723 Set AArch64 debugging."), _("\
2724 Show AArch64 debugging."), _("\
2725 When on, AArch64 specific debugging is enabled."),
2726 NULL,
2727 show_aarch64_debug,
2728 &setdebuglist, &showdebuglist);
2729 }
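/* Usage note (illustrative): the maintenance switch registered above is
   driven from the GDB command line, e.g.

     (gdb) set debug aarch64 on
     (gdb) show debug aarch64

   which toggles the aarch64_debug flag consulted by the
   fprintf_unfiltered calls earlier in this file.  */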