| OLD | NEW |
| 1 /* Low level interface to ptrace, for the remote server for GDB. | 1 /* Low level interface to ptrace, for the remote server for GDB. |
| 2 Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc. | 2 Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc. |
| 3 | 3 |
| 4 This file is part of GDB. | 4 This file is part of GDB. |
| 5 | 5 |
| 6 This program is free software; you can redistribute it and/or modify | 6 This program is free software; you can redistribute it and/or modify |
| 7 it under the terms of the GNU General Public License as published by | 7 it under the terms of the GNU General Public License as published by |
| 8 the Free Software Foundation; either version 3 of the License, or | 8 the Free Software Foundation; either version 3 of the License, or |
| 9 (at your option) any later version. | 9 (at your option) any later version. |
| 10 | 10 |
| 11 This program is distributed in the hope that it will be useful, | 11 This program is distributed in the hope that it will be useful, |
| 12 but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 14 GNU General Public License for more details. | 14 GNU General Public License for more details. |
| 15 | 15 |
| 16 You should have received a copy of the GNU General Public License | 16 You should have received a copy of the GNU General Public License |
| 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */ | 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */ |
| 18 | 18 |
| 19 #include "server.h" | 19 #include "server.h" |
| 20 #include "linux-low.h" | 20 #include "linux-low.h" |
| 21 #include "linux-osdata.h" | 21 #include "linux-osdata.h" |
| 22 #include "agent.h" |
| 22 | 23 |
| 23 #include <sys/wait.h> | 24 #include <sys/wait.h> |
| 24 #include <stdio.h> | 25 #include <stdio.h> |
| 25 #include <sys/param.h> | 26 #include <sys/param.h> |
| 26 #include <sys/ptrace.h> | 27 #include <sys/ptrace.h> |
| 27 #include "linux-ptrace.h" | 28 #include "linux-ptrace.h" |
| 28 #include "linux-procfs.h" | 29 #include "linux-procfs.h" |
| 29 #include <signal.h> | 30 #include <signal.h> |
| 30 #include <sys/ioctl.h> | 31 #include <sys/ioctl.h> |
| 31 #include <fcntl.h> | 32 #include <fcntl.h> |
| (...skipping 38 matching lines...) |
| 70 #endif | 71 #endif |
| 71 | 72 |
| 72 /* This is the kernel's hard limit. Not to be confused with | 73 /* This is the kernel's hard limit. Not to be confused with |
| 73 SIGRTMIN. */ | 74 SIGRTMIN. */ |
| 74 #ifndef __SIGRTMIN | 75 #ifndef __SIGRTMIN |
| 75 #define __SIGRTMIN 32 | 76 #define __SIGRTMIN 32 |
| 76 #endif | 77 #endif |
| 77 | 78 |
| 78 #ifdef __UCLIBC__ | 79 #ifdef __UCLIBC__ |
| 79 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__)) | 80 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__)) |
| 81 /* PTRACE_TEXT_ADDR and friends. */ |
| 82 #include <asm/ptrace.h> |
| 80 #define HAS_NOMMU | 83 #define HAS_NOMMU |
| 81 #endif | 84 #endif |
| 82 #endif | 85 #endif |
| 83 | 86 |
| 87 #ifndef HAVE_ELF32_AUXV_T |
| 88 /* Copied from glibc's elf.h. */ |
| 89 typedef struct |
| 90 { |
| 91 uint32_t a_type; /* Entry type */ |
| 92 union |
| 93 { |
| 94 uint32_t a_val; /* Integer value */ |
| 95 /* We used to have pointer elements added here. We cannot do that, |
| 96 though, since it does not work when using 32-bit definitions |
| 97 on 64-bit platforms and vice versa. */ |
| 98 } a_un; |
| 99 } Elf32_auxv_t; |
| 100 #endif |
| 101 |
| 102 #ifndef HAVE_ELF64_AUXV_T |
| 103 /* Copied from glibc's elf.h. */ |
| 104 typedef struct |
| 105 { |
| 106 uint64_t a_type; /* Entry type */ |
| 107 union |
| 108 { |
| 109 uint64_t a_val; /* Integer value */ |
| 110 /* We used to have pointer elements added here. We cannot do that, |
| 111 though, since it does not work when using 32-bit definitions |
| 112 on 64-bit platforms and vice versa. */ |
| 113 } a_un; |
| 114 } Elf64_auxv_t; |
| 115 #endif |
| 116 |
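The fallback typedefs above deliberately keep the union integer-only: gdbserver may inspect a 32-bit inferior from a 64-bit build (and vice versa), so the auxv entry size must follow the chosen Elf32/Elf64 type, never the host pointer width. A minimal sketch of walking such data, assuming BUF already holds bytes read from /proc/<pid>/auxv of a 64-bit inferior (illustration only, not code from this patch):

#include <elf.h>
#include <stdio.h>

/* Illustration only: dump 64-bit auxv entries from a raw buffer.
   gdbserver picks Elf32_auxv_t or Elf64_auxv_t from the inferior's
   ELF class, not from the host's word size.  */
static void
dump_auxv_64 (const unsigned char *buf, size_t len)
{
  const Elf64_auxv_t *av = (const Elf64_auxv_t *) buf;
  size_t i, n = len / sizeof (Elf64_auxv_t);

  for (i = 0; i < n && av[i].a_type != AT_NULL; i++)
    printf ("type %llu  value 0x%llx\n",
            (unsigned long long) av[i].a_type,
            (unsigned long long) av[i].a_un.a_val);
}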
| 84 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol | 117 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol |
| 85 representation of the thread ID. | 118 representation of the thread ID. |
| 86 | 119 |
| 87 ``all_lwps'' is keyed by the process ID - which on Linux is (presently) | 120 ``all_lwps'' is keyed by the process ID - which on Linux is (presently) |
| 88 the same as the LWP ID. | 121 the same as the LWP ID. |
| 89 | 122 |
| 90 ``all_processes'' is keyed by the "overall process ID", which | 123 ``all_processes'' is keyed by the "overall process ID", which |
| 91 GNU/Linux calls tgid, "thread group ID". */ | 124 GNU/Linux calls tgid, "thread group ID". */ |
| 92 | 125 |
| 93 struct inferior_list all_lwps; | 126 struct inferior_list all_lwps; |
| 94 | 127 |
| 95 /* A list of all unknown processes which receive stop signals. Some other | 128 /* A list of all unknown processes which receive stop signals. Some |
| 96 process will presumably claim each of these as forked children | 129 other process will presumably claim each of these as forked |
| 97 momentarily. */ | 130 children momentarily. */ |
| 98 | 131 |
| 99 struct inferior_list stopped_pids; | 132 struct simple_pid_list |
| 133 { |
| 134 /* The process ID. */ |
| 135 int pid; |
| 100 | 136 |
| 101 /* FIXME this is a bit of a hack, and could be removed. */ | 137 /* The status as reported by waitpid. */ |
| 102 int stopping_threads; | 138 int status; |
| 139 |
| 140 /* Next in chain. */ |
| 141 struct simple_pid_list *next; |
| 142 }; |
| 143 struct simple_pid_list *stopped_pids; |
| 144 |
| 145 /* Trivial list manipulation functions to keep track of a list of new |
| 146 stopped processes. */ |
| 147 |
| 148 static void |
| 149 add_to_pid_list (struct simple_pid_list **listp, int pid, int status) |
| 150 { |
| 151 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list)); |
| 152 |
| 153 new_pid->pid = pid; |
| 154 new_pid->status = status; |
| 155 new_pid->next = *listp; |
| 156 *listp = new_pid; |
| 157 } |
| 158 |
| 159 static int |
| 160 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp) |
| 161 { |
| 162 struct simple_pid_list **p; |
| 163 |
| 164 for (p = listp; *p != NULL; p = &(*p)->next) |
| 165 if ((*p)->pid == pid) |
| 166 { |
| 167 » struct simple_pid_list *next = (*p)->next; |
| 168 |
| 169 » *statusp = (*p)->status; |
| 170 » xfree (*p); |
| 171 » *p = next; |
| 172 » return 1; |
| 173 } |
| 174 return 0; |
| 175 } |
| 176 |
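The two helpers above support a stash-and-claim pattern: the low-level wait loop records a stop it cannot yet attribute to a known LWP, and handle_extended_wait later claims it by PID, recovering the full waitpid status instead of assuming SIGSTOP as the old code did. A hypothetical caller, only to show the call sequence (record_unclaimed_stop and claim_clone_stop are made-up names, not gdbserver functions):

/* Illustration of the stash-and-claim pattern; not part of the patch.  */

static void
record_unclaimed_stop (int pid, int wstat)
{
  /* A stop arrived for a PID we do not know yet; remember both the
     PID and the raw waitpid status for whoever claims it later.  */
  add_to_pid_list (&stopped_pids, pid, wstat);
}

static int
claim_clone_stop (int new_pid, int *wstat)
{
  /* Return 1 and fill *WSTAT if the new clone's initial stop was
     already seen; otherwise the caller must waitpid for it.  */
  return pull_pid_from_list (&stopped_pids, new_pid, wstat);
}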
| 177 enum stopping_threads_kind |
| 178 { |
| 179 /* Not stopping threads presently. */ |
| 180 NOT_STOPPING_THREADS, |
| 181 |
| 182 /* Stopping threads. */ |
| 183 STOPPING_THREADS, |
| 184 |
| 185 /* Stopping and suspending threads. */ |
| 186 STOPPING_AND_SUSPENDING_THREADS |
| 187 }; |
| 188 |
| 189 /* This is set while stop_all_lwps is in effect. */ |
| 190 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS; |
| 103 | 191 |
| 104 /* FIXME make into a target method? */ | 192 /* FIXME make into a target method? */ |
| 105 int using_threads = 1; | 193 int using_threads = 1; |
| 106 | 194 |
| 107 /* True if we're presently stabilizing threads (moving them out of | 195 /* True if we're presently stabilizing threads (moving them out of |
| 108 jump pads). */ | 196 jump pads). */ |
| 109 static int stabilizing_threads; | 197 static int stabilizing_threads; |
| 110 | 198 |
| 111 /* This flag is true iff we've just created or attached to our first | 199 /* This flag is true iff we've just created or attached to our first |
| 112 inferior but it has not stopped yet. As soon as it does, we need | 200 inferior but it has not stopped yet. As soon as it does, we need |
| (...skipping 47 matching lines...) |
| 160 return the_low_target.install_fast_tracepoint_jump_pad != NULL; | 248 return the_low_target.install_fast_tracepoint_jump_pad != NULL; |
| 161 } | 249 } |
| 162 | 250 |
| 163 struct pending_signals | 251 struct pending_signals |
| 164 { | 252 { |
| 165 int signal; | 253 int signal; |
| 166 siginfo_t info; | 254 siginfo_t info; |
| 167 struct pending_signals *prev; | 255 struct pending_signals *prev; |
| 168 }; | 256 }; |
| 169 | 257 |
| 170 #define PTRACE_ARG3_TYPE void * | |
| 171 #define PTRACE_ARG4_TYPE void * | |
| 172 #define PTRACE_XFER_TYPE long | |
| 173 | |
| 174 #ifdef HAVE_LINUX_REGSETS | 258 #ifdef HAVE_LINUX_REGSETS |
| 175 static char *disabled_regsets; | 259 static char *disabled_regsets; |
| 176 static int num_regsets; | 260 static int num_regsets; |
| 177 #endif | 261 #endif |
| 178 | 262 |
| 179 /* The read/write ends of the pipe registered as waitable file in the | 263 /* The read/write ends of the pipe registered as waitable file in the |
| 180 event loop. */ | 264 event loop. */ |
| 181 static int linux_event_pipe[2] = { -1, -1 }; | 265 static int linux_event_pipe[2] = { -1, -1 }; |
| 182 | 266 |
| 183 /* True if we're currently in async mode. */ | 267 /* True if we're currently in async mode. */ |
| 184 #define target_is_async_p() (linux_event_pipe[0] != -1) | 268 #define target_is_async_p() (linux_event_pipe[0] != -1) |
| 185 | 269 |
| 186 static void send_sigstop (struct lwp_info *lwp); | 270 static void send_sigstop (struct lwp_info *lwp); |
| 187 static void wait_for_sigstop (struct inferior_list_entry *entry); | 271 static void wait_for_sigstop (struct inferior_list_entry *entry); |
| 188 | 272 |
| 189 /* Accepts an integer PID; Returns a string representing a file that | |
| 190 can be opened to get info for the child process. | |
| 191 Space for the result is malloc'd, caller must free. */ | |
| 192 | |
| 193 char * | |
| 194 linux_child_pid_to_exec_file (int pid) | |
| 195 { | |
| 196 char *name1, *name2; | |
| 197 | |
| 198 name1 = xmalloc (MAXPATHLEN); | |
| 199 name2 = xmalloc (MAXPATHLEN); | |
| 200 memset (name2, 0, MAXPATHLEN); | |
| 201 | |
| 202 sprintf (name1, "/proc/%d/exe", pid); | |
| 203 if (readlink (name1, name2, MAXPATHLEN) > 0) | |
| 204 { | |
| 205 free (name1); | |
| 206 return name2; | |
| 207 } | |
| 208 else | |
| 209 { | |
| 210 free (name2); | |
| 211 return name1; | |
| 212 } | |
| 213 } | |
| 214 | |
| 215 /* Return non-zero if HEADER is a 64-bit ELF file. */ | 273 /* Return non-zero if HEADER is a 64-bit ELF file. */ |
| 216 | 274 |
| 217 static int | 275 static int |
| 218 elf_64_header_p (const Elf64_Ehdr *header) | 276 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine) |
| 219 { | 277 { |
| 220 return (header->e_ident[EI_MAG0] == ELFMAG0 | 278 if (header->e_ident[EI_MAG0] == ELFMAG0 |
| 221 && header->e_ident[EI_MAG1] == ELFMAG1 | 279 && header->e_ident[EI_MAG1] == ELFMAG1 |
| 222 && header->e_ident[EI_MAG2] == ELFMAG2 | 280 && header->e_ident[EI_MAG2] == ELFMAG2 |
| 223 && header->e_ident[EI_MAG3] == ELFMAG3 | 281 && header->e_ident[EI_MAG3] == ELFMAG3) |
| 224 && header->e_ident[EI_CLASS] == ELFCLASS64); | 282 { |
| 283 *machine = header->e_machine; |
| 284 return header->e_ident[EI_CLASS] == ELFCLASS64; |
| 285 |
| 286 } |
| 287 *machine = EM_NONE; |
| 288 return -1; |
| 225 } | 289 } |
| 226 | 290 |
| 227 /* Return non-zero if FILE is a 64-bit ELF file, | 291 /* Return non-zero if FILE is a 64-bit ELF file, |
| 228 zero if the file is not a 64-bit ELF file, | 292 zero if the file is not a 64-bit ELF file, |
| 229 and -1 if the file is not accessible or doesn't exist. */ | 293 and -1 if the file is not accessible or doesn't exist. */ |
| 230 | 294 |
| 231 int | 295 static int |
| 232 elf_64_file_p (const char *file) | 296 elf_64_file_p (const char *file, unsigned int *machine) |
| 233 { | 297 { |
| 234 Elf64_Ehdr header; | 298 Elf64_Ehdr header; |
| 235 int fd; | 299 int fd; |
| 236 | 300 |
| 237 fd = open (file, O_RDONLY); | 301 fd = open (file, O_RDONLY); |
| 238 if (fd < 0) | 302 if (fd < 0) |
| 239 return -1; | 303 return -1; |
| 240 | 304 |
| 241 if (read (fd, &header, sizeof (header)) != sizeof (header)) | 305 if (read (fd, &header, sizeof (header)) != sizeof (header)) |
| 242 { | 306 { |
| 243 close (fd); | 307 close (fd); |
| 244 return 0; | 308 return 0; |
| 245 } | 309 } |
| 246 close (fd); | 310 close (fd); |
| 247 | 311 |
| 248 return elf_64_header_p (&header); | 312 return elf_64_header_p (&header, machine); |
| 313 } |
| 314 |
| 315 /* Accepts an integer PID; Returns true if the executable that PID |
| 316 is running is a 64-bit ELF file. */ |
| 317 |
| 318 int |
| 319 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine) |
| 320 { |
| 321 char file[MAXPATHLEN]; |
| 322 |
| 323 sprintf (file, "/proc/%d/exe", pid); |
| 324 return elf_64_file_p (file, machine); |
| 249 } | 325 } |
| 250 | 326 |
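A hypothetical caller of the new interface could look like the following; report_inferior_wordsize is invented for illustration, and a negative return covers both an unreadable /proc entry and a non-ELF image, per elf_64_header_p above:

#include <stdio.h>

/* Illustration only: classify the program that PID is running.  */
static void
report_inferior_wordsize (int pid)
{
  unsigned int machine;
  int is_64 = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (is_64 < 0)
    fprintf (stderr, "pid %d: /proc/%d/exe not readable or not ELF\n",
             pid, pid);
  else
    fprintf (stderr, "pid %d: %s-bit ELF, e_machine %u\n",
             pid, is_64 ? "64" : "32", machine);
}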
| 251 static void | 327 static void |
| 252 delete_lwp (struct lwp_info *lwp) | 328 delete_lwp (struct lwp_info *lwp) |
| 253 { | 329 { |
| 254 remove_thread (get_lwp_thread (lwp)); | 330 remove_thread (get_lwp_thread (lwp)); |
| 255 remove_inferior (&all_lwps, &lwp->head); | 331 remove_inferior (&all_lwps, &lwp->head); |
| 256 free (lwp->arch_private); | 332 free (lwp->arch_private); |
| 257 free (lwp); | 333 free (lwp); |
| 258 } | 334 } |
| (...skipping 101 matching lines...) |
| 360 static void | 436 static void |
| 361 handle_extended_wait (struct lwp_info *event_child, int wstat) | 437 handle_extended_wait (struct lwp_info *event_child, int wstat) |
| 362 { | 438 { |
| 363 int event = wstat >> 16; | 439 int event = wstat >> 16; |
| 364 struct lwp_info *new_lwp; | 440 struct lwp_info *new_lwp; |
| 365 | 441 |
| 366 if (event == PTRACE_EVENT_CLONE) | 442 if (event == PTRACE_EVENT_CLONE) |
| 367 { | 443 { |
| 368 ptid_t ptid; | 444 ptid_t ptid; |
| 369 unsigned long new_pid; | 445 unsigned long new_pid; |
| 370 int ret, status = W_STOPCODE (SIGSTOP); | 446 int ret, status; |
| 371 | 447 |
| 372 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid); | 448 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid); |
| 373 | 449 |
| 374 /* If we haven't already seen the new PID stop, wait for it now. */ | 450 /* If we haven't already seen the new PID stop, wait for it now. */ |
| 375 if (! pull_pid_from_list (&stopped_pids, new_pid)) | 451 if (!pull_pid_from_list (&stopped_pids, new_pid, &status)) |
| 376 { | 452 { |
| 377 /* The new child has a pending SIGSTOP. We can't affect it until it | 453 /* The new child has a pending SIGSTOP. We can't affect it until it |
| 378 hits the SIGSTOP, but we're already attached. */ | 454 hits the SIGSTOP, but we're already attached. */ |
| 379 | 455 |
| 380 ret = my_waitpid (new_pid, &status, __WALL); | 456 ret = my_waitpid (new_pid, &status, __WALL); |
| 381 | 457 |
| 382 if (ret == -1) | 458 if (ret == -1) |
| 383 perror_with_name ("waiting for new child"); | 459 perror_with_name ("waiting for new child"); |
| 384 else if (ret != new_pid) | 460 else if (ret != new_pid) |
| 385 warning ("wait returned unexpected PID %d", ret); | 461 warning ("wait returned unexpected PID %d", ret); |
| 386 else if (!WIFSTOPPED (status)) | 462 else if (!WIFSTOPPED (status)) |
| 387 warning ("wait returned unexpected status 0x%x", status); | 463 warning ("wait returned unexpected status 0x%x", status); |
| 388 } | 464 } |
| 389 | 465 |
| 390 linux_enable_event_reporting (new_pid); | 466 linux_enable_event_reporting (new_pid); |
| 391 | 467 |
| 392 ptid = ptid_build (pid_of (event_child), new_pid, 0); | 468 ptid = ptid_build (pid_of (event_child), new_pid, 0); |
| 393 new_lwp = (struct lwp_info *) add_lwp (ptid); | 469 new_lwp = (struct lwp_info *) add_lwp (ptid); |
| 394 add_thread (ptid, new_lwp); | 470 add_thread (ptid, new_lwp); |
| 395 | 471 |
| 396 /* Either we're going to immediately resume the new thread | 472 /* Either we're going to immediately resume the new thread |
| 397 or leave it stopped. linux_resume_one_lwp is a nop if it | 473 or leave it stopped. linux_resume_one_lwp is a nop if it |
| 398 thinks the thread is currently running, so set this first | 474 thinks the thread is currently running, so set this first |
| 399 before calling linux_resume_one_lwp. */ | 475 before calling linux_resume_one_lwp. */ |
| 400 new_lwp->stopped = 1; | 476 new_lwp->stopped = 1; |
| 401 | 477 |
| 478 /* If we're suspending all threads, leave this one suspended |
| 479 too. */ |
| 480 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS) |
| 481 new_lwp->suspended = 1; |
| 482 |
| 402 /* Normally we will get the pending SIGSTOP. But in some cases | 483 /* Normally we will get the pending SIGSTOP. But in some cases |
| 403 we might get another signal delivered to the group first. | 484 we might get another signal delivered to the group first. |
| 404 If we do get another signal, be sure not to lose it. */ | 485 If we do get another signal, be sure not to lose it. */ |
| 405 if (WSTOPSIG (status) == SIGSTOP) | 486 if (WSTOPSIG (status) == SIGSTOP) |
| 406 { | 487 { |
| 407 » if (stopping_threads) | 488 » if (stopping_threads != NOT_STOPPING_THREADS) |
| 408 new_lwp->stop_pc = get_stop_pc (new_lwp); | 489 new_lwp->stop_pc = get_stop_pc (new_lwp); |
| 409 else | 490 else |
| 410 linux_resume_one_lwp (new_lwp, 0, 0, NULL); | 491 linux_resume_one_lwp (new_lwp, 0, 0, NULL); |
| 411 } | 492 } |
| 412 else | 493 else |
| 413 { | 494 { |
| 414 new_lwp->stop_expected = 1; | 495 new_lwp->stop_expected = 1; |
| 415 | 496 |
| 416 » if (stopping_threads) | 497 » if (stopping_threads != NOT_STOPPING_THREADS) |
| 417 { | 498 { |
| 418 new_lwp->stop_pc = get_stop_pc (new_lwp); | 499 new_lwp->stop_pc = get_stop_pc (new_lwp); |
| 419 new_lwp->status_pending_p = 1; | 500 new_lwp->status_pending_p = 1; |
| 420 new_lwp->status_pending = status; | 501 new_lwp->status_pending = status; |
| 421 } | 502 } |
| 422 else | 503 else |
| 423 /* Pass the signal on. This is what GDB does - except | 504 /* Pass the signal on. This is what GDB does - except |
| 424 shouldn't we really report it instead? */ | 505 shouldn't we really report it instead? */ |
| 425 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL); | 506 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL); |
| 426 } | 507 } |
| (...skipping 134 matching lines...) |
| 561 if (pid == 0) | 642 if (pid == 0) |
| 562 { | 643 { |
| 563 ptrace (PTRACE_TRACEME, 0, 0, 0); | 644 ptrace (PTRACE_TRACEME, 0, 0, 0); |
| 564 | 645 |
| 565 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */ | 646 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */ |
| 566 signal (__SIGRTMIN + 1, SIG_DFL); | 647 signal (__SIGRTMIN + 1, SIG_DFL); |
| 567 #endif | 648 #endif |
| 568 | 649 |
| 569 setpgid (0, 0); | 650 setpgid (0, 0); |
| 570 | 651 |
| 652 /* If gdbserver is connected to gdb via stdio, redirect the inferior's |
| 653 stdout to stderr so that inferior i/o doesn't corrupt the connection. |
| 654 Also, redirect stdin to /dev/null. */ |
| 655 if (remote_connection_is_stdio ()) |
| 656 { |
| 657 close (0); |
| 658 open ("/dev/null", O_RDONLY); |
| 659 dup2 (2, 1); |
| 660 if (write (2, "stdin/stdout redirected\n", |
| 661 sizeof ("stdin/stdout redirected\n") - 1) < 0) |
| 662 /* Errors ignored. */; |
| 663 } |
| 664 |
| 571 execv (program, allargs); | 665 execv (program, allargs); |
| 572 if (errno == ENOENT) | 666 if (errno == ENOENT) |
| 573 execvp (program, allargs); | 667 execvp (program, allargs); |
| 574 | 668 |
| 575 fprintf (stderr, "Cannot exec %s: %s.\n", program, | 669 fprintf (stderr, "Cannot exec %s: %s.\n", program, |
| 576 strerror (errno)); | 670 strerror (errno)); |
| 577 fflush (stderr); | 671 fflush (stderr); |
| 578 _exit (0177); | 672 _exit (0177); |
| 579 } | 673 } |
| 580 | 674 |
| (...skipping 21 matching lines...) |
| 602 /* Attach to an inferior process. */ | 696 /* Attach to an inferior process. */ |
| 603 | 697 |
| 604 static void | 698 static void |
| 605 linux_attach_lwp_1 (unsigned long lwpid, int initial) | 699 linux_attach_lwp_1 (unsigned long lwpid, int initial) |
| 606 { | 700 { |
| 607 ptid_t ptid; | 701 ptid_t ptid; |
| 608 struct lwp_info *new_lwp; | 702 struct lwp_info *new_lwp; |
| 609 | 703 |
| 610 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0) | 704 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0) |
| 611 { | 705 { |
| 706 struct buffer buffer; |
| 707 |
| 612 if (!initial) | 708 if (!initial) |
| 613 { | 709 { |
| 614 /* If we fail to attach to an LWP, just warn. */ | 710 /* If we fail to attach to an LWP, just warn. */ |
| 615 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid, | 711 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid, |
| 616 strerror (errno), errno); | 712 strerror (errno), errno); |
| 617 fflush (stderr); | 713 fflush (stderr); |
| 618 return; | 714 return; |
| 619 } | 715 } |
| 620 else | 716 |
| 621 » /* If we fail to attach to a process, report an error. */ | 717 /* If we fail to attach to a process, report an error. */ |
| 622 » error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid, | 718 buffer_init (&buffer); |
| 623 » strerror (errno), errno); | 719 linux_ptrace_attach_warnings (lwpid, &buffer); |
| 720 buffer_grow_str0 (&buffer, ""); |
| 721 error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer), |
| 722 » lwpid, strerror (errno), errno); |
| 624 } | 723 } |
| 625 | 724 |
| 626 if (initial) | 725 if (initial) |
| 627 /* If lwp is the tgid, we handle adding existing threads later. | 726 /* If lwp is the tgid, we handle adding existing threads later. |
| 628 Otherwise we just add lwp without bothering about any other | 727 Otherwise we just add lwp without bothering about any other |
| 629 threads. */ | 728 threads. */ |
| 630 ptid = ptid_build (lwpid, lwpid, 0); | 729 ptid = ptid_build (lwpid, lwpid, 0); |
| 631 else | 730 else |
| 632 { | 731 { |
| 633 /* Note that extracting the pid from the current inferior is | 732 /* Note that extracting the pid from the current inferior is |
| 634 safe, since we're always called in the context of the same | 733 safe, since we're always called in the context of the same |
| 635 process as this new thread. */ | 734 process as this new thread. */ |
| 636 int pid = pid_of (get_thread_lwp (current_inferior)); | 735 int pid = pid_of (get_thread_lwp (current_inferior)); |
| 637 ptid = ptid_build (pid, lwpid, 0); | 736 ptid = ptid_build (pid, lwpid, 0); |
| 638 } | 737 } |
| 639 | 738 |
| 640 new_lwp = (struct lwp_info *) add_lwp (ptid); | 739 new_lwp = (struct lwp_info *) add_lwp (ptid); |
| 641 add_thread (ptid, new_lwp); | 740 add_thread (ptid, new_lwp); |
| 642 | 741 |
| 643 /* We need to wait for SIGSTOP before being able to make the next | 742 /* We need to wait for SIGSTOP before being able to make the next |
| 644 ptrace call on this LWP. */ | 743 ptrace call on this LWP. */ |
| 645 new_lwp->must_set_ptrace_flags = 1; | 744 new_lwp->must_set_ptrace_flags = 1; |
| 646 | 745 |
| 746 if (linux_proc_pid_is_stopped (lwpid)) |
| 747 { |
| 748 if (debug_threads) |
| 749 fprintf (stderr, |
| 750 "Attached to a stopped process\n"); |
| 751 |
| 752 /* The process is definitely stopped. It is in a job control |
| 753 stop, unless the kernel predates the TASK_STOPPED / |
| 754 TASK_TRACED distinction, in which case it might be in a |
| 755 ptrace stop. Make sure it is in a ptrace stop; from there we |
| 756 can kill it, signal it, et cetera. |
| 757 |
| 758 First make sure there is a pending SIGSTOP. Since we are |
| 759 already attached, the process can not transition from stopped |
| 760 to running without a PTRACE_CONT; so we know this signal will |
| 761 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is |
| 762 probably already in the queue (unless this kernel is old |
| 763 enough to use TASK_STOPPED for ptrace stops); but since |
| 764 SIGSTOP is not an RT signal, it can only be queued once. */ |
| 765 kill_lwp (lwpid, SIGSTOP); |
| 766 |
| 767 /* Finally, resume the stopped process. This will deliver the |
| 768 SIGSTOP (or a higher priority signal, just like normal |
| 769 PTRACE_ATTACH), which we'll catch later on. */ |
| 770 ptrace (PTRACE_CONT, lwpid, 0, 0); |
| 771 } |
| 772 |
| 647 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH | 773 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH |
| 648 brings it to a halt. | 774 brings it to a halt. |
| 649 | 775 |
| 650 There are several cases to consider here: | 776 There are several cases to consider here: |
| 651 | 777 |
| 652 1) gdbserver has already attached to the process and is being notified | 778 1) gdbserver has already attached to the process and is being notified |
| 653 of a new thread that is being created. | 779 of a new thread that is being created. |
| 654 In this case we should ignore that SIGSTOP and resume the | 780 In this case we should ignore that SIGSTOP and resume the |
| 655 process. This is handled below by setting stop_expected = 1, | 781 process. This is handled below by setting stop_expected = 1, |
| 656 and the fact that add_thread sets last_resume_kind == | 782 and the fact that add_thread sets last_resume_kind == |
| (...skipping 27 matching lines...) |
| 684 | 810 |
| 685 void | 811 void |
| 686 linux_attach_lwp (unsigned long lwpid) | 812 linux_attach_lwp (unsigned long lwpid) |
| 687 { | 813 { |
| 688 linux_attach_lwp_1 (lwpid, 0); | 814 linux_attach_lwp_1 (lwpid, 0); |
| 689 } | 815 } |
| 690 | 816 |
| 691 /* Attach to PID. If PID is the tgid, attach to it and all | 817 /* Attach to PID. If PID is the tgid, attach to it and all |
| 692 of its threads. */ | 818 of its threads. */ |
| 693 | 819 |
| 694 int | 820 static int |
| 695 linux_attach (unsigned long pid) | 821 linux_attach (unsigned long pid) |
| 696 { | 822 { |
| 697 /* Attach to PID. We will check for other threads | 823 /* Attach to PID. We will check for other threads |
| 698 soon. */ | 824 soon. */ |
| 699 linux_attach_lwp_1 (pid, 1); | 825 linux_attach_lwp_1 (pid, 1); |
| 700 linux_add_process (pid, 1); | 826 linux_add_process (pid, 1); |
| 701 | 827 |
| 702 if (!non_stop) | 828 if (!non_stop) |
| 703 { | 829 { |
| 704 struct thread_info *thread; | 830 struct thread_info *thread; |
| (...skipping 90 matching lines...) |
| 795 last_thread_of_process_p (struct thread_info *thread) | 921 last_thread_of_process_p (struct thread_info *thread) |
| 796 { | 922 { |
| 797 ptid_t ptid = ((struct inferior_list_entry *)thread)->id; | 923 ptid_t ptid = ((struct inferior_list_entry *)thread)->id; |
| 798 int pid = ptid_get_pid (ptid); | 924 int pid = ptid_get_pid (ptid); |
| 799 struct counter counter = { pid , 0 }; | 925 struct counter counter = { pid , 0 }; |
| 800 | 926 |
| 801 return (find_inferior (&all_threads, | 927 return (find_inferior (&all_threads, |
| 802 second_thread_of_pid_p, &counter) == NULL); | 928 second_thread_of_pid_p, &counter) == NULL); |
| 803 } | 929 } |
| 804 | 930 |
| 805 /* Kill the inferior lwp. */ | 931 /* Kill LWP. */ |
| 932 |
| 933 static void |
| 934 linux_kill_one_lwp (struct lwp_info *lwp) |
| 935 { |
| 936 int pid = lwpid_of (lwp); |
| 937 |
| 938 /* PTRACE_KILL is unreliable. After stepping into a signal handler, |
| 939 there is no signal context, and ptrace(PTRACE_KILL) (or |
| 940 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like |
| 941 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better |
| 942 alternative is to kill with SIGKILL. We only need one SIGKILL |
| 943 per process, not one for each thread. But since we still support |
| 944 linuxthreads, and we also support debugging programs using raw |
| 945 clone without CLONE_THREAD, we send one for each thread. For |
| 946 years, we used PTRACE_KILL only, so we're being a bit paranoid |
| 947 about some old kernels where PTRACE_KILL might work better |
| 948 (dubious if there are any such, but that's why it's paranoia), so |
| 949 we try SIGKILL first, PTRACE_KILL second, and so we're fine |
| 950 everywhere. */ |
| 951 |
| 952 errno = 0; |
| 953 kill (pid, SIGKILL); |
| 954 if (debug_threads) |
| 955 fprintf (stderr, |
| 956 » "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n", |
| 957 » target_pid_to_str (ptid_of (lwp)), |
| 958 » errno ? strerror (errno) : "OK"); |
| 959 |
| 960 errno = 0; |
| 961 ptrace (PTRACE_KILL, pid, 0, 0); |
| 962 if (debug_threads) |
| 963 fprintf (stderr, |
| 964 » "LKL: PTRACE_KILL %s, 0, 0 (%s)\n", |
| 965 » target_pid_to_str (ptid_of (lwp)), |
| 966 » errno ? strerror (errno) : "OK"); |
| 967 } |
| 968 |
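The attach path above and linux_detach_one_lwp below send signals through kill_lwp rather than plain kill. That helper is defined elsewhere in this file; roughly, it prefers the tkill syscall so the signal is queued for the specific thread rather than the whole thread group, and falls back to kill where tkill is unavailable. A sketch under that assumption (not the verbatim gdbserver definition):

#include <errno.h>
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch of a per-thread signal helper: try tkill first so the signal
   targets this specific LWP, and fall back to kill(2) on kernels or
   libcs without tkill.  Not the verbatim gdbserver implementation.  */
static int
send_signal_to_lwp (unsigned long lwpid, int signo)
{
#ifdef __NR_tkill
  {
    int ret;

    errno = 0;
    ret = syscall (__NR_tkill, lwpid, signo);
    if (ret == 0 || errno != ENOSYS)
      return ret;
  }
#endif
  return kill ((pid_t) lwpid, signo);
}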
| 969 /* Callback for `find_inferior'. Kills an lwp of a given process, |
| 970 except the leader. */ |
| 806 | 971 |
| 807 static int | 972 static int |
| 808 linux_kill_one_lwp (struct inferior_list_entry *entry, void *args) | 973 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args) |
| 809 { | 974 { |
| 810 struct thread_info *thread = (struct thread_info *) entry; | 975 struct thread_info *thread = (struct thread_info *) entry; |
| 811 struct lwp_info *lwp = get_thread_lwp (thread); | 976 struct lwp_info *lwp = get_thread_lwp (thread); |
| 812 int wstat; | 977 int wstat; |
| 813 int pid = * (int *) args; | 978 int pid = * (int *) args; |
| 814 | 979 |
| 815 if (ptid_get_pid (entry->id) != pid) | 980 if (ptid_get_pid (entry->id) != pid) |
| 816 return 0; | 981 return 0; |
| 817 | 982 |
| 818 /* We avoid killing the first thread here, because of a Linux kernel (at | 983 /* We avoid killing the first thread here, because of a Linux kernel (at |
| 819 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before | 984 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before |
| 820 the children get a chance to be reaped, it will remain a zombie | 985 the children get a chance to be reaped, it will remain a zombie |
| 821 forever. */ | 986 forever. */ |
| 822 | 987 |
| 823 if (lwpid_of (lwp) == pid) | 988 if (lwpid_of (lwp) == pid) |
| 824 { | 989 { |
| 825 if (debug_threads) | 990 if (debug_threads) |
| 826 fprintf (stderr, "lkop: is last of process %s\n", | 991 fprintf (stderr, "lkop: is last of process %s\n", |
| 827 target_pid_to_str (entry->id)); | 992 target_pid_to_str (entry->id)); |
| 828 return 0; | 993 return 0; |
| 829 } | 994 } |
| 830 | 995 |
| 831 do | 996 do |
| 832 { | 997 { |
| 833 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0); | 998 linux_kill_one_lwp (lwp); |
| 834 | 999 |
| 835 /* Make sure it died. The loop is most likely unnecessary. */ | 1000 /* Make sure it died. The loop is most likely unnecessary. */ |
| 836 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL); | 1001 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL); |
| 837 } while (pid > 0 && WIFSTOPPED (wstat)); | 1002 } while (pid > 0 && WIFSTOPPED (wstat)); |
| 838 | 1003 |
| 839 return 0; | 1004 return 0; |
| 840 } | 1005 } |
| 841 | 1006 |
| 842 static int | 1007 static int |
| 843 linux_kill (int pid) | 1008 linux_kill (int pid) |
| 844 { | 1009 { |
| 845 struct process_info *process; | 1010 struct process_info *process; |
| 846 struct lwp_info *lwp; | 1011 struct lwp_info *lwp; |
| 847 int wstat; | 1012 int wstat; |
| 848 int lwpid; | 1013 int lwpid; |
| 849 | 1014 |
| 850 process = find_process_pid (pid); | 1015 process = find_process_pid (pid); |
| 851 if (process == NULL) | 1016 if (process == NULL) |
| 852 return -1; | 1017 return -1; |
| 853 | 1018 |
| 854 /* If we're killing a running inferior, make sure it is stopped | 1019 /* If we're killing a running inferior, make sure it is stopped |
| 855 first, as PTRACE_KILL will not work otherwise. */ | 1020 first, as PTRACE_KILL will not work otherwise. */ |
| 856 stop_all_lwps (0, NULL); | 1021 stop_all_lwps (0, NULL); |
| 857 | 1022 |
| 858 find_inferior (&all_threads, linux_kill_one_lwp, &pid); | 1023 find_inferior (&all_threads, kill_one_lwp_callback, &pid); |
| 859 | 1024 |
| 860 /* See the comment in linux_kill_one_lwp. We did not kill the first | 1025 /* See the comment in linux_kill_one_lwp. We did not kill the first |
| 861 thread in the list, so do so now. */ | 1026 thread in the list, so do so now. */ |
| 862 lwp = find_lwp_pid (pid_to_ptid (pid)); | 1027 lwp = find_lwp_pid (pid_to_ptid (pid)); |
| 863 | 1028 |
| 864 if (lwp == NULL) | 1029 if (lwp == NULL) |
| 865 { | 1030 { |
| 866 if (debug_threads) | 1031 if (debug_threads) |
| 867 fprintf (stderr, "lk_1: cannot find lwp %ld, for pid: %d\n", | 1032 fprintf (stderr, "lk_1: cannot find lwp %ld, for pid: %d\n", |
| 868 lwpid_of (lwp), pid); | 1033 lwpid_of (lwp), pid); |
| 869 } | 1034 } |
| 870 else | 1035 else |
| 871 { | 1036 { |
| 872 if (debug_threads) | 1037 if (debug_threads) |
| 873 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n", | 1038 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n", |
| 874 lwpid_of (lwp), pid); | 1039 lwpid_of (lwp), pid); |
| 875 | 1040 |
| 876 do | 1041 do |
| 877 { | 1042 { |
| 878 » ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0); | 1043 » linux_kill_one_lwp (lwp); |
| 879 | 1044 |
| 880 /* Make sure it died. The loop is most likely unnecessary. */ | 1045 /* Make sure it died. The loop is most likely unnecessary. */ |
| 881 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL); | 1046 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL); |
| 882 } while (lwpid > 0 && WIFSTOPPED (wstat)); | 1047 } while (lwpid > 0 && WIFSTOPPED (wstat)); |
| 883 } | 1048 } |
| 884 | 1049 |
| 885 the_target->mourn (process); | 1050 the_target->mourn (process); |
| 886 | 1051 |
| 887 /* Since we presently can only stop all lwps of all processes, we | 1052 /* Since we presently can only stop all lwps of all processes, we |
| 888 need to unstop lwps of other processes. */ | 1053 need to unstop lwps of other processes. */ |
| 889 unstop_all_lwps (0, NULL); | 1054 unstop_all_lwps (0, NULL); |
| 890 return 0; | 1055 return 0; |
| 891 } | 1056 } |
| 892 | 1057 |
| 1058 /* Get pending signal of THREAD, for detaching purposes. This is the |
| 1059 signal the thread last stopped for, which we need to deliver to the |
| 1060 thread when detaching, otherwise, it'd be suppressed/lost. */ |
| 1061 |
| 1062 static int |
| 1063 get_detach_signal (struct thread_info *thread) |
| 1064 { |
| 1065 enum gdb_signal signo = GDB_SIGNAL_0; |
| 1066 int status; |
| 1067 struct lwp_info *lp = get_thread_lwp (thread); |
| 1068 |
| 1069 if (lp->status_pending_p) |
| 1070 status = lp->status_pending; |
| 1071 else |
| 1072 { |
| 1073 /* If the thread had been suspended by gdbserver, and it stopped |
| 1074 cleanly, then it'll have stopped with SIGSTOP. But we don't |
| 1075 want to deliver that SIGSTOP. */ |
| 1076 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED |
| 1077 || thread->last_status.value.sig == GDB_SIGNAL_0) |
| 1078 return 0; |
| 1079 |
| 1080 /* Otherwise, we may need to deliver the signal we |
| 1081 intercepted. */ |
| 1082 status = lp->last_status; |
| 1083 } |
| 1084 |
| 1085 if (!WIFSTOPPED (status)) |
| 1086 { |
| 1087 if (debug_threads) |
| 1088 fprintf (stderr, |
| 1089 "GPS: lwp %s hasn't stopped: no pending signal\n", |
| 1090 target_pid_to_str (ptid_of (lp))); |
| 1091 return 0; |
| 1092 } |
| 1093 |
| 1094 /* Extended wait statuses aren't real SIGTRAPs. */ |
| 1095 if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0) |
| 1096 { |
| 1097 if (debug_threads) |
| 1098 fprintf (stderr, |
| 1099 "GPS: lwp %s had stopped with extended " |
| 1100 "status: no pending signal\n", |
| 1101 target_pid_to_str (ptid_of (lp))); |
| 1102 return 0; |
| 1103 } |
| 1104 |
| 1105 signo = gdb_signal_from_host (WSTOPSIG (status)); |
| 1106 |
| 1107 if (program_signals_p && !program_signals[signo]) |
| 1108 { |
| 1109 if (debug_threads) |
| 1110 fprintf (stderr, |
| 1111 "GPS: lwp %s had signal %s, but it is in nopass state\n", |
| 1112 target_pid_to_str (ptid_of (lp)), |
| 1113 gdb_signal_to_string (signo)); |
| 1114 return 0; |
| 1115 } |
| 1116 else if (!program_signals_p |
| 1117 /* If we have no way to know which signals GDB does not |
| 1118 want to have passed to the program, assume |
| 1119 SIGTRAP/SIGINT, which is GDB's default. */ |
| 1120 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT)) |
| 1121 { |
| 1122 if (debug_threads) |
| 1123 fprintf (stderr, |
| 1124 "GPS: lwp %s had signal %s, " |
| 1125 "but we don't know if we should pass it. Default to not.\n", |
| 1126 target_pid_to_str (ptid_of (lp)), |
| 1127 gdb_signal_to_string (signo)); |
| 1128 return 0; |
| 1129 } |
| 1130 else |
| 1131 { |
| 1132 if (debug_threads) |
| 1133 fprintf (stderr, |
| 1134 "GPS: lwp %s has pending signal %s: delivering it.\n", |
| 1135 target_pid_to_str (ptid_of (lp)), |
| 1136 gdb_signal_to_string (signo)); |
| 1137 |
| 1138 return WSTOPSIG (status); |
| 1139 } |
| 1140 } |
| 1141 |
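The extended-status test above (WSTOPSIG of SIGTRAP with nonzero high bits) mirrors the wstat >> 16 extraction in handle_extended_wait: PTRACE_O_TRACECLONE and friends report their PTRACE_EVENT_* code in bits 16 and up of the waitpid status. A standalone decoder, for illustration only:

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

/* Illustration: return the PTRACE_EVENT_* code (e.g. PTRACE_EVENT_CLONE)
   carried by a waitpid status, or 0 for an ordinary signal stop.  */
static int
ptrace_event_of_status (int wstat)
{
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
    return wstat >> 16;
  return 0;
}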
| 893 static int | 1142 static int |
| 894 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args) | 1143 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args) |
| 895 { | 1144 { |
| 896 struct thread_info *thread = (struct thread_info *) entry; | 1145 struct thread_info *thread = (struct thread_info *) entry; |
| 897 struct lwp_info *lwp = get_thread_lwp (thread); | 1146 struct lwp_info *lwp = get_thread_lwp (thread); |
| 898 int pid = * (int *) args; | 1147 int pid = * (int *) args; |
| 1148 int sig; |
| 899 | 1149 |
| 900 if (ptid_get_pid (entry->id) != pid) | 1150 if (ptid_get_pid (entry->id) != pid) |
| 901 return 0; | 1151 return 0; |
| 902 | 1152 |
| 903 /* If this process is stopped but is expecting a SIGSTOP, then make | 1153 /* If there is a pending SIGSTOP, get rid of it. */ |
| 904 sure we take care of that now. This isn't absolutely guaranteed | |
| 905 to collect the SIGSTOP, but is fairly likely to. */ | |
| 906 if (lwp->stop_expected) | 1154 if (lwp->stop_expected) |
| 907 { | 1155 { |
| 908 int wstat; | 1156 if (debug_threads) |
| 909 /* Clear stop_expected, so that the SIGSTOP will be reported. */ | 1157 » fprintf (stderr, |
| 1158 » » "Sending SIGCONT to %s\n", |
| 1159 » » target_pid_to_str (ptid_of (lwp))); |
| 1160 |
| 1161 kill_lwp (lwpid_of (lwp), SIGCONT); |
| 910 lwp->stop_expected = 0; | 1162 lwp->stop_expected = 0; |
| 911 linux_resume_one_lwp (lwp, 0, 0, NULL); | |
| 912 linux_wait_for_event (lwp->head.id, &wstat, __WALL); | |
| 913 } | 1163 } |
| 914 | 1164 |
| 915 /* Flush any pending changes to the process's registers. */ | 1165 /* Flush any pending changes to the process's registers. */ |
| 916 regcache_invalidate_one ((struct inferior_list_entry *) | 1166 regcache_invalidate_one ((struct inferior_list_entry *) |
| 917 get_lwp_thread (lwp)); | 1167 get_lwp_thread (lwp)); |
| 918 | 1168 |
| 1169 /* Pass on any pending signal for this thread. */ |
| 1170 sig = get_detach_signal (thread); |
| 1171 |
| 919 /* Finally, let it resume. */ | 1172 /* Finally, let it resume. */ |
| 920 ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0); | 1173 if (the_low_target.prepare_to_resume != NULL) |
| 1174 the_low_target.prepare_to_resume (lwp); |
| 1175 if (ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, |
| 1176 » (PTRACE_ARG4_TYPE) (long) sig) < 0) |
| 1177 error (_("Can't detach %s: %s"), |
| 1178 » target_pid_to_str (ptid_of (lwp)), |
| 1179 » strerror (errno)); |
| 921 | 1180 |
| 922 delete_lwp (lwp); | 1181 delete_lwp (lwp); |
| 923 return 0; | 1182 return 0; |
| 924 } | 1183 } |
| 925 | 1184 |
| 926 static int | 1185 static int |
| 927 linux_detach (int pid) | 1186 linux_detach (int pid) |
| 928 { | 1187 { |
| 929 struct process_info *process; | 1188 struct process_info *process; |
| 930 | 1189 |
| (...skipping 165 matching lines...) |
| 1096 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp); | 1355 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp); |
| 1097 | 1356 |
| 1098 child = find_lwp_pid (pid_to_ptid (ret)); | 1357 child = find_lwp_pid (pid_to_ptid (ret)); |
| 1099 | 1358 |
| 1100 /* If we didn't find a process, one of two things presumably happened: | 1359 /* If we didn't find a process, one of two things presumably happened: |
| 1101 - A process we started and then detached from has exited. Ignore it. | 1360 - A process we started and then detached from has exited. Ignore it. |
| 1102 - A process we are controlling has forked and the new child's stop | 1361 - A process we are controlling has forked and the new child's stop |
| 1103 was reported to us by the kernel. Save its PID. */ | 1362 was reported to us by the kernel. Save its PID. */ |
| 1104 if (child == NULL && WIFSTOPPED (*wstatp)) | 1363 if (child == NULL && WIFSTOPPED (*wstatp)) |
| 1105 { | 1364 { |
| 1106 add_pid_to_list (&stopped_pids, ret); | 1365 add_to_pid_list (&stopped_pids, ret, *wstatp); |
| 1107 goto retry; | 1366 goto retry; |
| 1108 } | 1367 } |
| 1109 else if (child == NULL) | 1368 else if (child == NULL) |
| 1110 goto retry; | 1369 goto retry; |
| 1111 | 1370 |
| 1112 child->stopped = 1; | 1371 child->stopped = 1; |
| 1113 | 1372 |
| 1114 child->last_status = *wstatp; | 1373 child->last_status = *wstatp; |
| 1115 | 1374 |
| 1116 /* Architecture-specific setup after inferior is running. | 1375 /* Architecture-specific setup after inferior is running. |
| (...skipping 156 matching lines...) |
| 1273 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat) | 1532 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat) |
| 1274 { | 1533 { |
| 1275 struct thread_info *saved_inferior; | 1534 struct thread_info *saved_inferior; |
| 1276 | 1535 |
| 1277 saved_inferior = current_inferior; | 1536 saved_inferior = current_inferior; |
| 1278 current_inferior = get_lwp_thread (lwp); | 1537 current_inferior = get_lwp_thread (lwp); |
| 1279 | 1538 |
| 1280 if ((wstat == NULL | 1539 if ((wstat == NULL |
| 1281 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP)) | 1540 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP)) |
| 1282 && supports_fast_tracepoints () | 1541 && supports_fast_tracepoints () |
| 1283 && in_process_agent_loaded ()) | 1542 && agent_loaded_p ()) |
| 1284 { | 1543 { |
| 1285 struct fast_tpoint_collect_status status; | 1544 struct fast_tpoint_collect_status status; |
| 1286 int r; | 1545 int r; |
| 1287 | 1546 |
| 1288 if (debug_threads) | 1547 if (debug_threads) |
| 1289 fprintf (stderr, "\ | 1548 fprintf (stderr, "\ |
| 1290 Checking whether LWP %ld needs to move out of the jump pad.\n", | 1549 Checking whether LWP %ld needs to move out of the jump pad.\n", |
| 1291 lwpid_of (lwp)); | 1550 lwpid_of (lwp)); |
| 1292 | 1551 |
| 1293 r = linux_fast_tracepoint_collecting (lwp, &status); | 1552 r = linux_fast_tracepoint_collecting (lwp, &status); |
| (...skipping 253 matching lines...) |
| 1547 being stepped. */ | 1806 being stepped. */ |
| 1548 ptid_t step_over_bkpt; | 1807 ptid_t step_over_bkpt; |
| 1549 | 1808 |
| 1550 /* Wait for an event from child PID. If PID is -1, wait for any | 1809 /* Wait for an event from child PID. If PID is -1, wait for any |
| 1551 child. Store the stop status through the status pointer WSTAT. | 1810 child. Store the stop status through the status pointer WSTAT. |
| 1552 OPTIONS is passed to the waitpid call. Return 0 if no child stop | 1811 OPTIONS is passed to the waitpid call. Return 0 if no child stop |
| 1553 event was found and OPTIONS contains WNOHANG. Return the PID of | 1812 event was found and OPTIONS contains WNOHANG. Return the PID of |
| 1554 the stopped child otherwise. */ | 1813 the stopped child otherwise. */ |
| 1555 | 1814 |
| 1556 static int | 1815 static int |
| 1557 linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options) | 1816 linux_wait_for_event (ptid_t ptid, int *wstat, int options) |
| 1558 { | 1817 { |
| 1559 struct lwp_info *event_child, *requested_child; | 1818 struct lwp_info *event_child, *requested_child; |
| 1819 ptid_t wait_ptid; |
| 1560 | 1820 |
| 1561 event_child = NULL; | 1821 event_child = NULL; |
| 1562 requested_child = NULL; | 1822 requested_child = NULL; |
| 1563 | 1823 |
| 1564 /* Check for a lwp with a pending status. */ | 1824 /* Check for a lwp with a pending status. */ |
| 1565 | 1825 |
| 1566 if (ptid_equal (ptid, minus_one_ptid) | 1826 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid)) |
| 1567 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid)) | |
| 1568 { | 1827 { |
| 1569 event_child = (struct lwp_info *) | 1828 event_child = (struct lwp_info *) |
| 1570 find_inferior (&all_lwps, status_pending_p_callback, &ptid); | 1829 find_inferior (&all_lwps, status_pending_p_callback, &ptid); |
| 1571 if (debug_threads && event_child) | 1830 if (debug_threads && event_child) |
| 1572 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child)); | 1831 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child)); |
| 1573 } | 1832 } |
| 1574 else | 1833 else |
| 1575 { | 1834 { |
| 1576 requested_child = find_lwp_pid (ptid); | 1835 requested_child = find_lwp_pid (ptid); |
| 1577 | 1836 |
| 1578 if (!stopping_threads | 1837 if (stopping_threads == NOT_STOPPING_THREADS |
| 1579 && requested_child->status_pending_p | 1838 && requested_child->status_pending_p |
| 1580 && requested_child->collecting_fast_tracepoint) | 1839 && requested_child->collecting_fast_tracepoint) |
| 1581 { | 1840 { |
| 1582 enqueue_one_deferred_signal (requested_child, | 1841 enqueue_one_deferred_signal (requested_child, |
| 1583 &requested_child->status_pending); | 1842 &requested_child->status_pending); |
| 1584 requested_child->status_pending_p = 0; | 1843 requested_child->status_pending_p = 0; |
| 1585 requested_child->status_pending = 0; | 1844 requested_child->status_pending = 0; |
| 1586 linux_resume_one_lwp (requested_child, 0, 0, NULL); | 1845 linux_resume_one_lwp (requested_child, 0, 0, NULL); |
| 1587 } | 1846 } |
| 1588 | 1847 |
| (...skipping 10 matching lines...) |
| 1599 if (debug_threads) | 1858 if (debug_threads) |
| 1600 fprintf (stderr, "Got an event from pending child %ld (%04x)\n", | 1859 fprintf (stderr, "Got an event from pending child %ld (%04x)\n", |
| 1601 lwpid_of (event_child), event_child->status_pending); | 1860 lwpid_of (event_child), event_child->status_pending); |
| 1602 *wstat = event_child->status_pending; | 1861 *wstat = event_child->status_pending; |
| 1603 event_child->status_pending_p = 0; | 1862 event_child->status_pending_p = 0; |
| 1604 event_child->status_pending = 0; | 1863 event_child->status_pending = 0; |
| 1605 current_inferior = get_lwp_thread (event_child); | 1864 current_inferior = get_lwp_thread (event_child); |
| 1606 return lwpid_of (event_child); | 1865 return lwpid_of (event_child); |
| 1607 } | 1866 } |
| 1608 | 1867 |
| 1868 if (ptid_is_pid (ptid)) |
| 1869 { |
| 1870 /* A request to wait for a specific tgid. This is not possible |
| 1871 with waitpid, so instead, we wait for any child, and leave |
| 1872 children we're not interested in right now with a pending |
| 1873 status to report later. */ |
| 1874 wait_ptid = minus_one_ptid; |
| 1875 } |
| 1876 else |
| 1877 wait_ptid = ptid; |
| 1878 |
| 1609 /* We only enter this loop if no process has a pending wait status. Thus | 1879 /* We only enter this loop if no process has a pending wait status. Thus |
| 1610 any action taken in response to a wait status inside this loop is | 1880 any action taken in response to a wait status inside this loop is |
| 1611 responding as soon as we detect the status, not after any pending | 1881 responding as soon as we detect the status, not after any pending |
| 1612 events. */ | 1882 events. */ |
| 1613 while (1) | 1883 while (1) |
| 1614 { | 1884 { |
| 1615 event_child = linux_wait_for_lwp (ptid, wstat, options); | 1885 event_child = linux_wait_for_lwp (wait_ptid, wstat, options); |
| 1616 | 1886 |
| 1617 if ((options & WNOHANG) && event_child == NULL) | 1887 if ((options & WNOHANG) && event_child == NULL) |
| 1618 { | 1888 { |
| 1619 if (debug_threads) | 1889 if (debug_threads) |
| 1620 fprintf (stderr, "WNOHANG set, no event found\n"); | 1890 fprintf (stderr, "WNOHANG set, no event found\n"); |
| 1621 return 0; | 1891 return 0; |
| 1622 } | 1892 } |
| 1623 | 1893 |
| 1624 if (event_child == NULL) | 1894 if (event_child == NULL) |
| 1625 error ("event from unknown child"); | 1895 error ("event from unknown child"); |
| 1626 | 1896 |
| 1897 if (ptid_is_pid (ptid) |
| 1898 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child))) |
| 1899 { |
| 1900 if (! WIFSTOPPED (*wstat)) |
| 1901 mark_lwp_dead (event_child, *wstat); |
| 1902 else |
| 1903 { |
| 1904 event_child->status_pending_p = 1; |
| 1905 event_child->status_pending = *wstat; |
| 1906 } |
| 1907 continue; |
| 1908 } |
| 1909 |
| 1627 current_inferior = get_lwp_thread (event_child); | 1910 current_inferior = get_lwp_thread (event_child); |
| 1628 | 1911 |
| 1629 /* Check for thread exit. */ | 1912 /* Check for thread exit. */ |
| 1630 if (! WIFSTOPPED (*wstat)) | 1913 if (! WIFSTOPPED (*wstat)) |
| 1631 { | 1914 { |
| 1632 if (debug_threads) | 1915 if (debug_threads) |
| 1633 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child)); | 1916 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child)); |
| 1634 | 1917 |
| 1635 /* If the last thread is exiting, just return. */ | 1918 /* If the last thread is exiting, just return. */ |
| 1636 if (last_thread_of_process_p (current_inferior)) | 1919 if (last_thread_of_process_p (current_inferior)) |
| (...skipping 55 matching lines...) |
| 1692 && WSTOPSIG (*wstat) == SIGSTOP | 1975 && WSTOPSIG (*wstat) == SIGSTOP |
| 1693 && event_child->stop_expected) | 1976 && event_child->stop_expected) |
| 1694 { | 1977 { |
| 1695 int should_stop; | 1978 int should_stop; |
| 1696 | 1979 |
| 1697 if (debug_threads) | 1980 if (debug_threads) |
| 1698 fprintf (stderr, "Expected stop.\n"); | 1981 fprintf (stderr, "Expected stop.\n"); |
| 1699 event_child->stop_expected = 0; | 1982 event_child->stop_expected = 0; |
| 1700 | 1983 |
| 1701 should_stop = (current_inferior->last_resume_kind == resume_stop | 1984 should_stop = (current_inferior->last_resume_kind == resume_stop |
| 1702 » » » || stopping_threads); | 1985 » » » || stopping_threads != NOT_STOPPING_THREADS); |
| 1703 | 1986 |
| 1704 if (!should_stop) | 1987 if (!should_stop) |
| 1705 { | 1988 { |
| 1706 linux_resume_one_lwp (event_child, | 1989 linux_resume_one_lwp (event_child, |
| 1707 event_child->stepping, 0, NULL); | 1990 event_child->stepping, 0, NULL); |
| 1708 continue; | 1991 continue; |
| 1709 } | 1992 } |
| 1710 } | 1993 } |
| 1711 | 1994 |
| 1712 return lwpid_of (event_child); | 1995 return lwpid_of (event_child); |
| 1713 } | 1996 } |
| 1714 | 1997 |
| 1715 /* NOTREACHED */ | 1998 /* NOTREACHED */ |
| 1716 return 0; | 1999 return 0; |
| 1717 } | 2000 } |
| 1718 | 2001 |
| 1719 static int | |
| 1720 linux_wait_for_event (ptid_t ptid, int *wstat, int options) | |
| 1721 { | |
| 1722 ptid_t wait_ptid; | |
| 1723 | |
| 1724 if (ptid_is_pid (ptid)) | |
| 1725 { | |
| 1726 /* A request to wait for a specific tgid. This is not possible | |
| 1727 with waitpid, so instead, we wait for any child, and leave | |
| 1728 children we're not interested in right now with a pending | |
| 1729 status to report later. */ | |
| 1730 wait_ptid = minus_one_ptid; | |
| 1731 } | |
| 1732 else | |
| 1733 wait_ptid = ptid; | |
| 1734 | |
| 1735 while (1) | |
| 1736 { | |
| 1737 int event_pid; | |
| 1738 | |
| 1739 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options); | |
| 1740 | |
| 1741 if (event_pid > 0 | |
| 1742 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid) | |
| 1743 { | |
| 1744 struct lwp_info *event_child | |
| 1745 = find_lwp_pid (pid_to_ptid (event_pid)); | |
| 1746 | |
| 1747 if (! WIFSTOPPED (*wstat)) | |
| 1748 mark_lwp_dead (event_child, *wstat); | |
| 1749 else | |
| 1750 { | |
| 1751 event_child->status_pending_p = 1; | |
| 1752 event_child->status_pending = *wstat; | |
| 1753 } | |
| 1754 } | |
| 1755 else | |
| 1756 return event_pid; | |
| 1757 } | |
| 1758 } | |
| 1759 | |
| 1760 | |
| 1761 /* Count the LWP's that have had events. */ | 2002 /* Count the LWP's that have had events. */ |
| 1762 | 2003 |
| 1763 static int | 2004 static int |
| 1764 count_events_callback (struct inferior_list_entry *entry, void *data) | 2005 count_events_callback (struct inferior_list_entry *entry, void *data) |
| 1765 { | 2006 { |
| 1766 struct lwp_info *lp = (struct lwp_info *) entry; | 2007 struct lwp_info *lp = (struct lwp_info *) entry; |
| 1767 struct thread_info *thread = get_lwp_thread (lp); | 2008 struct thread_info *thread = get_lwp_thread (lp); |
| 1768 int *count = data; | 2009 int *count = data; |
| 1769 | 2010 |
| 1770 gdb_assert (count != NULL); | 2011 gdb_assert (count != NULL); |
| (...skipping 243 matching lines...) |
| 2014 over internal breakpoints and such. */ | 2255 over internal breakpoints and such. */ |
| 2015 linux_wait_1 (minus_one_ptid, &ourstatus, 0); | 2256 linux_wait_1 (minus_one_ptid, &ourstatus, 0); |
| 2016 | 2257 |
| 2017 if (ourstatus.kind == TARGET_WAITKIND_STOPPED) | 2258 if (ourstatus.kind == TARGET_WAITKIND_STOPPED) |
| 2018 { | 2259 { |
| 2019 lwp = get_thread_lwp (current_inferior); | 2260 lwp = get_thread_lwp (current_inferior); |
| 2020 | 2261 |
| 2021 /* Lock it. */ | 2262 /* Lock it. */ |
| 2022 lwp->suspended++; | 2263 lwp->suspended++; |
| 2023 | 2264 |
| 2024 » if (ourstatus.value.sig != TARGET_SIGNAL_0 | 2265 » if (ourstatus.value.sig != GDB_SIGNAL_0 |
| 2025 || current_inferior->last_resume_kind == resume_stop) | 2266 || current_inferior->last_resume_kind == resume_stop) |
| 2026 { | 2267 { |
| 2027 » wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig)); | 2268 » wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig)); |
| 2028 enqueue_one_deferred_signal (lwp, &wstat); | 2269 enqueue_one_deferred_signal (lwp, &wstat); |
| 2029 } | 2270 } |
| 2030 } | 2271 } |
| 2031 } | 2272 } |
| 2032 | 2273 |
| 2033 find_inferior (&all_lwps, unsuspend_one_lwp, NULL); | 2274 find_inferior (&all_lwps, unsuspend_one_lwp, NULL); |
| 2034 | 2275 |
| 2035 stabilizing_threads = 0; | 2276 stabilizing_threads = 0; |
| 2036 | 2277 |
| 2037 current_inferior = save_inferior; | 2278 current_inferior = save_inferior; |
| (...skipping 100 matching lines...) |
| 2138 ourstatus->value.integer = WEXITSTATUS (w); | 2379 ourstatus->value.integer = WEXITSTATUS (w); |
| 2139 | 2380 |
| 2140 if (debug_threads) | 2381 if (debug_threads) |
| 2141 fprintf (stderr, | 2382 fprintf (stderr, |
| 2142 "\nChild exited with retcode = %x \n", | 2383 "\nChild exited with retcode = %x \n", |
| 2143 WEXITSTATUS (w)); | 2384 WEXITSTATUS (w)); |
| 2144 } | 2385 } |
| 2145 else | 2386 else |
| 2146 { | 2387 { |
| 2147 ourstatus->kind = TARGET_WAITKIND_SIGNALLED; | 2388 ourstatus->kind = TARGET_WAITKIND_SIGNALLED; |
| 2148 » ourstatus->value.sig = target_signal_from_host (WTERMSIG (w)); | 2389 » ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w)); |
| 2149 | 2390 |
| 2150 if (debug_threads) | 2391 if (debug_threads) |
| 2151 fprintf (stderr, | 2392 fprintf (stderr, |
| 2152 "\nChild terminated with signal = %x \n", | 2393 "\nChild terminated with signal = %x \n", |
| 2153 WTERMSIG (w)); | 2394 WTERMSIG (w)); |
| 2154 | 2395 |
| 2155 } | 2396 } |
| 2156 | 2397 |
| 2157 return ptid_of (event_child); | 2398 return ptid_of (event_child); |
| 2158 } | 2399 } |
| (...skipping 63 matching lines...) |
| 2222 /* We have all the data we need. Either report the event to GDB, or | 2463 /* We have all the data we need. Either report the event to GDB, or |
| 2223 resume threads and keep waiting for more. */ | 2464 resume threads and keep waiting for more. */ |
| 2224 | 2465 |
| 2225 /* If we're collecting a fast tracepoint, finish the collection and | 2466 /* If we're collecting a fast tracepoint, finish the collection and |
| 2226 move out of the jump pad before delivering a signal. See | 2467 move out of the jump pad before delivering a signal. See |
| 2227 linux_stabilize_threads. */ | 2468 linux_stabilize_threads. */ |
| 2228 | 2469 |
| 2229 if (WIFSTOPPED (w) | 2470 if (WIFSTOPPED (w) |
| 2230 && WSTOPSIG (w) != SIGTRAP | 2471 && WSTOPSIG (w) != SIGTRAP |
| 2231 && supports_fast_tracepoints () | 2472 && supports_fast_tracepoints () |
| 2232 && in_process_agent_loaded ()) | 2473 && agent_loaded_p ()) |
| 2233 { | 2474 { |
| 2234 if (debug_threads) | 2475 if (debug_threads) |
| 2235 fprintf (stderr, | 2476 fprintf (stderr, |
| 2236 "Got signal %d for LWP %ld. Check if we need " | 2477 "Got signal %d for LWP %ld. Check if we need " |
| 2237 "to defer or adjust it.\n", | 2478 "to defer or adjust it.\n", |
| 2238 WSTOPSIG (w), lwpid_of (event_child)); | 2479 WSTOPSIG (w), lwpid_of (event_child)); |
| 2239 | 2480 |
| 2240 /* Allow debugging the jump pad itself. */ | 2481 /* Allow debugging the jump pad itself. */ |
| 2241 if (current_inferior->last_resume_kind != resume_step | 2482 if (current_inferior->last_resume_kind != resume_step |
| 2242 && maybe_move_out_of_jump_pad (event_child, &w)) | 2483 && maybe_move_out_of_jump_pad (event_child, &w)) |
| (...skipping 67 matching lines...) |
| 2310 fprintf (stderr, "dequeued one signal.\n"); | 2551 fprintf (stderr, "dequeued one signal.\n"); |
| 2311 } | 2552 } |
| 2312 else | 2553 else |
| 2313 { | 2554 { |
| 2314 if (debug_threads) | 2555 if (debug_threads) |
| 2315 fprintf (stderr, "no deferred signals.\n"); | 2556 fprintf (stderr, "no deferred signals.\n"); |
| 2316 | 2557 |
| 2317 if (stabilizing_threads) | 2558 if (stabilizing_threads) |
| 2318 { | 2559 { |
| 2319 ourstatus->kind = TARGET_WAITKIND_STOPPED; | 2560 ourstatus->kind = TARGET_WAITKIND_STOPPED; |
| 2320 » » ourstatus->value.sig = TARGET_SIGNAL_0; | 2561 » » ourstatus->value.sig = GDB_SIGNAL_0; |
| 2321 return ptid_of (event_child); | 2562 return ptid_of (event_child); |
| 2322 } | 2563 } |
| 2323 } | 2564 } |
| 2324 } | 2565 } |
| 2325 } | 2566 } |
| 2326 | 2567 |
| 2327 /* Check whether GDB would be interested in this event. */ | 2568 /* Check whether GDB would be interested in this event. */ |
| 2328 | 2569 |
| 2329 /* If GDB is not interested in this signal, don't stop other | 2570 /* If GDB is not interested in this signal, don't stop other |
| 2330 threads, and don't report it to GDB. Just resume the inferior | 2571 threads, and don't report it to GDB. Just resume the inferior |
| 2331 right away. We do this for threading-related signals as well as | 2572 right away. We do this for threading-related signals as well as |
| 2332 any that GDB specifically requested we ignore. But never ignore | 2573 any that GDB specifically requested we ignore. But never ignore |
| 2333 SIGSTOP if we sent it ourselves, and do not ignore signals when | 2574 SIGSTOP if we sent it ourselves, and do not ignore signals when |
| 2334 stepping - they may require special handling to skip the signal | 2575 stepping - they may require special handling to skip the signal |
| 2335 handler. */ | 2576 handler. */ |
| 2336 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's | 2577 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's |
| 2337 thread library? */ | 2578 thread library? */ |
| 2338 if (WIFSTOPPED (w) | 2579 if (WIFSTOPPED (w) |
| 2339 && current_inferior->last_resume_kind != resume_step | 2580 && current_inferior->last_resume_kind != resume_step |
| 2340 && ( | 2581 && ( |
| 2341 #if defined (USE_THREAD_DB) && !defined (__ANDROID__) | 2582 #if defined (USE_THREAD_DB) && !defined (__ANDROID__) |
| 2342 (current_process ()->private->thread_db != NULL | 2583 (current_process ()->private->thread_db != NULL |
| 2343 && (WSTOPSIG (w) == __SIGRTMIN | 2584 && (WSTOPSIG (w) == __SIGRTMIN |
| 2344 || WSTOPSIG (w) == __SIGRTMIN + 1)) | 2585 || WSTOPSIG (w) == __SIGRTMIN + 1)) |
| 2345 || | 2586 || |
| 2346 #endif | 2587 #endif |
| 2347 » (pass_signals[target_signal_from_host (WSTOPSIG (w))] | 2588 » (pass_signals[gdb_signal_from_host (WSTOPSIG (w))] |
| 2348 && !(WSTOPSIG (w) == SIGSTOP | 2589 && !(WSTOPSIG (w) == SIGSTOP |
| 2349 && current_inferior->last_resume_kind == resume_stop)))) | 2590 && current_inferior->last_resume_kind == resume_stop)))) |
| 2350 { | 2591 { |
| 2351 siginfo_t info, *info_p; | 2592 siginfo_t info, *info_p; |
| 2352 | 2593 |
| 2353 if (debug_threads) | 2594 if (debug_threads) |
| 2354 fprintf (stderr, "Ignored signal %d for LWP %ld.\n", | 2595 fprintf (stderr, "Ignored signal %d for LWP %ld.\n", |
| 2355 WSTOPSIG (w), lwpid_of (event_child)); | 2596 WSTOPSIG (w), lwpid_of (event_child)); |
| 2356 | 2597 |
| 2357 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0) | 2598 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0) |
| (...skipping 11 matching lines...) |
| 2369 SIGTRAP we can't explain could be a GDB breakpoint --- we may or | 2610 SIGTRAP we can't explain could be a GDB breakpoint --- we may or |
| 2370 may not support Z0 breakpoints. If we do, we'll be able to handle | 2611 may not support Z0 breakpoints. If we do, we'll be able to handle |
| 2371 GDB breakpoints on top of internal breakpoints, by handling the | 2612 GDB breakpoints on top of internal breakpoints, by handling the |
| 2372 internal breakpoint and still reporting the event to GDB. If we | 2613 internal breakpoint and still reporting the event to GDB. If we |
| 2373 don't, we're out of luck, GDB won't see the breakpoint hit. */ | 2614 don't, we're out of luck, GDB won't see the breakpoint hit. */ |
| 2374 report_to_gdb = (!maybe_internal_trap | 2615 report_to_gdb = (!maybe_internal_trap |
| 2375 || current_inferior->last_resume_kind == resume_step | 2616 || current_inferior->last_resume_kind == resume_step |
| 2376 || event_child->stopped_by_watchpoint | 2617 || event_child->stopped_by_watchpoint |
| 2377 || (!step_over_finished | 2618 || (!step_over_finished |
| 2378 && !bp_explains_trap && !trace_event) | 2619 && !bp_explains_trap && !trace_event) |
| 2379 » » || gdb_breakpoint_here (event_child->stop_pc)); | 2620 » » || (gdb_breakpoint_here (event_child->stop_pc) |
| 2621 » » && gdb_condition_true_at_breakpoint (event_child->stop_pc) |
| 2622 » » && gdb_no_commands_at_breakpoint (event_child->stop_pc))); |
| 2623 |
| 2624 run_breakpoint_commands (event_child->stop_pc); |
| 2380 | 2625 |
| 2381 /* We found no reason GDB would want us to stop. We either hit one | 2626 /* We found no reason GDB would want us to stop. We either hit one |
| 2382 of our own breakpoints, or finished an internal step GDB | 2627 of our own breakpoints, or finished an internal step GDB |
| 2383 shouldn't know about. */ | 2628 shouldn't know about. */ |
| 2384 if (!report_to_gdb) | 2629 if (!report_to_gdb) |
| 2385 { | 2630 { |
| 2386 if (debug_threads) | 2631 if (debug_threads) |
| 2387 { | 2632 { |
| 2388 if (bp_explains_trap) | 2633 if (bp_explains_trap) |
| 2389 fprintf (stderr, "Hit a gdbserver breakpoint.\n"); | 2634 fprintf (stderr, "Hit a gdbserver breakpoint.\n"); |
| (...skipping 62 matching lines...) |
| 2452 event_child->status_pending_p = 0; | 2697 event_child->status_pending_p = 0; |
| 2453 w = event_child->status_pending; | 2698 w = event_child->status_pending; |
| 2454 } | 2699 } |
| 2455 | 2700 |
| 2456 /* Now that we've selected our final event LWP, cancel any | 2701 /* Now that we've selected our final event LWP, cancel any |
| 2457 breakpoints in other LWPs that have hit a GDB breakpoint. | 2702 breakpoints in other LWPs that have hit a GDB breakpoint. |
| 2458 See the comment in cancel_breakpoints_callback to find out | 2703 See the comment in cancel_breakpoints_callback to find out |
| 2459 why. */ | 2704 why. */ |
| 2460 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child); | 2705 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child); |
| 2461 | 2706 |
| 2707 /* If we were doing a step-over, all other threads but the stepping one |
| 2708 had been paused in start_step_over, with their suspend counts |
| 2709 incremented. We don't want to do a full unstop/unpause, because we're |
| 2710 in all-stop mode (so we want threads stopped), but we still need to |
| 2711 unsuspend the other threads, to decrement their `suspended' count |
| 2712 back. */ |
| 2713 if (step_over_finished) |
| 2714 unsuspend_all_lwps (event_child); |
| 2715 |
| 2462 /* Stabilize threads (move out of jump pads). */ | 2716 /* Stabilize threads (move out of jump pads). */ |
| 2463 stabilize_threads (); | 2717 stabilize_threads (); |
| 2464 } | 2718 } |
| 2465 else | 2719 else |
| 2466 { | 2720 { |
| 2467 /* If we just finished a step-over, then all threads had been | 2721 /* If we just finished a step-over, then all threads had been |
| 2468 momentarily paused. In all-stop, that's fine, we want | 2722 momentarily paused. In all-stop, that's fine, we want |
| 2469 threads stopped by now anyway. In non-stop, we need to | 2723 threads stopped by now anyway. In non-stop, we need to |
| 2470 re-resume threads that GDB wanted to be running. */ | 2724 re-resume threads that GDB wanted to be running. */ |
| 2471 if (step_over_finished) | 2725 if (step_over_finished) |
| 2472 unstop_all_lwps (1, event_child); | 2726 unstop_all_lwps (1, event_child); |
| 2473 } | 2727 } |
| 2474 | 2728 |
| 2475 ourstatus->kind = TARGET_WAITKIND_STOPPED; | 2729 ourstatus->kind = TARGET_WAITKIND_STOPPED; |
| 2476 | 2730 |
| 2477 if (current_inferior->last_resume_kind == resume_stop | 2731 if (current_inferior->last_resume_kind == resume_stop |
| 2478 && WSTOPSIG (w) == SIGSTOP) | 2732 && WSTOPSIG (w) == SIGSTOP) |
| 2479 { | 2733 { |
| 2480 /* A thread that has been requested to stop by GDB with vCont;t, | 2734 /* A thread that has been requested to stop by GDB with vCont;t, |
| 2481 and it stopped cleanly, so report as SIG0. The use of | 2735 and it stopped cleanly, so report as SIG0. The use of |
| 2482 SIGSTOP is an implementation detail. */ | 2736 SIGSTOP is an implementation detail. */ |
| 2483 ourstatus->value.sig = TARGET_SIGNAL_0; | 2737 ourstatus->value.sig = GDB_SIGNAL_0; |
| 2484 } | 2738 } |
| 2485 else if (current_inferior->last_resume_kind == resume_stop | 2739 else if (current_inferior->last_resume_kind == resume_stop |
| 2486 && WSTOPSIG (w) != SIGSTOP) | 2740 && WSTOPSIG (w) != SIGSTOP) |
| 2487 { | 2741 { |
| 2488 /* A thread that has been requested to stop by GDB with vCont;t, | 2742 /* A thread that has been requested to stop by GDB with vCont;t, |
| 2489 but, it stopped for other reasons. */ | 2743 but, it stopped for other reasons. */ |
| 2490 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w)); | 2744 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w)); |
| 2491 } | 2745 } |
| 2492 else | 2746 else |
| 2493 { | 2747 { |
| 2494 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w)); | 2748 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w)); |
| 2495 } | 2749 } |
| 2496 | 2750 |
| 2497 gdb_assert (ptid_equal (step_over_bkpt, null_ptid)); | 2751 gdb_assert (ptid_equal (step_over_bkpt, null_ptid)); |
| 2498 | 2752 |
| 2499 if (debug_threads) | 2753 if (debug_threads) |
| 2500 fprintf (stderr, "linux_wait ret = %s, %d, %d\n", | 2754 fprintf (stderr, "linux_wait ret = %s, %d, %d\n", |
| 2501 target_pid_to_str (ptid_of (event_child)), | 2755 target_pid_to_str (ptid_of (event_child)), |
| 2502 ourstatus->kind, | 2756 ourstatus->kind, |
| 2503 ourstatus->value.sig); | 2757 ourstatus->value.sig); |
| 2504 | 2758 |
| (...skipping 261 matching lines...) |
| 2766 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data) | 3020 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data) |
| 2767 { | 3021 { |
| 2768 struct lwp_info *lwp = (struct lwp_info *) entry; | 3022 struct lwp_info *lwp = (struct lwp_info *) entry; |
| 2769 struct thread_info *thread = get_lwp_thread (lwp); | 3023 struct thread_info *thread = get_lwp_thread (lwp); |
| 2770 | 3024 |
| 2771 gdb_assert (lwp->suspended == 0); | 3025 gdb_assert (lwp->suspended == 0); |
| 2772 gdb_assert (lwp->stopped); | 3026 gdb_assert (lwp->stopped); |
| 2773 | 3027 |
| 2774 /* Allow debugging the jump pad, gdb_collect, etc.. */ | 3028 /* Allow debugging the jump pad, gdb_collect, etc.. */ |
| 2775 return (supports_fast_tracepoints () | 3029 return (supports_fast_tracepoints () |
| 2776 » && in_process_agent_loaded () | 3030 » && agent_loaded_p () |
| 2777 && (gdb_breakpoint_here (lwp->stop_pc) | 3031 && (gdb_breakpoint_here (lwp->stop_pc) |
| 2778 || lwp->stopped_by_watchpoint | 3032 || lwp->stopped_by_watchpoint |
| 2779 || thread->last_resume_kind == resume_step) | 3033 || thread->last_resume_kind == resume_step) |
| 2780 && linux_fast_tracepoint_collecting (lwp, NULL)); | 3034 && linux_fast_tracepoint_collecting (lwp, NULL)); |
| 2781 } | 3035 } |
| 2782 | 3036 |
| 2783 static void | 3037 static void |
| 2784 move_out_of_jump_pad_callback (struct inferior_list_entry *entry) | 3038 move_out_of_jump_pad_callback (struct inferior_list_entry *entry) |
| 2785 { | 3039 { |
| 2786 struct lwp_info *lwp = (struct lwp_info *) entry; | 3040 struct lwp_info *lwp = (struct lwp_info *) entry; |
| (...skipping 46 matching lines...) |
| 2833 return 1; | 3087 return 1; |
| 2834 } | 3088 } |
| 2835 | 3089 |
| 2836 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL. | 3090 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL. |
| 2837 If SUSPEND, then also increase the suspend count of every LWP, | 3091 If SUSPEND, then also increase the suspend count of every LWP, |
| 2838 except EXCEPT. */ | 3092 except EXCEPT. */ |
| 2839 | 3093 |
| 2840 static void | 3094 static void |
| 2841 stop_all_lwps (int suspend, struct lwp_info *except) | 3095 stop_all_lwps (int suspend, struct lwp_info *except) |
| 2842 { | 3096 { |
| 2843 stopping_threads = 1; | 3097 /* Should not be called recursively. */ |
| 3098 gdb_assert (stopping_threads == NOT_STOPPING_THREADS); |
| 3099 |
| 3100 stopping_threads = (suspend |
| 3101 » » ? STOPPING_AND_SUSPENDING_THREADS |
| 3102 » » : STOPPING_THREADS); |
| 2844 | 3103 |
| 2845 if (suspend) | 3104 if (suspend) |
| 2846 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except); | 3105 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except); |
| 2847 else | 3106 else |
| 2848 find_inferior (&all_lwps, send_sigstop_callback, except); | 3107 find_inferior (&all_lwps, send_sigstop_callback, except); |
| 2849 for_each_inferior (&all_lwps, wait_for_sigstop); | 3108 for_each_inferior (&all_lwps, wait_for_sigstop); |
| 2850 stopping_threads = 0; | 3109 stopping_threads = NOT_STOPPING_THREADS; |
| 2851 } | 3110 } |
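stop_all_lwps now records why threads are being stopped instead of setting a plain flag. The enum itself is defined outside this hunk; presumably it reads roughly like this:

  /* Sketch of the assumed declaration (not visible in this hunk).  */
  enum stopping_threads_kind
    {
      /* Not stopping threads presently.  */
      NOT_STOPPING_THREADS,

      /* Stopping threads.  */
      STOPPING_THREADS,

      /* Stopping and also bumping each LWP's suspend count.  */
      STOPPING_AND_SUSPENDING_THREADS
    };

  /* Replaces the old "int stopping_threads" flag.  */
  enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;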
| 2852 | 3111 |
| 2853 /* Resume execution of the inferior process. | 3112 /* Resume execution of the inferior process. |
| 2854 If STEP is nonzero, single-step it. | 3113 If STEP is nonzero, single-step it. |
| 2855 If SIGNAL is nonzero, give it that signal. */ | 3114 If SIGNAL is nonzero, give it that signal. */ |
| 2856 | 3115 |
| 2857 static void | 3116 static void |
| 2858 linux_resume_one_lwp (struct lwp_info *lwp, | 3117 linux_resume_one_lwp (struct lwp_info *lwp, |
| 2859 int step, int signal, siginfo_t *info) | 3118 int step, int signal, siginfo_t *info) |
| 2860 { | 3119 { |
| (...skipping 215 matching lines...) |
| 3076 | 3335 |
| 3077 thread = (struct thread_info *) entry; | 3336 thread = (struct thread_info *) entry; |
| 3078 lwp = get_thread_lwp (thread); | 3337 lwp = get_thread_lwp (thread); |
| 3079 r = arg; | 3338 r = arg; |
| 3080 | 3339 |
| 3081 for (ndx = 0; ndx < r->n; ndx++) | 3340 for (ndx = 0; ndx < r->n; ndx++) |
| 3082 { | 3341 { |
| 3083 ptid_t ptid = r->resume[ndx].thread; | 3342 ptid_t ptid = r->resume[ndx].thread; |
| 3084 if (ptid_equal (ptid, minus_one_ptid) | 3343 if (ptid_equal (ptid, minus_one_ptid) |
| 3085 || ptid_equal (ptid, entry->id) | 3344 || ptid_equal (ptid, entry->id) |
| 3086 » || (ptid_is_pid (ptid) | 3345 » /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads |
| 3087 » && (ptid_get_pid (ptid) == pid_of (lwp))) | 3346 » of PID'. */ |
| 3088 » || (ptid_get_lwp (ptid) == -1 | 3347 » || (ptid_get_pid (ptid) == pid_of (lwp) |
| 3089 » && (ptid_get_pid (ptid) == pid_of (lwp)))) | 3348 » && (ptid_is_pid (ptid) |
| 3349 » » || ptid_get_lwp (ptid) == -1))) |
| 3090 { | 3350 { |
| 3091 if (r->resume[ndx].kind == resume_stop | 3351 if (r->resume[ndx].kind == resume_stop |
| 3092 && thread->last_resume_kind == resume_stop) | 3352 && thread->last_resume_kind == resume_stop) |
| 3093 { | 3353 { |
| 3094 if (debug_threads) | 3354 if (debug_threads) |
| 3095 fprintf (stderr, "already %s LWP %ld at GDB's request\n", | 3355 fprintf (stderr, "already %s LWP %ld at GDB's request\n", |
| 3096 thread->last_status.kind == TARGET_WAITKIND_STOPPED | 3356 thread->last_status.kind == TARGET_WAITKIND_STOPPED |
| 3097 ? "stopped" | 3357 ? "stopped" |
| 3098 : "stopping", | 3358 : "stopping", |
| 3099 lwpid_of (lwp)); | 3359 lwpid_of (lwp)); |
| (...skipping 132 matching lines...) |
| 3232 return 0; | 3492 return 0; |
| 3233 } | 3493 } |
| 3234 | 3494 |
| 3235 saved_inferior = current_inferior; | 3495 saved_inferior = current_inferior; |
| 3236 current_inferior = thread; | 3496 current_inferior = thread; |
| 3237 | 3497 |
| 3238 /* We can only step over breakpoints we know about. */ | 3498 /* We can only step over breakpoints we know about. */ |
| 3239 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc)) | 3499 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc)) |
| 3240 { | 3500 { |
| 3241 /* Don't step over a breakpoint that GDB expects to hit | 3501 /* Don't step over a breakpoint that GDB expects to hit |
| 3242 » though. */ | 3502 » though. If the condition is being evaluated on the target's side |
| 3243 if (gdb_breakpoint_here (pc)) | 3503 » and it evaluates to false, step over this breakpoint as well. */ |
| 3504 if (gdb_breakpoint_here (pc) |
| 3505 » && gdb_condition_true_at_breakpoint (pc) |
| 3506 » && gdb_no_commands_at_breakpoint (pc)) |
| 3244 { | 3507 { |
| 3245 if (debug_threads) | 3508 if (debug_threads) |
| 3246 fprintf (stderr, | 3509 fprintf (stderr, |
| 3247 "Need step over [LWP %ld]? yes, but found" | 3510 "Need step over [LWP %ld]? yes, but found" |
| 3248 " GDB breakpoint at 0x%s; skipping step over\n", | 3511 " GDB breakpoint at 0x%s; skipping step over\n", |
| 3249 lwpid_of (lwp), paddress (pc)); | 3512 lwpid_of (lwp), paddress (pc)); |
| 3250 | 3513 |
| 3251 current_inferior = saved_inferior; | 3514 current_inferior = saved_inferior; |
| 3252 return 0; | 3515 return 0; |
| 3253 } | 3516 } |
| (...skipping 459 matching lines...) |
| 3713 fprintf (stderr, | 3976 fprintf (stderr, |
| 3714 "unstopping all lwps\n"); | 3977 "unstopping all lwps\n"); |
| 3715 } | 3978 } |
| 3716 | 3979 |
| 3717 if (unsuspend) | 3980 if (unsuspend) |
| 3718 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except); | 3981 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except); |
| 3719 else | 3982 else |
| 3720 find_inferior (&all_lwps, proceed_one_lwp, except); | 3983 find_inferior (&all_lwps, proceed_one_lwp, except); |
| 3721 } | 3984 } |
| 3722 | 3985 |
| 3723 #ifdef HAVE_LINUX_USRREGS | |
| 3724 | |
| 3725 int | |
| 3726 register_addr (int regnum) | |
| 3727 { | |
| 3728 int addr; | |
| 3729 | |
| 3730 if (regnum < 0 || regnum >= the_low_target.num_regs) | |
| 3731 error ("Invalid register number %d.", regnum); | |
| 3732 | |
| 3733 addr = the_low_target.regmap[regnum]; | |
| 3734 | |
| 3735 return addr; | |
| 3736 } | |
| 3737 | |
| 3738 /* Fetch one register. */ | |
| 3739 static void | |
| 3740 fetch_register (struct regcache *regcache, int regno) | |
| 3741 { | |
| 3742 CORE_ADDR regaddr; | |
| 3743 int i, size; | |
| 3744 char *buf; | |
| 3745 int pid; | |
| 3746 | |
| 3747 if (regno >= the_low_target.num_regs) | |
| 3748 return; | |
| 3749 if ((*the_low_target.cannot_fetch_register) (regno)) | |
| 3750 return; | |
| 3751 | |
| 3752 regaddr = register_addr (regno); | |
| 3753 if (regaddr == -1) | |
| 3754 return; | |
| 3755 | |
| 3756 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1) | |
| 3757 & -sizeof (PTRACE_XFER_TYPE)); | |
| 3758 buf = alloca (size); | |
| 3759 | |
| 3760 pid = lwpid_of (get_thread_lwp (current_inferior)); | |
| 3761 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE)) | |
| 3762 { | |
| 3763 errno = 0; | |
| 3764 *(PTRACE_XFER_TYPE *) (buf + i) = | |
| 3765 ptrace (PTRACE_PEEKUSER, pid, | |
| 3766 /* Coerce to a uintptr_t first to avoid potential gcc warning | |
| 3767 of coercing an 8 byte integer to a 4 byte pointer. */ | |
| 3768 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0); | |
| 3769 regaddr += sizeof (PTRACE_XFER_TYPE); | |
| 3770 if (errno != 0) | |
| 3771 error ("reading register %d: %s", regno, strerror (errno)); | |
| 3772 } | |
| 3773 | |
| 3774 if (the_low_target.supply_ptrace_register) | |
| 3775 the_low_target.supply_ptrace_register (regcache, regno, buf); | |
| 3776 else | |
| 3777 supply_register (regcache, regno, buf); | |
| 3778 } | |
| 3779 | |
| 3780 /* Store one register. */ | |
| 3781 static void | |
| 3782 store_register (struct regcache *regcache, int regno) | |
| 3783 { | |
| 3784 CORE_ADDR regaddr; | |
| 3785 int i, size; | |
| 3786 char *buf; | |
| 3787 int pid; | |
| 3788 | |
| 3789 if (regno >= the_low_target.num_regs) | |
| 3790 return; | |
| 3791 if ((*the_low_target.cannot_store_register) (regno)) | |
| 3792 return; | |
| 3793 | |
| 3794 regaddr = register_addr (regno); | |
| 3795 if (regaddr == -1) | |
| 3796 return; | |
| 3797 | |
| 3798 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1) | |
| 3799 & -sizeof (PTRACE_XFER_TYPE)); | |
| 3800 buf = alloca (size); | |
| 3801 memset (buf, 0, size); | |
| 3802 | |
| 3803 if (the_low_target.collect_ptrace_register) | |
| 3804 the_low_target.collect_ptrace_register (regcache, regno, buf); | |
| 3805 else | |
| 3806 collect_register (regcache, regno, buf); | |
| 3807 | |
| 3808 pid = lwpid_of (get_thread_lwp (current_inferior)); | |
| 3809 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE)) | |
| 3810 { | |
| 3811 errno = 0; | |
| 3812 ptrace (PTRACE_POKEUSER, pid, | |
| 3813 /* Coerce to a uintptr_t first to avoid potential gcc warning | |
| 3814 about coercing an 8 byte integer to a 4 byte pointer. */ | |
| 3815 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, | |
| 3816 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i)); | |
| 3817 if (errno != 0) | |
| 3818 { | |
| 3819 /* At this point, ESRCH should mean the process is | |
| 3820 already gone, in which case we simply ignore attempts | |
| 3821 to change its registers. See also the related | |
| 3822 comment in linux_resume_one_lwp. */ | |
| 3823 if (errno == ESRCH) | |
| 3824 return; | |
| 3825 | |
| 3826 if ((*the_low_target.cannot_store_register) (regno) == 0) | |
| 3827 error ("writing register %d: %s", regno, strerror (errno)); | |
| 3828 } | |
| 3829 regaddr += sizeof (PTRACE_XFER_TYPE); | |
| 3830 } | |
| 3831 } | |
| 3832 | |
| 3833 /* Fetch all registers, or just one, from the child process. */ | |
| 3834 static void | |
| 3835 usr_fetch_inferior_registers (struct regcache *regcache, int regno) | |
| 3836 { | |
| 3837 if (regno == -1) | |
| 3838 for (regno = 0; regno < the_low_target.num_regs; regno++) | |
| 3839 fetch_register (regcache, regno); | |
| 3840 else | |
| 3841 fetch_register (regcache, regno); | |
| 3842 } | |
| 3843 | |
| 3844 /* Store our register values back into the inferior. | |
| 3845 If REGNO is -1, do this for all registers. | |
| 3846 Otherwise, REGNO specifies which register (so we can save time). */ | |
| 3847 static void | |
| 3848 usr_store_inferior_registers (struct regcache *regcache, int regno) | |
| 3849 { | |
| 3850 if (regno == -1) | |
| 3851 for (regno = 0; regno < the_low_target.num_regs; regno++) | |
| 3852 store_register (regcache, regno); | |
| 3853 else | |
| 3854 store_register (regcache, regno); | |
| 3855 } | |
| 3856 #endif /* HAVE_LINUX_USRREGS */ | |
| 3857 | |
| 3858 | |
| 3859 | 3986 |
| 3860 #ifdef HAVE_LINUX_REGSETS | 3987 #ifdef HAVE_LINUX_REGSETS |
| 3861 | 3988 |
| 3989 #define use_linux_regsets 1 |
| 3990 |
| 3862 static int | 3991 static int |
| 3863 regsets_fetch_inferior_registers (struct regcache *regcache) | 3992 regsets_fetch_inferior_registers (struct regcache *regcache) |
| 3864 { | 3993 { |
| 3865 struct regset_info *regset; | 3994 struct regset_info *regset; |
| 3866 int saw_general_regs = 0; | 3995 int saw_general_regs = 0; |
| 3867 int pid; | 3996 int pid; |
| 3868 struct iovec iov; | 3997 struct iovec iov; |
| 3869 | 3998 |
| 3870 regset = target_regsets; | 3999 regset = target_regsets; |
| 3871 | 4000 |
| (...skipping 15 matching lines...) |
| 3887 if (nt_type) | 4016 if (nt_type) |
| 3888 { | 4017 { |
| 3889 iov.iov_base = buf; | 4018 iov.iov_base = buf; |
| 3890 iov.iov_len = regset->size; | 4019 iov.iov_len = regset->size; |
| 3891 data = (void *) &iov; | 4020 data = (void *) &iov; |
| 3892 } | 4021 } |
| 3893 else | 4022 else |
| 3894 data = buf; | 4023 data = buf; |
| 3895 | 4024 |
| 3896 #ifndef __sparc__ | 4025 #ifndef __sparc__ |
| 3897 res = ptrace (regset->get_request, pid, nt_type, data); | 4026 res = ptrace (regset->get_request, pid, |
| 4027 » » (PTRACE_ARG3_TYPE) (long) nt_type, data); |
| 3898 #else | 4028 #else |
| 3899 res = ptrace (regset->get_request, pid, data, nt_type); | 4029 res = ptrace (regset->get_request, pid, data, nt_type); |
| 3900 #endif | 4030 #endif |
| 3901 if (res < 0) | 4031 if (res < 0) |
| 3902 { | 4032 { |
| 3903 if (errno == EIO) | 4033 if (errno == EIO) |
| 3904 { | 4034 { |
| 3905 /* If we get EIO on a regset, do not try it again for | 4035 /* If we get EIO on a regset, do not try it again for |
| 3906 this process. */ | 4036 this process. */ |
| 3907 disabled_regsets[regset - target_regsets] = 1; | 4037 disabled_regsets[regset - target_regsets] = 1; |
| (...skipping 52 matching lines...) |
| 3960 if (nt_type) | 4090 if (nt_type) |
| 3961 { | 4091 { |
| 3962 iov.iov_base = buf; | 4092 iov.iov_base = buf; |
| 3963 iov.iov_len = regset->size; | 4093 iov.iov_len = regset->size; |
| 3964 data = (void *) &iov; | 4094 data = (void *) &iov; |
| 3965 } | 4095 } |
| 3966 else | 4096 else |
| 3967 data = buf; | 4097 data = buf; |
| 3968 | 4098 |
| 3969 #ifndef __sparc__ | 4099 #ifndef __sparc__ |
| 3970 res = ptrace (regset->get_request, pid, nt_type, data); | 4100 res = ptrace (regset->get_request, pid, |
| 4101 » » (PTRACE_ARG3_TYPE) (long) nt_type, data); |
| 3971 #else | 4102 #else |
| 3972 res = ptrace (regset->get_request, pid, &iov, data); | 4103 res = ptrace (regset->get_request, pid, data, nt_type); |
| 3973 #endif | 4104 #endif |
| 3974 | 4105 |
| 3975 if (res == 0) | 4106 if (res == 0) |
| 3976 { | 4107 { |
| 3977 /* Then overlay our cached registers on that. */ | 4108 /* Then overlay our cached registers on that. */ |
| 3978 regset->fill_function (regcache, buf); | 4109 regset->fill_function (regcache, buf); |
| 3979 | 4110 |
| 3980 /* Only now do we write the register set. */ | 4111 /* Only now do we write the register set. */ |
| 3981 #ifndef __sparc__ | 4112 #ifndef __sparc__ |
| 3982 » res = ptrace (regset->set_request, pid, nt_type, data); | 4113 » res = ptrace (regset->set_request, pid, |
| 4114 » » » (PTRACE_ARG3_TYPE) (long) nt_type, data); |
| 3983 #else | 4115 #else |
| 3984 res = ptrace (regset->set_request, pid, data, nt_type); | 4116 res = ptrace (regset->set_request, pid, data, nt_type); |
| 3985 #endif | 4117 #endif |
| 3986 } | 4118 } |
| 3987 | 4119 |
| 3988 if (res < 0) | 4120 if (res < 0) |
| 3989 { | 4121 { |
| 3990 if (errno == EIO) | 4122 if (errno == EIO) |
| 3991 { | 4123 { |
| 3992 /* If we get EIO on a regset, do not try it again for | 4124 /* If we get EIO on a regset, do not try it again for |
| (...skipping 18 matching lines...) |
| 4011 } | 4143 } |
| 4012 else if (regset->type == GENERAL_REGS) | 4144 else if (regset->type == GENERAL_REGS) |
| 4013 saw_general_regs = 1; | 4145 saw_general_regs = 1; |
| 4014 regset ++; | 4146 regset ++; |
| 4015 free (buf); | 4147 free (buf); |
| 4016 } | 4148 } |
| 4017 if (saw_general_regs) | 4149 if (saw_general_regs) |
| 4018 return 0; | 4150 return 0; |
| 4019 else | 4151 else |
| 4020 return 1; | 4152 return 1; |
| 4021 return 0; | 4153 } |
| 4022 } | 4154 |
| 4023 | 4155 #else /* !HAVE_LINUX_REGSETS */ |
| 4024 #endif /* HAVE_LINUX_REGSETS */ | 4156 |
| 4157 #define use_linux_regsets 0 |
| 4158 #define regsets_fetch_inferior_registers(regcache) 1 |
| 4159 #define regsets_store_inferior_registers(regcache) 1 |
| 4160 |
| 4161 #endif |
| 4162 |
| 4163 /* Return 1 if register REGNO is supported by one of the regset ptrace |
| 4164 calls or 0 if it has to be transferred individually. */ |
| 4165 |
| 4166 static int |
| 4167 linux_register_in_regsets (int regno) |
| 4168 { |
| 4169 unsigned char mask = 1 << (regno % 8); |
| 4170 size_t index = regno / 8; |
| 4171 |
| 4172 return (use_linux_regsets |
| 4173 » && (the_low_target.regset_bitmap == NULL |
| 4174 » || (the_low_target.regset_bitmap[index] & mask) != 0)); |
| 4175 } |
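linux_register_in_regsets treats the low target's regset_bitmap as a bit array indexed by register number, with a NULL bitmap meaning every register is covered by some regset. A hypothetical 12-register target that moves only registers 0-7 through regsets could describe that as follows (the array name is illustrative, not from the patch):

  /* Bit N set => register N is transferred by a regset.  */
  static const unsigned char example_regset_bitmap[] =
    {
      0xff,  /* registers 0-7:  PTRACE_GETREGSET and friends  */
      0x00   /* registers 8-11: PTRACE_PEEKUSER/POKEUSER path  */
    };

  /* With the_low_target.regset_bitmap == example_regset_bitmap:
       linux_register_in_regsets (3)  -> bitmap[0] & (1 << 3) -> nonzero
       linux_register_in_regsets (10) -> bitmap[1] & (1 << 2) -> 0       */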
| 4176 |
| 4177 #ifdef HAVE_LINUX_USRREGS |
| 4178 |
| 4179 int |
| 4180 register_addr (int regnum) |
| 4181 { |
| 4182 int addr; |
| 4183 |
| 4184 if (regnum < 0 || regnum >= the_low_target.num_regs) |
| 4185 error ("Invalid register number %d.", regnum); |
| 4186 |
| 4187 addr = the_low_target.regmap[regnum]; |
| 4188 |
| 4189 return addr; |
| 4190 } |
| 4191 |
| 4192 /* Fetch one register. */ |
| 4193 static void |
| 4194 fetch_register (struct regcache *regcache, int regno) |
| 4195 { |
| 4196 CORE_ADDR regaddr; |
| 4197 int i, size; |
| 4198 char *buf; |
| 4199 int pid; |
| 4200 |
| 4201 if (regno >= the_low_target.num_regs) |
| 4202 return; |
| 4203 if ((*the_low_target.cannot_fetch_register) (regno)) |
| 4204 return; |
| 4205 |
| 4206 regaddr = register_addr (regno); |
| 4207 if (regaddr == -1) |
| 4208 return; |
| 4209 |
| 4210 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1) |
| 4211 » & -sizeof (PTRACE_XFER_TYPE)); |
| 4212 buf = alloca (size); |
| 4213 |
| 4214 pid = lwpid_of (get_thread_lwp (current_inferior)); |
| 4215 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE)) |
| 4216 { |
| 4217 errno = 0; |
| 4218 *(PTRACE_XFER_TYPE *) (buf + i) = |
| 4219 » ptrace (PTRACE_PEEKUSER, pid, |
| 4220 » » /* Coerce to a uintptr_t first to avoid potential gcc warning |
| 4221 » » of coercing an 8 byte integer to a 4 byte pointer. */ |
| 4222 » » (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0); |
| 4223 regaddr += sizeof (PTRACE_XFER_TYPE); |
| 4224 if (errno != 0) |
| 4225 » error ("reading register %d: %s", regno, strerror (errno)); |
| 4226 } |
| 4227 |
| 4228 if (the_low_target.supply_ptrace_register) |
| 4229 the_low_target.supply_ptrace_register (regcache, regno, buf); |
| 4230 else |
| 4231 supply_register (regcache, regno, buf); |
| 4232 } |
| 4233 |
| 4234 /* Store one register. */ |
| 4235 static void |
| 4236 store_register (struct regcache *regcache, int regno) |
| 4237 { |
| 4238 CORE_ADDR regaddr; |
| 4239 int i, size; |
| 4240 char *buf; |
| 4241 int pid; |
| 4242 |
| 4243 if (regno >= the_low_target.num_regs) |
| 4244 return; |
| 4245 if ((*the_low_target.cannot_store_register) (regno)) |
| 4246 return; |
| 4247 |
| 4248 regaddr = register_addr (regno); |
| 4249 if (regaddr == -1) |
| 4250 return; |
| 4251 |
| 4252 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1) |
| 4253 » & -sizeof (PTRACE_XFER_TYPE)); |
| 4254 buf = alloca (size); |
| 4255 memset (buf, 0, size); |
| 4256 |
| 4257 if (the_low_target.collect_ptrace_register) |
| 4258 the_low_target.collect_ptrace_register (regcache, regno, buf); |
| 4259 else |
| 4260 collect_register (regcache, regno, buf); |
| 4261 |
| 4262 pid = lwpid_of (get_thread_lwp (current_inferior)); |
| 4263 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE)) |
| 4264 { |
| 4265 errno = 0; |
| 4266 ptrace (PTRACE_POKEUSER, pid, |
| 4267 » /* Coerce to a uintptr_t first to avoid potential gcc warning |
| 4268 » about coercing an 8 byte integer to a 4 byte pointer. */ |
| 4269 » (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, |
| 4270 » (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i)); |
| 4271 if (errno != 0) |
| 4272 » { |
| 4273 » /* At this point, ESRCH should mean the process is |
| 4274 » already gone, in which case we simply ignore attempts |
| 4275 » to change its registers. See also the related |
| 4276 » comment in linux_resume_one_lwp. */ |
| 4277 » if (errno == ESRCH) |
| 4278 » return; |
| 4279 |
| 4280 » if ((*the_low_target.cannot_store_register) (regno) == 0) |
| 4281 » error ("writing register %d: %s", regno, strerror (errno)); |
| 4282 » } |
| 4283 regaddr += sizeof (PTRACE_XFER_TYPE); |
| 4284 } |
| 4285 } |
| 4286 |
| 4287 /* Fetch all registers, or just one, from the child process. |
| 4288 If REGNO is -1, do this for all registers, skipping any that are |
| 4289 assumed to have been retrieved by regsets_fetch_inferior_registers, |
| 4290 unless ALL is non-zero. |
| 4291 Otherwise, REGNO specifies which register (so we can save time). */ |
| 4292 static void |
| 4293 usr_fetch_inferior_registers (struct regcache *regcache, int regno, int all) |
| 4294 { |
| 4295 if (regno == -1) |
| 4296 { |
| 4297 for (regno = 0; regno < the_low_target.num_regs; regno++) |
| 4298 » if (all || !linux_register_in_regsets (regno)) |
| 4299 » fetch_register (regcache, regno); |
| 4300 } |
| 4301 else |
| 4302 fetch_register (regcache, regno); |
| 4303 } |
| 4304 |
| 4305 /* Store our register values back into the inferior. |
| 4306 If REGNO is -1, do this for all registers, skipping any that are |
| 4307 assumed to have been saved by regsets_store_inferior_registers, |
| 4308 unless ALL is non-zero. |
| 4309 Otherwise, REGNO specifies which register (so we can save time). */ |
| 4310 static void |
| 4311 usr_store_inferior_registers (struct regcache *regcache, int regno, int all) |
| 4312 { |
| 4313 if (regno == -1) |
| 4314 { |
| 4315 for (regno = 0; regno < the_low_target.num_regs; regno++) |
| 4316 » if (all || !linux_register_in_regsets (regno)) |
| 4317 » store_register (regcache, regno); |
| 4318 } |
| 4319 else |
| 4320 store_register (regcache, regno); |
| 4321 } |
| 4322 |
| 4323 #else /* !HAVE_LINUX_USRREGS */ |
| 4324 |
| 4325 #define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0) |
| 4326 #define usr_store_inferior_registers(regcache, regno, all) do {} while (0) |
| 4327 |
| 4328 #endif |
| 4025 | 4329 |
| 4026 | 4330 |
| 4027 void | 4331 void |
| 4028 linux_fetch_registers (struct regcache *regcache, int regno) | 4332 linux_fetch_registers (struct regcache *regcache, int regno) |
| 4029 { | 4333 { |
| 4030 #ifdef HAVE_LINUX_REGSETS | 4334 int use_regsets; |
| 4031 if (regsets_fetch_inferior_registers (regcache) == 0) | 4335 int all = 0; |
| 4032 return; | 4336 |
| 4033 #endif | 4337 if (regno == -1) |
| 4034 #ifdef HAVE_LINUX_USRREGS | 4338 { |
| 4035 usr_fetch_inferior_registers (regcache, regno); | 4339 if (the_low_target.fetch_register != NULL) |
| 4036 #endif | 4340 » for (regno = 0; regno < the_low_target.num_regs; regno++) |
| 4341 » (*the_low_target.fetch_register) (regcache, regno); |
| 4342 |
| 4343 all = regsets_fetch_inferior_registers (regcache); |
| 4344 usr_fetch_inferior_registers (regcache, -1, all); |
| 4345 } |
| 4346 else |
| 4347 { |
| 4348 if (the_low_target.fetch_register != NULL |
| 4349 » && (*the_low_target.fetch_register) (regcache, regno)) |
| 4350 » return; |
| 4351 |
| 4352 use_regsets = linux_register_in_regsets (regno); |
| 4353 if (use_regsets) |
| 4354 » all = regsets_fetch_inferior_registers (regcache); |
| 4355 if (!use_regsets || all) |
| 4356 » usr_fetch_inferior_registers (regcache, regno, 1); |
| 4357 } |
| 4037 } | 4358 } |
| 4038 | 4359 |
| 4039 void | 4360 void |
| 4040 linux_store_registers (struct regcache *regcache, int regno) | 4361 linux_store_registers (struct regcache *regcache, int regno) |
| 4041 { | 4362 { |
| 4042 #ifdef HAVE_LINUX_REGSETS | 4363 int use_regsets; |
| 4043 if (regsets_store_inferior_registers (regcache) == 0) | 4364 int all = 0; |
| 4044 return; | 4365 |
| 4045 #endif | 4366 if (regno == -1) |
| 4046 #ifdef HAVE_LINUX_USRREGS | 4367 { |
| 4047 usr_store_inferior_registers (regcache, regno); | 4368 all = regsets_store_inferior_registers (regcache); |
| 4048 #endif | 4369 usr_store_inferior_registers (regcache, regno, all); |
| 4049 } | 4370 } |
| 4050 | 4371 else |
| 4051 | 4372 { |
| 4373 use_regsets = linux_register_in_regsets (regno); |
| 4374 if (use_regsets) |
| 4375 » all = regsets_store_inferior_registers (regcache); |
| 4376 if (!use_regsets || all) |
| 4377 » usr_store_inferior_registers (regcache, regno, 1); |
| 4378 } |
| 4379 } |
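Taken together, the rewritten linux_fetch_registers and linux_store_registers dispatch in layers; a rough summary of the control flow above (a reading aid, not patch code):

  /* regno == -1 (all registers):
       fetch: run the optional the_low_target.fetch_register hook for
              every register, then read the regsets; if that reports
              failure ("all" != 0) the PEEKUSER path re-fetches every
              register, otherwise only those outside the regset bitmap.
       store: the same, minus the hook.

     single REGNO:
       fetch: the hook may fully handle the register and return nonzero;
              otherwise use the regsets when the bitmap covers REGNO,
              and fall back to PTRACE_PEEKUSER/POKEUSER if it does not,
              or if the regset transfer failed.  */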
| 4380 |
| 4381 |
| 4052 /* Copy LEN bytes from inferior's memory starting at MEMADDR | 4382 /* Copy LEN bytes from inferior's memory starting at MEMADDR |
| 4053 to debugger memory starting at MYADDR. */ | 4383 to debugger memory starting at MYADDR. */ |
| 4054 | 4384 |
| 4055 static int | 4385 static int |
| 4056 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len) | 4386 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len) |
| 4057 { | 4387 { |
| 4388 int pid = lwpid_of (get_thread_lwp (current_inferior)); |
| 4389 register PTRACE_XFER_TYPE *buffer; |
| 4390 register CORE_ADDR addr; |
| 4391 register int count; |
| 4392 char filename[64]; |
| 4058 register int i; | 4393 register int i; |
| 4059 /* Round starting address down to longword boundary. */ | 4394 int ret; |
| 4060 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE); | |
| 4061 /* Round ending address up; get number of longwords that makes. */ | |
| 4062 register int count | |
| 4063 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) | |
| 4064 / sizeof (PTRACE_XFER_TYPE); | |
| 4065 /* Allocate buffer of that many longwords. */ | |
| 4066 register PTRACE_XFER_TYPE *buffer | |
| 4067 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE)); | |
| 4068 int fd; | 4395 int fd; |
| 4069 char filename[64]; | |
| 4070 int pid = lwpid_of (get_thread_lwp (current_inferior)); | |
| 4071 | 4396 |
| 4072 /* Try using /proc. Don't bother for one word. */ | 4397 /* Try using /proc. Don't bother for one word. */ |
| 4073 if (len >= 3 * sizeof (long)) | 4398 if (len >= 3 * sizeof (long)) |
| 4074 { | 4399 { |
| 4400 int bytes; |
| 4401 |
| 4075 /* We could keep this file open and cache it - possibly one per | 4402 /* We could keep this file open and cache it - possibly one per |
| 4076 thread. That requires some juggling, but is even faster. */ | 4403 thread. That requires some juggling, but is even faster. */ |
| 4077 sprintf (filename, "/proc/%d/mem", pid); | 4404 sprintf (filename, "/proc/%d/mem", pid); |
| 4078 fd = open (filename, O_RDONLY | O_LARGEFILE); | 4405 fd = open (filename, O_RDONLY | O_LARGEFILE); |
| 4079 if (fd == -1) | 4406 if (fd == -1) |
| 4080 goto no_proc; | 4407 goto no_proc; |
| 4081 | 4408 |
| 4082 /* If pread64 is available, use it. It's faster if the kernel | 4409 /* If pread64 is available, use it. It's faster if the kernel |
| 4083 supports it (only one syscall), and it's 64-bit safe even on | 4410 supports it (only one syscall), and it's 64-bit safe even on |
| 4084 32-bit platforms (for instance, SPARC debugging a SPARC64 | 4411 32-bit platforms (for instance, SPARC debugging a SPARC64 |
| 4085 application). */ | 4412 application). */ |
| 4086 #ifdef HAVE_PREAD64 | 4413 #ifdef HAVE_PREAD64 |
| 4087 if (pread64 (fd, myaddr, len, memaddr) != len) | 4414 bytes = pread64 (fd, myaddr, len, memaddr); |
| 4088 #else | 4415 #else |
| 4089 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len) | 4416 bytes = -1; |
| 4417 if (lseek (fd, memaddr, SEEK_SET) != -1) |
| 4418 » bytes = read (fd, myaddr, len); |
| 4090 #endif | 4419 #endif |
| 4091 { | |
| 4092 close (fd); | |
| 4093 goto no_proc; | |
| 4094 } | |
| 4095 | 4420 |
| 4096 close (fd); | 4421 close (fd); |
| 4097 return 0; | 4422 if (bytes == len) |
| 4423 » return 0; |
| 4424 |
| 4425 /* Some data was read, we'll try to get the rest with ptrace. */ |
| 4426 if (bytes > 0) |
| 4427 » { |
| 4428 » memaddr += bytes; |
| 4429 » myaddr += bytes; |
| 4430 » len -= bytes; |
| 4431 » } |
| 4098 } | 4432 } |
| 4099 | 4433 |
| 4100 no_proc: | 4434 no_proc: |
| 4435 /* Round starting address down to longword boundary. */ |
| 4436 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE); |
| 4437 /* Round ending address up; get number of longwords that makes. */ |
| 4438 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) |
| 4439 / sizeof (PTRACE_XFER_TYPE)); |
| 4440 /* Allocate buffer of that many longwords. */ |
| 4441 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE)); |
| 4442 |
| 4101 /* Read all the longwords */ | 4443 /* Read all the longwords */ |
| 4444 errno = 0; |
| 4102 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE)) | 4445 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE)) |
| 4103 { | 4446 { |
| 4104 errno = 0; | |
| 4105 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning | 4447 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning |
| 4106 about coercing an 8 byte integer to a 4 byte pointer. */ | 4448 about coercing an 8 byte integer to a 4 byte pointer. */ |
| 4107 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid, | 4449 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid, |
| 4108 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0); | 4450 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0); |
| 4109 if (errno) | 4451 if (errno) |
| 4110 » return errno; | 4452 » break; |
| 4453 } |
| 4454 ret = errno; |
| 4455 |
| 4456 /* Copy appropriate bytes out of the buffer. */ |
| 4457 if (i > 0) |
| 4458 { |
| 4459 i *= sizeof (PTRACE_XFER_TYPE); |
| 4460 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1); |
| 4461 memcpy (myaddr, |
| 4462 » (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), |
| 4463 » i < len ? i : len); |
| 4111 } | 4464 } |
| 4112 | 4465 |
| 4113 /* Copy appropriate bytes out of the buffer. */ | 4466 return ret; |
| 4114 memcpy (myaddr, | |
| 4115 » (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), | |
| 4116 » len); | |
| 4117 | |
| 4118 return 0; | |
| 4119 } | 4467 } |
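The ptrace fallback reads whole PTRACE_XFER_TYPE words and then trims the copy down to the caller's range. A worked example, assuming 4-byte transfer words:

  /* memaddr == 0x1003, len == 6:

       addr  = 0x1003 & ~3                     = 0x1000
       count = ((0x1003 + 6) - 0x1000 + 3) / 4 = 3 words to peek

     If all three PTRACE_PEEKTEXT calls succeed (i == 3):

       i  = 3 * 4       = 12 bytes sitting in BUFFER
       i -= 0x1003 & 3  -> 9 usable bytes after the leading padding

     and memcpy copies min (9, 6) = 6 bytes from buffer + 3 into MYADDR;
     errno was never set, so 0 is returned.  If a peek fails part way
     through, only the words already fetched are copied and the saved
     errno is returned.  */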
| 4120 | 4468 |
| 4121 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's | 4469 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's |
| 4122 memory at MEMADDR. On failure (cannot write to the inferior) | 4470 memory at MEMADDR. On failure (cannot write to the inferior) |
| 4123 returns the value of errno. */ | 4471 returns the value of errno. */ |
| 4124 | 4472 |
| 4125 static int | 4473 static int |
| 4126 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len) | 4474 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len) |
| 4127 { | 4475 { |
| 4128 register int i; | 4476 register int i; |
| (...skipping 322 matching lines...) |
| 4451 | 4799 |
| 4452 static CORE_ADDR | 4800 static CORE_ADDR |
| 4453 linux_stopped_data_address (void) | 4801 linux_stopped_data_address (void) |
| 4454 { | 4802 { |
| 4455 struct lwp_info *lwp = get_thread_lwp (current_inferior); | 4803 struct lwp_info *lwp = get_thread_lwp (current_inferior); |
| 4456 | 4804 |
| 4457 return lwp->stopped_data_address; | 4805 return lwp->stopped_data_address; |
| 4458 } | 4806 } |
| 4459 | 4807 |
| 4460 #if defined(__UCLIBC__) && defined(HAS_NOMMU) | 4808 #if defined(__UCLIBC__) && defined(HAS_NOMMU) |
| 4809 #if ! (defined(PT_TEXT_ADDR) \ |
| 4810 || defined(PT_DATA_ADDR) \ |
| 4811 || defined(PT_TEXT_END_ADDR)) |
| 4461 #if defined(__mcoldfire__) | 4812 #if defined(__mcoldfire__) |
| 4462 /* These should really be defined in the kernel's ptrace.h header. */ | 4813 /* These should really be defined in the kernel's ptrace.h header. */ |
| 4463 #define PT_TEXT_ADDR 49*4 | 4814 #define PT_TEXT_ADDR 49*4 |
| 4464 #define PT_DATA_ADDR 50*4 | 4815 #define PT_DATA_ADDR 50*4 |
| 4465 #define PT_TEXT_END_ADDR 51*4 | 4816 #define PT_TEXT_END_ADDR 51*4 |
| 4466 #elif defined(BFIN) | 4817 #elif defined(BFIN) |
| 4467 #define PT_TEXT_ADDR 220 | 4818 #define PT_TEXT_ADDR 220 |
| 4468 #define PT_TEXT_END_ADDR 224 | 4819 #define PT_TEXT_END_ADDR 224 |
| 4469 #define PT_DATA_ADDR 228 | 4820 #define PT_DATA_ADDR 228 |
| 4470 #elif defined(__TMS320C6X__) | 4821 #elif defined(__TMS320C6X__) |
| 4471 #define PT_TEXT_ADDR (0x10000*4) | 4822 #define PT_TEXT_ADDR (0x10000*4) |
| 4472 #define PT_DATA_ADDR (0x10004*4) | 4823 #define PT_DATA_ADDR (0x10004*4) |
| 4473 #define PT_TEXT_END_ADDR (0x10008*4) | 4824 #define PT_TEXT_END_ADDR (0x10008*4) |
| 4474 #endif | 4825 #endif |
| 4826 #endif |
| 4475 | 4827 |
| 4476 /* Under uClinux, programs are loaded at non-zero offsets, which we need | 4828 /* Under uClinux, programs are loaded at non-zero offsets, which we need |
| 4477 to tell gdb about. */ | 4829 to tell gdb about. */ |
| 4478 | 4830 |
| 4479 static int | 4831 static int |
| 4480 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p) | 4832 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p) |
| 4481 { | 4833 { |
| 4482 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR) | 4834 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR) |
| 4483 unsigned long text, text_end, data; | 4835 unsigned long text, text_end, data; |
| 4484 int pid = lwpid_of (get_thread_lwp (current_inferior)); | 4836 int pid = lwpid_of (get_thread_lwp (current_inferior)); |
| (...skipping 30 matching lines...) Expand all Loading... |
| 4515 unsigned char *readbuf, unsigned const char *writebuf, | 4867 unsigned char *readbuf, unsigned const char *writebuf, |
| 4516 CORE_ADDR offset, int len) | 4868 CORE_ADDR offset, int len) |
| 4517 { | 4869 { |
| 4518 return linux_common_xfer_osdata (annex, readbuf, offset, len); | 4870 return linux_common_xfer_osdata (annex, readbuf, offset, len); |
| 4519 } | 4871 } |
| 4520 | 4872 |
| 4521 /* Convert a native/host siginfo object, into/from the siginfo in the | 4873 /* Convert a native/host siginfo object, into/from the siginfo in the |
| 4522 layout of the inferiors' architecture. */ | 4874 layout of the inferiors' architecture. */ |
| 4523 | 4875 |
| 4524 static void | 4876 static void |
| 4525 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction) | 4877 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction) |
| 4526 { | 4878 { |
| 4527 int done = 0; | 4879 int done = 0; |
| 4528 | 4880 |
| 4529 if (the_low_target.siginfo_fixup != NULL) | 4881 if (the_low_target.siginfo_fixup != NULL) |
| 4530 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction); | 4882 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction); |
| 4531 | 4883 |
| 4532 /* If there was no callback, or the callback didn't do anything, | 4884 /* If there was no callback, or the callback didn't do anything, |
| 4533 then just do a straight memcpy. */ | 4885 then just do a straight memcpy. */ |
| 4534 if (!done) | 4886 if (!done) |
| 4535 { | 4887 { |
| 4536 if (direction == 1) | 4888 if (direction == 1) |
| 4537 » memcpy (siginfo, inf_siginfo, sizeof (struct siginfo)); | 4889 » memcpy (siginfo, inf_siginfo, sizeof (siginfo_t)); |
| 4538 else | 4890 else |
| 4539 » memcpy (inf_siginfo, siginfo, sizeof (struct siginfo)); | 4891 » memcpy (inf_siginfo, siginfo, sizeof (siginfo_t)); |
| 4540 } | 4892 } |
| 4541 } | 4893 } |
| 4542 | 4894 |
| 4543 static int | 4895 static int |
| 4544 linux_xfer_siginfo (const char *annex, unsigned char *readbuf, | 4896 linux_xfer_siginfo (const char *annex, unsigned char *readbuf, |
| 4545 unsigned const char *writebuf, CORE_ADDR offset, int len) | 4897 unsigned const char *writebuf, CORE_ADDR offset, int len) |
| 4546 { | 4898 { |
| 4547 int pid; | 4899 int pid; |
| 4548 struct siginfo siginfo; | 4900 siginfo_t siginfo; |
| 4549 char inf_siginfo[sizeof (struct siginfo)]; | 4901 char inf_siginfo[sizeof (siginfo_t)]; |
| 4550 | 4902 |
| 4551 if (current_inferior == NULL) | 4903 if (current_inferior == NULL) |
| 4552 return -1; | 4904 return -1; |
| 4553 | 4905 |
| 4554 pid = lwpid_of (get_thread_lwp (current_inferior)); | 4906 pid = lwpid_of (get_thread_lwp (current_inferior)); |
| 4555 | 4907 |
| 4556 if (debug_threads) | 4908 if (debug_threads) |
| 4557 fprintf (stderr, "%s siginfo for lwp %d.\n", | 4909 fprintf (stderr, "%s siginfo for lwp %d.\n", |
| 4558 readbuf != NULL ? "Reading" : "Writing", | 4910 readbuf != NULL ? "Reading" : "Writing", |
| 4559 pid); | 4911 pid); |
| (...skipping 127 matching lines...) |
| 4687 static int | 5039 static int |
| 4688 linux_supports_disable_randomization (void) | 5040 linux_supports_disable_randomization (void) |
| 4689 { | 5041 { |
| 4690 #ifdef HAVE_PERSONALITY | 5042 #ifdef HAVE_PERSONALITY |
| 4691 return 1; | 5043 return 1; |
| 4692 #else | 5044 #else |
| 4693 return 0; | 5045 return 0; |
| 4694 #endif | 5046 #endif |
| 4695 } | 5047 } |
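linux_supports_disable_randomization only advertises the capability when HAVE_PERSONALITY is defined; the disabling itself happens at inferior-creation time, outside this hunk, and typically amounts to something like the following sketch (illustrative, not taken from this patch):

  #include <sys/personality.h>

  /* Turn off address-space randomization for the process that is about
     to exec the inferior.  */
  static void
  sketch_disable_aslr (void)
  {
    int persona = personality (0xffffffff);  /* query current persona */

    if (persona >= 0)
      personality (persona | ADDR_NO_RANDOMIZE);
  }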
| 4696 | 5048 |
| 5049 static int |
| 5050 linux_supports_agent (void) |
| 5051 { |
| 5052 return 1; |
| 5053 } |
| 5054 |
| 4697 /* Enumerate spufs IDs for process PID. */ | 5055 /* Enumerate spufs IDs for process PID. */ |
| 4698 static int | 5056 static int |
| 4699 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len) | 5057 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len) |
| 4700 { | 5058 { |
| 4701 int pos = 0; | 5059 int pos = 0; |
| 4702 int written = 0; | 5060 int written = 0; |
| 4703 char path[128]; | 5061 char path[128]; |
| 4704 DIR *dir; | 5062 DIR *dir; |
| 4705 struct dirent *entry; | 5063 struct dirent *entry; |
| 4706 | 5064 |
| (...skipping 369 matching lines...) |
| 5076 else | 5434 else |
| 5077 { | 5435 { |
| 5078 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size); | 5436 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size); |
| 5079 | 5437 |
| 5080 if (p->p_type == PT_PHDR) | 5438 if (p->p_type == PT_PHDR) |
| 5081 relocation = phdr_memaddr - p->p_vaddr; | 5439 relocation = phdr_memaddr - p->p_vaddr; |
| 5082 } | 5440 } |
| 5083 | 5441 |
| 5084 if (relocation == -1) | 5442 if (relocation == -1) |
| 5085 { | 5443 { |
| 5086 warning ("Unexpected missing PT_PHDR"); | 5444 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately |
| 5445 » any real world executables, including PIE executables, have always |
| 5446 » PT_PHDR present. PT_PHDR is not present in some shared libraries or |
| 5447 » in fpc (Free Pascal 2.4) binaries but neither of those have a need for |
| 5448 » or present DT_DEBUG anyway (fpc binaries are statically linked). |
| 5449 |
| 5450 » Therefore if there exists DT_DEBUG there is always also PT_PHDR. |
| 5451 |
| 5452 » GDB could find RELOCATION also from AT_ENTRY - e_entry. */ |
| 5453 |
| 5087 return 0; | 5454 return 0; |
| 5088 } | 5455 } |
| 5089 | 5456 |
| 5090 for (i = 0; i < num_phdr; i++) | 5457 for (i = 0; i < num_phdr; i++) |
| 5091 { | 5458 { |
| 5092 if (is_elf64) | 5459 if (is_elf64) |
| 5093 { | 5460 { |
| 5094 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size); | 5461 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size); |
| 5095 | 5462 |
| 5096 if (p->p_type == PT_DYNAMIC) | 5463 if (p->p_type == PT_DYNAMIC) |
| 5097 return p->p_vaddr + relocation; | 5464 return p->p_vaddr + relocation; |
| 5098 } | 5465 } |
| 5099 else | 5466 else |
| 5100 { | 5467 { |
| 5101 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size); | 5468 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size); |
| 5102 | 5469 |
| 5103 if (p->p_type == PT_DYNAMIC) | 5470 if (p->p_type == PT_DYNAMIC) |
| 5104 return p->p_vaddr + relocation; | 5471 return p->p_vaddr + relocation; |
| 5105 } | 5472 } |
| 5106 } | 5473 } |
| 5107 | 5474 |
| 5108 return 0; | 5475 return 0; |
| 5109 } | 5476 } |
| 5110 | 5477 |
| 5111 /* Return &_r_debug in the inferior, or -1 if not present. Return value | 5478 /* Return &_r_debug in the inferior, or -1 if not present. Return value |
| 5112 can be 0 if the inferior does not yet have the library list initialized. */ | 5479 can be 0 if the inferior does not yet have the library list initialized. |
| 5480 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of |
| 5481 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */ |
| 5113 | 5482 |
| 5114 static CORE_ADDR | 5483 static CORE_ADDR |
| 5115 get_r_debug (const int pid, const int is_elf64) | 5484 get_r_debug (const int pid, const int is_elf64) |
| 5116 { | 5485 { |
| 5117 CORE_ADDR dynamic_memaddr; | 5486 CORE_ADDR dynamic_memaddr; |
| 5118 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn); | 5487 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn); |
| 5119 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */ | 5488 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */ |
| 5489 CORE_ADDR map = -1; |
| 5120 | 5490 |
| 5121 dynamic_memaddr = get_dynamic (pid, is_elf64); | 5491 dynamic_memaddr = get_dynamic (pid, is_elf64); |
| 5122 if (dynamic_memaddr == 0) | 5492 if (dynamic_memaddr == 0) |
| 5123 return (CORE_ADDR) -1; | 5493 return map; |
| 5124 | 5494 |
| 5125 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0) | 5495 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0) |
| 5126 { | 5496 { |
| 5127 if (is_elf64) | 5497 if (is_elf64) |
| 5128 { | 5498 { |
| 5129 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf; | 5499 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf; |
| 5500 #ifdef DT_MIPS_RLD_MAP |
| 5501 union |
| 5502 { |
| 5503 Elf64_Xword map; |
| 5504 unsigned char buf[sizeof (Elf64_Xword)]; |
| 5505 } |
| 5506 rld_map; |
| 5130 | 5507 |
| 5131 » if (dyn->d_tag == DT_DEBUG) | 5508 » if (dyn->d_tag == DT_MIPS_RLD_MAP) |
| 5132 » return dyn->d_un.d_val; | 5509 » { |
| 5510 » if (linux_read_memory (dyn->d_un.d_val, |
| 5511 » » » » rld_map.buf, sizeof (rld_map.buf)) == 0) |
| 5512 » » return rld_map.map; |
| 5513 » else |
| 5514 » » break; |
| 5515 » } |
| 5516 #endif» /* DT_MIPS_RLD_MAP */ |
| 5517 |
| 5518 » if (dyn->d_tag == DT_DEBUG && map == -1) |
| 5519 » map = dyn->d_un.d_val; |
| 5133 | 5520 |
| 5134 if (dyn->d_tag == DT_NULL) | 5521 if (dyn->d_tag == DT_NULL) |
| 5135 break; | 5522 break; |
| 5136 } | 5523 } |
| 5137 else | 5524 else |
| 5138 { | 5525 { |
| 5139 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf; | 5526 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf; |
| 5527 #ifdef DT_MIPS_RLD_MAP |
| 5528 union |
| 5529 { |
| 5530 Elf32_Word map; |
| 5531 unsigned char buf[sizeof (Elf32_Word)]; |
| 5532 } |
| 5533 rld_map; |
| 5140 | 5534 |
| 5141 » if (dyn->d_tag == DT_DEBUG) | 5535 » if (dyn->d_tag == DT_MIPS_RLD_MAP) |
| 5142 » return dyn->d_un.d_val; | 5536 » { |
| 5537 » if (linux_read_memory (dyn->d_un.d_val, |
| 5538 » » » » rld_map.buf, sizeof (rld_map.buf)) == 0) |
| 5539 » » return rld_map.map; |
| 5540 » else |
| 5541 » » break; |
| 5542 » } |
| 5543 #endif» /* DT_MIPS_RLD_MAP */ |
| 5544 |
| 5545 » if (dyn->d_tag == DT_DEBUG && map == -1) |
| 5546 » map = dyn->d_un.d_val; |
| 5143 | 5547 |
| 5144 if (dyn->d_tag == DT_NULL) | 5548 if (dyn->d_tag == DT_NULL) |
| 5145 break; | 5549 break; |
| 5146 } | 5550 } |
| 5147 | 5551 |
| 5148 dynamic_memaddr += dyn_size; | 5552 dynamic_memaddr += dyn_size; |
| 5149 } | 5553 } |
| 5150 | 5554 |
| 5151 return (CORE_ADDR) -1; | 5555 return map; |
| 5152 } | 5556 } |
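get_r_debug walks the ElfXX_Dyn entries of the inferior's PT_DYNAMIC segment. For orientation, the 64-bit entry layout assumed here (as declared in glibc's elf.h) is:

  typedef struct
  {
    Elf64_Sxword d_tag;     /* Entry type: DT_DEBUG, DT_MIPS_RLD_MAP, DT_NULL, ...  */
    union
      {
        Elf64_Xword d_val;  /* Integer value  */
        Elf64_Addr  d_ptr;  /* Address value  */
      } d_un;
  } Elf64_Dyn;

For DT_DEBUG the value is &_r_debug itself, while for DT_MIPS_RLD_MAP it is the address of a slot that the MIPS dynamic linker fills in with &_r_debug, which is why the new code adds the extra linux_read_memory dereference.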
| 5153 | 5557 |
| 5154 /* Read one pointer from MEMADDR in the inferior. */ | 5558 /* Read one pointer from MEMADDR in the inferior. */ |
| 5155 | 5559 |
| 5156 static int | 5560 static int |
| 5157 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size) | 5561 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size) |
| 5158 { | 5562 { |
| 5159 *ptr = 0; | 5563 int ret; |
| 5160 return linux_read_memory (memaddr, (unsigned char *) ptr, ptr_size); | 5564 |
| 5565 /* Go through a union so this works on either big or little endian |
| 5566 hosts, when the inferior's pointer size is smaller than the size |
| 5567 of CORE_ADDR. It is assumed the inferior's endianness is the |
| 5568 same as the superior's. */ |
| 5569 union |
| 5570 { |
| 5571 CORE_ADDR core_addr; |
| 5572 unsigned int ui; |
| 5573 unsigned char uc; |
| 5574 } addr; |
| 5575 |
| 5576 ret = linux_read_memory (memaddr, &addr.uc, ptr_size); |
| 5577 if (ret == 0) |
| 5578 { |
| 5579 if (ptr_size == sizeof (CORE_ADDR)) |
| 5580 » *ptr = addr.core_addr; |
| 5581 else if (ptr_size == sizeof (unsigned int)) |
| 5582 » *ptr = addr.ui; |
| 5583 else |
| 5584 » gdb_assert_not_reached ("unhandled pointer size"); |
| 5585 } |
| 5586 return ret; |
| 5161 } | 5587 } |
| 5162 | 5588 |
| 5163 struct link_map_offsets | 5589 struct link_map_offsets |
| 5164 { | 5590 { |
| 5165 /* Offset and size of r_debug.r_version. */ | 5591 /* Offset and size of r_debug.r_version. */ |
| 5166 int r_version_offset; | 5592 int r_version_offset; |
| 5167 | 5593 |
| 5168 /* Offset and size of r_debug.r_map. */ | 5594 /* Offset and size of r_debug.r_map. */ |
| 5169 int r_map_offset; | 5595 int r_map_offset; |
| 5170 | 5596 |
| 5171 /* Offset to l_addr field in struct link_map. */ | 5597 /* Offset to l_addr field in struct link_map. */ |
| 5172 int l_addr_offset; | 5598 int l_addr_offset; |
| 5173 | 5599 |
| 5174 /* Offset to l_name field in struct link_map. */ | 5600 /* Offset to l_name field in struct link_map. */ |
| 5175 int l_name_offset; | 5601 int l_name_offset; |
| 5176 | 5602 |
| 5177 /* Offset to l_ld field in struct link_map. */ | 5603 /* Offset to l_ld field in struct link_map. */ |
| 5178 int l_ld_offset; | 5604 int l_ld_offset; |
| 5179 | 5605 |
| 5180 /* Offset to l_next field in struct link_map. */ | 5606 /* Offset to l_next field in struct link_map. */ |
| 5181 int l_next_offset; | 5607 int l_next_offset; |
| 5182 | 5608 |
| 5183 /* Offset to l_prev field in struct link_map. */ | 5609 /* Offset to l_prev field in struct link_map. */ |
| 5184 int l_prev_offset; | 5610 int l_prev_offset; |
| 5185 }; | 5611 }; |
| 5186 | 5612 |
| 5187 /* Construct qXfer:libraries:read reply. */ | 5613 /* Construct qXfer:libraries-svr4:read reply. */ |
| 5188 | 5614 |
| 5189 static int | 5615 static int |
| 5190 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf, | 5616 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf, |
| 5191 unsigned const char *writebuf, | 5617 unsigned const char *writebuf, |
| 5192 CORE_ADDR offset, int len) | 5618 CORE_ADDR offset, int len) |
| 5193 { | 5619 { |
| 5194 char *document; | 5620 char *document; |
| 5195 unsigned document_len; | 5621 unsigned document_len; |
| 5196 struct process_info_private *const priv = current_process ()->private; | 5622 struct process_info_private *const priv = current_process ()->private; |
| 5197 char filename[PATH_MAX]; | 5623 char filename[PATH_MAX]; |
| (...skipping 14 matching lines...) |
| 5212 { | 5638 { |
| 5213 0, /* r_version offset. */ | 5639 0, /* r_version offset. */ |
| 5214 8, /* r_debug.r_map offset. */ | 5640 8, /* r_debug.r_map offset. */ |
| 5215 0, /* l_addr offset in link_map. */ | 5641 0, /* l_addr offset in link_map. */ |
| 5216 8, /* l_name offset in link_map. */ | 5642 8, /* l_name offset in link_map. */ |
| 5217 16, /* l_ld offset in link_map. */ | 5643 16, /* l_ld offset in link_map. */ |
| 5218 24, /* l_next offset in link_map. */ | 5644 24, /* l_next offset in link_map. */ |
| 5219 32 /* l_prev offset in link_map. */ | 5645 32 /* l_prev offset in link_map. */ |
| 5220 }; | 5646 }; |
| 5221 const struct link_map_offsets *lmo; | 5647 const struct link_map_offsets *lmo; |
| 5648 unsigned int machine; |
| 5222 | 5649 |
| 5223 if (writebuf != NULL) | 5650 if (writebuf != NULL) |
| 5224 return -2; | 5651 return -2; |
| 5225 if (readbuf == NULL) | 5652 if (readbuf == NULL) |
| 5226 return -1; | 5653 return -1; |
| 5227 | 5654 |
| 5228 pid = lwpid_of (get_thread_lwp (current_inferior)); | 5655 pid = lwpid_of (get_thread_lwp (current_inferior)); |
| 5229 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid); | 5656 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid); |
| 5230 is_elf64 = elf_64_file_p (filename); | 5657 is_elf64 = elf_64_file_p (filename, &machine); |
| 5231 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets; | 5658 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets; |
| 5232 | 5659 |
| 5233 if (priv->r_debug == 0) | 5660 if (priv->r_debug == 0) |
| 5234 priv->r_debug = get_r_debug (pid, is_elf64); | 5661 priv->r_debug = get_r_debug (pid, is_elf64); |
| 5235 | 5662 |
| 5236 if (priv->r_debug == (CORE_ADDR) -1 || priv->r_debug == 0) | 5663 /* We failed to find DT_DEBUG. Such a situation will not change for this |
| 5664 inferior - do not retry it. Report it to GDB as E01; see the reasons |
| 5665 given on the GDB solib-svr4.c side. */ |
| 5666 if (priv->r_debug == (CORE_ADDR) -1) |
| 5667 return -1; |
| 5668 |
| 5669 if (priv->r_debug == 0) |
| 5237 { | 5670 { |
| 5238 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n"); | 5671 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n"); |
| 5239 } | 5672 } |
| 5240 else | 5673 else |
| 5241 { | 5674 { |
| 5242 int allocated = 1024; | 5675 int allocated = 1024; |
| 5243 char *p; | 5676 char *p; |
| 5244 const int ptr_size = is_elf64 ? 8 : 4; | 5677 const int ptr_size = is_elf64 ? 8 : 4; |
| 5245 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev; | 5678 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev; |
| 5246 int r_version, header_done = 0; | 5679 int r_version, header_done = 0; |
| (...skipping 82 matching lines...) |
| 5329 p = p + strlen (p); | 5762 p = p + strlen (p); |
| 5330 } | 5763 } |
| 5331 | 5764 |
| 5332 if (l_next == 0) | 5765 if (l_next == 0) |
| 5333 break; | 5766 break; |
| 5334 | 5767 |
| 5335 lm_prev = lm_addr; | 5768 lm_prev = lm_addr; |
| 5336 lm_addr = l_next; | 5769 lm_addr = l_next; |
| 5337 } | 5770 } |
| 5338 done: | 5771 done: |
| 5339 strcpy (p, "</library-list-svr4>"); | 5772 if (!header_done) |
| 5773 » { |
| 5774 » /* Empty list; terminate `<library-list-svr4'. */ |
| 5775 » strcpy (p, "/>"); |
| 5776 » } |
| 5777 else |
| 5778 » strcpy (p, "</library-list-svr4>"); |
| 5340 } | 5779 } |
| 5341 | 5780 |
| 5342 document_len = strlen (document); | 5781 document_len = strlen (document); |
| 5343 if (offset < document_len) | 5782 if (offset < document_len) |
| 5344 document_len -= offset; | 5783 document_len -= offset; |
| 5345 else | 5784 else |
| 5346 document_len = 0; | 5785 document_len = 0; |
| 5347 if (len > document_len) | 5786 if (len > document_len) |
| 5348 len = document_len; | 5787 len = document_len; |
| 5349 | 5788 |
| (...skipping 59 matching lines...) |
| 5409 NULL, | 5848 NULL, |
| 5410 linux_pause_all, | 5849 linux_pause_all, |
| 5411 linux_unpause_all, | 5850 linux_unpause_all, |
| 5412 linux_cancel_breakpoints, | 5851 linux_cancel_breakpoints, |
| 5413 linux_stabilize_threads, | 5852 linux_stabilize_threads, |
| 5414 linux_install_fast_tracepoint_jump_pad, | 5853 linux_install_fast_tracepoint_jump_pad, |
| 5415 linux_emit_ops, | 5854 linux_emit_ops, |
| 5416 linux_supports_disable_randomization, | 5855 linux_supports_disable_randomization, |
| 5417 linux_get_min_fast_tracepoint_insn_len, | 5856 linux_get_min_fast_tracepoint_insn_len, |
| 5418 linux_qxfer_libraries_svr4, | 5857 linux_qxfer_libraries_svr4, |
| 5858 linux_supports_agent, |
| 5419 }; | 5859 }; |
| 5420 | 5860 |
| 5421 static void | 5861 static void |
| 5422 linux_init_signals () | 5862 linux_init_signals () |
| 5423 { | 5863 { |
| 5424 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads | 5864 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads |
| 5425 to find what the cancel signal actually is. */ | 5865 to find what the cancel signal actually is. */ |
| 5426 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */ | 5866 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */ |
| 5427 signal (__SIGRTMIN+1, SIG_IGN); | 5867 signal (__SIGRTMIN+1, SIG_IGN); |
| 5428 #endif | 5868 #endif |
| 5429 } | 5869 } |
| 5430 | 5870 |
| 5431 void | 5871 void |
| 5432 initialize_low (void) | 5872 initialize_low (void) |
| 5433 { | 5873 { |
| 5434 struct sigaction sigchld_action; | 5874 struct sigaction sigchld_action; |
| 5435 memset (&sigchld_action, 0, sizeof (sigchld_action)); | 5875 memset (&sigchld_action, 0, sizeof (sigchld_action)); |
| 5436 set_target_ops (&linux_target_ops); | 5876 set_target_ops (&linux_target_ops); |
| 5437 set_breakpoint_data (the_low_target.breakpoint, | 5877 set_breakpoint_data (the_low_target.breakpoint, |
| 5438 the_low_target.breakpoint_len); | 5878 the_low_target.breakpoint_len); |
| 5439 linux_init_signals (); | 5879 linux_init_signals (); |
| 5440 linux_test_for_tracefork (); | 5880 linux_test_for_tracefork (); |
| 5881 linux_ptrace_init_warnings (); |
| 5441 #ifdef HAVE_LINUX_REGSETS | 5882 #ifdef HAVE_LINUX_REGSETS |
| 5442 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++) | 5883 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++) |
| 5443 ; | 5884 ; |
| 5444 disabled_regsets = xmalloc (num_regsets); | 5885 disabled_regsets = xmalloc (num_regsets); |
| 5445 #endif | 5886 #endif |
| 5446 | 5887 |
| 5447 sigchld_action.sa_handler = sigchld_handler; | 5888 sigchld_action.sa_handler = sigchld_handler; |
| 5448 sigemptyset (&sigchld_action.sa_mask); | 5889 sigemptyset (&sigchld_action.sa_mask); |
| 5449 sigchld_action.sa_flags = SA_RESTART; | 5890 sigchld_action.sa_flags = SA_RESTART; |
| 5450 sigaction (SIGCHLD, &sigchld_action, NULL); | 5891 sigaction (SIGCHLD, &sigchld_action, NULL); |
| 5451 } | 5892 } |
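A note on the get_r_debug change above: the new code prefers DT_MIPS_RLD_MAP, whose d_val is the address of a word that in turn holds the r_debug address, and only falls back to the first DT_DEBUG value when no MIPS entry is usable. Below is a minimal sketch of that scan under the assumption that the dynamic entries are already in host memory; find_r_debug and the read_word callback are illustrative stand-ins, not gdbserver functions (the real code fetches each entry from the inferior with linux_read_memory).

#include <elf.h>

/* Sketch: scan a DT_NULL-terminated dynamic array the way the new
   get_r_debug does.  A readable DT_MIPS_RLD_MAP entry wins outright;
   DT_DEBUG is only remembered as a fallback; (Elf64_Addr) -1 means
   neither was found.  READ_WORD stands in for linux_read_memory.  */
static Elf64_Addr
find_r_debug (const Elf64_Dyn *dyn,
              int (*read_word) (Elf64_Addr addr, Elf64_Xword *value))
{
  Elf64_Addr map = (Elf64_Addr) -1;

  for (; dyn->d_tag != DT_NULL; dyn++)
    {
#ifdef DT_MIPS_RLD_MAP
      if (dyn->d_tag == DT_MIPS_RLD_MAP)
        {
          Elf64_Xword rld_map;

          /* d_val is the address of the word holding r_debug.  */
          if (read_word (dyn->d_un.d_val, &rld_map) == 0)
            return rld_map;
          break;  /* Unreadable; give up, as the original does.  */
        }
#endif
      if (dyn->d_tag == DT_DEBUG && map == (Elf64_Addr) -1)
        map = dyn->d_un.d_val;
    }

  return map;
}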
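The read_one_ptr rewrite is an endianness fix: reading ptr_size bytes straight into *ptr only gives the right value when the inferior's pointer size equals sizeof (CORE_ADDR) or the host is little-endian. Here is a host-only sketch of the union technique, assuming (as the comment above states) that inferior and superior share the same endianness; CORE_ADDR is replaced by a local typedef and linux_read_memory by a plain memcpy.

#include <assert.h>
#include <string.h>

typedef unsigned long long core_addr;   /* Stand-in for CORE_ADDR.  */

/* Sketch: pick up a PTR_SIZE-byte pointer from SRC through a union,
   as the new read_one_ptr does, so a 4-byte inferior pointer lands in
   the right bytes on either a big- or little-endian 64-bit host.  */
static int
read_one_ptr (const unsigned char *src, core_addr *ptr, int ptr_size)
{
  union
  {
    core_addr core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  memcpy (&addr.uc, src, ptr_size);

  if (ptr_size == sizeof (core_addr))
    *ptr = addr.core_addr;
  else if (ptr_size == sizeof (unsigned int))
    *ptr = addr.ui;
  else
    assert (!"unhandled pointer size");
  return 0;
}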
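The change at the done: label handles the empty list: when no <library> element was ever emitted, the reply buffer still ends with the unterminated `<library-list-svr4 version="1.0"' header, so it must be finished with "/>" rather than with a full closing tag. A simplified sketch of that construction follows; build_library_list, the fixed attribute values, and the assumption that DOCUMENT is large enough are all illustrative, not the actual gdbserver buffer management.

#include <stdio.h>
#include <string.h>

/* Sketch: build a library-list-svr4 document for NLIBS fake libraries
   into DOCUMENT (assumed large enough).  The header's '>' is only
   written once the first <library> element is about to be emitted,
   which is why an empty list is closed with "/>".  */
static void
build_library_list (char *document, int nlibs)
{
  char *p;
  int header_done = 0;
  int i;

  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

  for (i = 0; i < nlibs; i++)
    {
      if (!header_done)
        {
          *p++ = '>';
          header_done = 1;
        }
      p += sprintf (p, "<library name=\"lib%d.so\" lm=\"0x%x\" "
                    "l_addr=\"0x0\" l_ld=\"0x0\"/>",
                    i, (unsigned int) (0x1000 * (i + 1)));
    }

  if (!header_done)
    strcpy (p, "/>");           /* Empty list; self-close the header.  */
  else
    strcpy (p, "</library-list-svr4>");
}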
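Finally, the SIGCHLD setup at the end of initialize_low is the standard sigaction pattern: zero the struct, install the handler, block nothing extra while it runs, and set SA_RESTART so system calls interrupted by the handler are restarted instead of failing with EINTR. A self-contained sketch of the same pattern; the empty handler body and the install_sigchld_handler name are placeholders, not gdbserver's actual handler.

#include <signal.h>
#include <string.h>

static void
sigchld_handler (int signo)
{
  (void) signo;                 /* Placeholder body.  */
}

/* Sketch: install SIGCHLD handling the way initialize_low does.  */
static void
install_sigchld_handler (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}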