OLD | NEW |
(Empty) | |
| 1 // Copyright 2014 The Crashpad Authors. All rights reserved. |
| 2 // |
| 3 // Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 // you may not use this file except in compliance with the License. |
| 5 // You may obtain a copy of the License at |
| 6 // |
| 7 // http://www.apache.org/licenses/LICENSE-2.0 |
| 8 // |
| 9 // Unless required by applicable law or agreed to in writing, software |
| 10 // distributed under the License is distributed on an "AS IS" BASIS, |
| 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 // See the License for the specific language governing permissions and |
| 13 // limitations under the License. |
| 14 |
| 15 #include "util/mac/process_reader.h" |
| 16 |
| 17 #include <AvailabilityMacros.h>
| 18 #include <mach-o/loader.h>
| 19 #include <mach/mach_vm.h>
| #include <sys/sysctl.h>
| #include <sys/time.h>
| 20 |
| 21 #include <algorithm> |
| 22 |
| 23 #include "base/logging.h" |
| 24 #include "base/mac/mach_logging.h" |
| 25 #include "base/mac/scoped_mach_port.h" |
| 26 #include "base/mac/scoped_mach_vm.h" |
| 27 |
| 28 namespace { |
| 29 |
| 30 void MachTimeValueToTimeval(const time_value& mach, timeval* tv) { |
| 31 tv->tv_sec = mach.seconds; |
| 32 tv->tv_usec = mach.microseconds; |
| 33 } |
| 34 |
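| // Calls mach_vm_region_recurse() on |task| at |*address|, descending into
| // submaps by increasing |*depth| until the deepest non-submap map entry is
| // found, and reports that entry’s |protection| and |user_tag|.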
| 35 kern_return_t MachVMRegionRecurseDeepest(mach_port_t task, |
| 36 mach_vm_address_t* address, |
| 37 mach_vm_size_t* size, |
| 38 natural_t* depth, |
| 39 vm_prot_t* protection, |
| 40 unsigned int* user_tag) { |
| 41 vm_region_submap_short_info_64 submap_info; |
| 42 mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64; |
| 43 while (true) { |
| 44 kern_return_t kr = mach_vm_region_recurse( |
| 45 task, |
| 46 address, |
| 47 size, |
| 48 depth, |
| 49 reinterpret_cast<vm_region_recurse_info_t>(&submap_info), |
| 50 &count); |
| 51 if (kr != KERN_SUCCESS) { |
| 52 return kr; |
| 53 } |
| 54 |
| 55 if (!submap_info.is_submap) { |
| 56 *protection = submap_info.protection; |
| 57 *user_tag = submap_info.user_tag; |
| 58 return KERN_SUCCESS; |
| 59 } |
| 60 |
| 61 ++*depth; |
| 62 } |
| 63 } |
| 64 |
| 65 } // namespace |
| 66 |
| 67 namespace crashpad { |
| 68 |
| 69 ProcessReaderThread::ProcessReaderThread() |
| 70 : thread_context(), |
| 71 float_context(), |
| 72 debug_context(), |
| 73 id(0), |
| 74 stack_region_address(0), |
| 75 stack_region_size(0), |
| 76 thread_specific_data_address(0), |
| 77 port(MACH_PORT_NULL), |
| 78 suspend_count(0), |
| 79 priority(0) { |
| 80 } |
| 81 |
| 82 ProcessReaderModule::ProcessReaderModule() : name(), address(0), timestamp(0) { |
| 83 } |
| 84 |
| 85 ProcessReaderModule::~ProcessReaderModule() { |
| 86 } |
| 87 |
| 88 ProcessReader::ProcessReader() |
| 89 : kern_proc_info_(), |
| 90 threads_(), |
| 91 modules_(), |
| 92 task_memory_(), |
| 93 task_(MACH_PORT_NULL), |
| 94 initialized_(), |
| 95 is_64_bit_(false), |
| 96 initialized_threads_(false), |
| 97 initialized_modules_(false) { |
| 98 } |
| 99 |
| 100 ProcessReader::~ProcessReader() { |
| 101 for (const ProcessReaderThread& thread : threads_) { |
| 102 kern_return_t kr = mach_port_deallocate(mach_task_self(), thread.port); |
| 103 MACH_LOG_IF(ERROR, kr != KERN_SUCCESS, kr) << "mach_port_deallocate"; |
| 104 } |
| 105 } |
| 106 |
| 107 bool ProcessReader::Initialize(mach_port_t task) { |
| 108 INITIALIZATION_STATE_SET_INITIALIZING(initialized_); |
| 109 |
| 110 pid_t pid; |
| 111 kern_return_t kr = pid_for_task(task, &pid); |
| 112 if (kr != KERN_SUCCESS) { |
| 113 MACH_LOG(ERROR, kr) << "pid_for_task"; |
| 114 return false; |
| 115 } |
| 116 |
| 117 int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid}; |
| 118 size_t len = sizeof(kern_proc_info_); |
| 119 if (sysctl(mib, arraysize(mib), &kern_proc_info_, &len, NULL, 0) != 0) { |
| 120 PLOG(ERROR) << "sysctl for pid " << pid; |
| 121 return false; |
| 122 } |
| 123 |
| 124 DCHECK_EQ(kern_proc_info_.kp_proc.p_pid, pid); |
| 125 |
| 126 is_64_bit_ = kern_proc_info_.kp_proc.p_flag & P_LP64; |
| 127 |
| 128 task_memory_.reset(new TaskMemory(task)); |
| 129 task_ = task; |
| 130 |
| 131 INITIALIZATION_STATE_SET_VALID(initialized_); |
| 132 return true; |
| 133 } |
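| // A minimal usage sketch, assuming the caller already holds a send right to
| // the target task, for example one obtained from task_for_pid() (which
| // normally requires elevated privileges):
| //
| //   task_t task;
| //   if (task_for_pid(mach_task_self(), pid, &task) == KERN_SUCCESS) {
| //     crashpad::ProcessReader reader;
| //     if (reader.Initialize(task)) {
| //       for (const crashpad::ProcessReaderThread& thread :
| //            reader.Threads()) {
| //         // Examine thread.id, thread.stack_region_address, and so on.
| //       }
| //     }
| //   }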
| 134 |
| 135 void ProcessReader::StartTime(timeval* start_time) const { |
| 136 INITIALIZATION_STATE_DCHECK_VALID(initialized_); |
| 137 *start_time = kern_proc_info_.kp_proc.p_starttime; |
| 138 } |
| 139 |
| 140 bool ProcessReader::CPUTimes(timeval* user_time, timeval* system_time) const { |
| 141 INITIALIZATION_STATE_DCHECK_VALID(initialized_); |
| 142 |
| 143 // Calculate user and system time the same way the kernel does for |
| 144 // getrusage(). See 10.9.2 xnu-2422.90.20/bsd/kern/kern_resource.c calcru(). |
| 145 timerclear(user_time); |
| 146 timerclear(system_time); |
| 147 |
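| // The totals come from two sources: TASK_BASIC_INFO_64 reports time
| // accumulated by threads that have already terminated, and
| // TASK_THREAD_TIMES_INFO reports time consumed by the task’s live threads.
| // Their sum is the whole task’s CPU usage.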
| 148 // As of the 10.8 SDK, the preferred flavor is MACH_TASK_BASIC_INFO.
| 149 // TASK_BASIC_INFO_64 is equivalent and works on earlier systems.
| 150 task_basic_info_64 task_basic_info; |
| 151 mach_msg_type_number_t task_basic_info_count = TASK_BASIC_INFO_64_COUNT; |
| 152 kern_return_t kr = task_info(task_, |
| 153 TASK_BASIC_INFO_64, |
| 154 reinterpret_cast<task_info_t>(&task_basic_info), |
| 155 &task_basic_info_count); |
| 156 if (kr != KERN_SUCCESS) { |
| 157 MACH_LOG(WARNING, kr) << "task_info TASK_BASIC_INFO_64"; |
| 158 return false; |
| 159 } |
| 160 |
| 161 task_thread_times_info_data_t task_thread_times; |
| 162 mach_msg_type_number_t task_thread_times_count = TASK_THREAD_TIMES_INFO_COUNT; |
| 163 kr = task_info(task_, |
| 164 TASK_THREAD_TIMES_INFO, |
| 165 reinterpret_cast<task_info_t>(&task_thread_times), |
| 166 &task_thread_times_count); |
| 167 if (kr != KERN_SUCCESS) { |
| 168 MACH_LOG(WARNING, kr) << "task_info TASK_THREAD_TIMES_INFO";
| 169 return false; |
| 170 } |
| 171 |
| 172 MachTimeValueToTimeval(task_basic_info.user_time, user_time); |
| 173 MachTimeValueToTimeval(task_basic_info.system_time, system_time); |
| 174 |
| 175 timeval thread_user_time; |
| 176 MachTimeValueToTimeval(task_thread_times.user_time, &thread_user_time); |
| 177 timeval thread_system_time; |
| 178 MachTimeValueToTimeval(task_thread_times.system_time, &thread_system_time); |
| 179 |
| 180 timeradd(user_time, &thread_user_time, user_time); |
| 181 timeradd(system_time, &thread_system_time, system_time); |
| 182 |
| 183 return true; |
| 184 } |
| 185 |
| 186 const std::vector<ProcessReaderThread>& ProcessReader::Threads() { |
| 187 INITIALIZATION_STATE_DCHECK_VALID(initialized_); |
| 188 |
| 189 if (!initialized_threads_) { |
| 190 InitializeThreads(); |
| 191 } |
| 192 |
| 193 return threads_; |
| 194 } |
| 195 |
| 196 const std::vector<ProcessReaderModule>& ProcessReader::Modules() { |
| 197 INITIALIZATION_STATE_DCHECK_VALID(initialized_); |
| 198 |
| 199 if (!initialized_modules_) { |
| 200 InitializeModules(); |
| 201 } |
| 202 |
| 203 return modules_; |
| 204 } |
| 205 |
| 206 void ProcessReader::InitializeThreads() { |
| 207 DCHECK(!initialized_threads_); |
| 208 DCHECK(threads_.empty()); |
| 209 |
| 210 initialized_threads_ = true; |
| 211 |
| 212 thread_act_array_t threads; |
| 213 mach_msg_type_number_t thread_count = 0; |
| 214 kern_return_t kr = task_threads(task_, &threads, &thread_count); |
| 215 if (kr != KERN_SUCCESS) { |
| 216 MACH_LOG(WARNING, kr) << "task_threads"; |
| 217 return; |
| 218 } |
| 219 |
| 220 // The send rights in the |threads| array won’t be managed by anything
| 221 // until they’re added to |threads_| by the loop below. Any early return
| 222 // (or exception) that happens between here and the completion of the loop
| 223 // will leak thread port send rights.
| 224 |
| 225 base::mac::ScopedMachVM threads_vm( |
| 226 reinterpret_cast<vm_address_t>(threads), |
| 227 mach_vm_round_page(thread_count * sizeof(*threads))); |
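| // |threads| is out-of-line data vm_allocate()d in this process by the
| // kernel, so its deallocation must cover whole pages; rounding the size up
| // covers the allocation’s true page-aligned extent.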
| 228 |
| 229 for (size_t index = 0; index < thread_count; ++index) { |
| 230 ProcessReaderThread thread; |
| 231 thread.port = threads[index]; |
| 232 |
| 233 #if defined(ARCH_CPU_X86_FAMILY) |
| 234 const thread_state_flavor_t kThreadStateFlavor = |
| 235 Is64Bit() ? x86_THREAD_STATE64 : x86_THREAD_STATE32; |
| 236 mach_msg_type_number_t thread_state_count = |
| 237 Is64Bit() ? x86_THREAD_STATE64_COUNT : x86_THREAD_STATE32_COUNT; |
| 238 |
| 239 // TODO(mark): Use the AVX variants instead of the FLOAT variants? They’re |
| 240 // supported on 10.6 and later. |
| 241 const thread_state_flavor_t kFloatStateFlavor = |
| 242 Is64Bit() ? x86_FLOAT_STATE64 : x86_FLOAT_STATE32; |
| 243 mach_msg_type_number_t float_state_count = |
| 244 Is64Bit() ? x86_FLOAT_STATE64_COUNT : x86_FLOAT_STATE32_COUNT; |
| 245 |
| 246 const thread_state_flavor_t kDebugStateFlavor = |
| 247 Is64Bit() ? x86_DEBUG_STATE64 : x86_DEBUG_STATE32; |
| 248 mach_msg_type_number_t debug_state_count = |
| 249 Is64Bit() ? x86_DEBUG_STATE64_COUNT : x86_DEBUG_STATE32_COUNT; |
| 250 #endif |
| 251 |
| 252 kr = thread_get_state( |
| 253 thread.port, |
| 254 kThreadStateFlavor, |
| 255 reinterpret_cast<thread_state_t>(&thread.thread_context), |
| 256 &thread_state_count); |
| 257 if (kr != KERN_SUCCESS) { |
| 258 MACH_LOG(ERROR, kr) << "thread_get_state(" << kThreadStateFlavor << ")"; |
| 259 continue; |
| 260 } |
| 261 |
| 262 kr = thread_get_state( |
| 263 thread.port, |
| 264 kFloatStateFlavor, |
| 265 reinterpret_cast<thread_state_t>(&thread.float_context), |
| 266 &float_state_count); |
| 267 if (kr != KERN_SUCCESS) { |
| 268 MACH_LOG(ERROR, kr) << "thread_get_state(" << kFloatStateFlavor << ")"; |
| 269 continue; |
| 270 } |
| 271 |
| 272 kr = thread_get_state( |
| 273 thread.port, |
| 274 kDebugStateFlavor, |
| 275 reinterpret_cast<thread_state_t>(&thread.debug_context), |
| 276 &debug_state_count); |
| 277 if (kr != KERN_SUCCESS) { |
| 278 MACH_LOG(ERROR, kr) << "thread_get_state(" << kDebugStateFlavor << ")"; |
| 279 continue; |
| 280 } |
| 281 |
| 282 thread_basic_info basic_info; |
| 283 mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT; |
| 284 kr = thread_info(thread.port, |
| 285 THREAD_BASIC_INFO, |
| 286 reinterpret_cast<thread_info_t>(&basic_info), |
| 287 &count); |
| 288 if (kr != KERN_SUCCESS) { |
| 289 MACH_LOG(WARNING, kr) << "thread_info(THREAD_BASIC_INFO)"; |
| 290 } else { |
| 291 thread.suspend_count = basic_info.suspend_count; |
| 292 } |
| 293 |
| 294 thread_identifier_info identifier_info; |
| 295 count = THREAD_IDENTIFIER_INFO_COUNT; |
| 296 kr = thread_info(thread.port, |
| 297 THREAD_IDENTIFIER_INFO, |
| 298 reinterpret_cast<thread_info_t>(&identifier_info), |
| 299 &count); |
| 300 if (kr != KERN_SUCCESS) { |
| 301 MACH_LOG(WARNING, kr) << "thread_info(THREAD_IDENTIFIER_INFO)"; |
| 302 } else { |
| 303 thread.id = identifier_info.thread_id; |
| 304 |
| 305 // thread_identifier_info::thread_handle contains the base of the |
| 306 // thread-specific data area, which on x86 and x86_64 is the base address
| 307 // of the thread’s %gs segment. 10.9.2 xnu-2422.90.20/osfmk/kern/thread.c
| 308 // thread_info_internal() gets the value from |
| 309 // machine_thread::cthread_self, which is the same value used to set the |
| 310 // %gs base in xnu-2422.90.20/osfmk/i386/pcb_native.c |
| 311 // act_machine_switch_pcb(). |
| 312 // |
| 313 // This address is the internal pthread’s _pthread::tsd[], an array of |
| 314 // void* values that can be indexed by pthread_key_t values. |
| 315 thread.thread_specific_data_address = identifier_info.thread_handle; |
| 316 } |
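| // As a hypothetical illustration (nothing here does this), the value in
| // TSD slot |key| of a 64-bit process could be fetched from the thread’s
| // TSD area with something like
| //   uint64_t slot;
| //   task_memory_->Read(
| //       thread.thread_specific_data_address + key * sizeof(uint64_t),
| //       sizeof(slot),
| //       &slot);
| // using 4-byte slots for a 32-bit process.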
| 317 |
| 318 thread_precedence_policy precedence; |
| 319 count = THREAD_PRECEDENCE_POLICY_COUNT; |
| 320 boolean_t get_default = FALSE; |
| 321 kr = thread_policy_get(thread.port, |
| 322 THREAD_PRECEDENCE_POLICY, |
| 323 reinterpret_cast<thread_policy_t>(&precedence), |
| 324 &count, |
| 325 &get_default); |
| 326 if (kr != KERN_SUCCESS) { |
| 327 MACH_LOG(INFO, kr) << "thread_policy_get"; |
| 328 } else { |
| 329 thread.priority = precedence.importance; |
| 330 } |
| 331 |
| 332 #if defined(ARCH_CPU_X86_FAMILY) |
| 333 mach_vm_address_t stack_pointer = Is64Bit() |
| 334 ? thread.thread_context.t64.__rsp |
| 335 : thread.thread_context.t32.__esp; |
| 336 #endif |
| 337 |
| 338 thread.stack_region_address = |
| 339 CalculateStackRegion(stack_pointer, &thread.stack_region_size); |
| 340 |
| 341 threads_.push_back(thread); |
| 342 } |
| 343 } |
| 344 |
| 345 void ProcessReader::InitializeModules() { |
| 346 DCHECK(!initialized_modules_); |
| 347 DCHECK(modules_.empty()); |
| 348 |
| 349 initialized_modules_ = true; |
| 350 |
| 351 // TODO(mark): Complete this implementation. The implementation depends on |
| 352 // process_types, which cannot land yet because it depends on this file, |
| 353 // process_reader. This temporary “cut” was made to avoid a review that’s too |
| 354 // large. Yes, this circular dependency is unfortunate. Suggestions are |
| 355 // welcome. |
| 356 } |
| 357 |
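| // Returns the base address of the memory that should be captured for a
| // stack whose stack pointer is |stack_pointer|, and stores the number of
| // bytes to capture in |stack_region_size|. On failure, or when
| // |stack_pointer| doesn’t point into mapped memory, returns 0 with a 0 size.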
| 358 mach_vm_address_t ProcessReader::CalculateStackRegion( |
| 359 mach_vm_address_t stack_pointer, |
| 360 mach_vm_size_t* stack_region_size) { |
| 361 INITIALIZATION_STATE_DCHECK_VALID(initialized_); |
| 362 |
| 363 // For pthreads, it may be possible to compute the stack region based on the |
| 364 // internal _pthread::stackaddr and _pthread::stacksize. The _pthread struct |
| 365 // for a thread can be located at TSD slot 0, or the known offsets of |
| 366 // stackaddr and stacksize from the TSD area could be used. |
| 367 mach_vm_address_t region_base = stack_pointer; |
| 368 mach_vm_size_t region_size; |
| 369 natural_t depth = 0; |
| 370 vm_prot_t protection; |
| 371 unsigned int user_tag; |
| 372 kern_return_t kr = MachVMRegionRecurseDeepest( |
| 373 task_, ®ion_base, ®ion_size, &depth, &protection, &user_tag); |
| 374 if (kr != KERN_SUCCESS) { |
| 375 MACH_LOG(INFO, kr) << "mach_vm_region_recurse"; |
| 376 *stack_region_size = 0; |
| 377 return 0; |
| 378 } |
| 379 |
| 380 if (region_base > stack_pointer) { |
| 381 // There’s nothing mapped at the stack pointer’s address. Something may have |
| 382 // trashed the stack pointer. Note that this shouldn’t happen for a normal |
| 383 // stack guard region violation because the guard region is mapped but has |
| 384 // VM_PROT_NONE protection. |
| 385 *stack_region_size = 0; |
| 386 return 0; |
| 387 } |
| 388 |
| 389 mach_vm_address_t start_address = stack_pointer; |
| 390 |
| 391 if ((protection & VM_PROT_READ) == 0) { |
| 392 // If the region isn’t readable, the stack pointer probably points to the |
| 393 // guard region. Don’t include it as part of the stack, and don’t include |
| 394 // anything at any lower memory address. The code below may still possibly |
| 395 // find the real stack region at a memory address higher than this region. |
| 396 start_address = region_base + region_size; |
| 397 } else { |
| 398 // If the ABI requires a red zone, adjust the region to include it if |
| 399 // possible. |
| 400 LocateRedZone(&start_address, ®ion_base, ®ion_size, user_tag); |
| 401 |
| 402 // Regardless of whether the ABI requires a red zone, capture up to |
| 403 // kExtraCaptureSize additional bytes of stack, but only if present in the |
| 404 // region that was already found. |
| 405 const mach_vm_size_t kExtraCaptureSize = 128; |
| 406 start_address = std::max(start_address >= kExtraCaptureSize |
| 407 ? start_address - kExtraCaptureSize |
| 408 : start_address, |
| 409 region_base); |
| 410 |
| 411 // Align start_address to a 16-byte boundary, which can help readers by |
| 412 // ensuring that data is aligned properly. This could page-align instead, |
| 413 // but that might be wasteful. |
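| // For example, with 16-byte alignment, a start_address of 0x7fff5fbff4c9
| // rounds down to 0x7fff5fbff4c0.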
| 414 const mach_vm_size_t kDesiredAlignment = 16; |
| 415 start_address &= ~(kDesiredAlignment - 1); |
| 416 DCHECK_GE(start_address, region_base); |
| 417 } |
| 418 |
| 419 region_size -= (start_address - region_base); |
| 420 region_base = start_address; |
| 421 |
| 422 mach_vm_size_t total_region_size = region_size; |
| 423 |
| 424 // The stack region may have gotten split up into multiple abutting regions. |
| 425 // Try to coalesce them. This frequently happens for the main thread’s stack |
| 426 // when setrlimit(RLIMIT_STACK, …) is called. It may also happen if a region |
| 427 // is split up due to an mprotect() or vm_protect() call. |
| 428 // |
| 429 // Stack regions created by the kernel and the pthreads library will be marked |
| 430 // with the VM_MEMORY_STACK user tag. Scanning for multiple adjacent regions |
| 431 // with the same tag should find an entire stack region. Checking that the |
| 432 // protection on individual regions is not VM_PROT_NONE should guarantee that |
| 433 // this algorithm doesn’t collect map entries belonging to another thread’s |
| 434 // stack: well-behaved stacks (such as those created by the kernel and the |
| 435 // pthreads library) have VM_PROT_NONE guard regions at their low-address |
| 436 // ends. |
| 437 // |
| 438 // Other stack regions may not be so well-behaved and thus if user_tag is not |
| 439 // VM_MEMORY_STACK, the single region that was found is used as-is without |
| 440 // trying to merge it with other adjacent regions. |
| 441 if (user_tag == VM_MEMORY_STACK) { |
| 442 mach_vm_address_t try_address = region_base; |
| 443 mach_vm_address_t original_try_address; |
| 444 |
| 445 while (try_address += region_size, |
| 446 original_try_address = try_address, |
| 447 (kr = MachVMRegionRecurseDeepest(task_, |
| 448 &try_address, |
| 449 ®ion_size, |
| 450 &depth, |
| 451 &protection, |
| 452 &user_tag)) == KERN_SUCCESS &&
| 453 try_address == original_try_address && |
| 454 (protection & VM_PROT_READ) != 0 && |
| 455 user_tag == VM_MEMORY_STACK) { |
| 456 total_region_size += region_size; |
| 457 } |
| 458 |
| 459 if (kr != KERN_SUCCESS && kr != KERN_INVALID_ADDRESS) { |
| 460 // Tolerate KERN_INVALID_ADDRESS because it will be returned when there |
| 461 // are no more regions in the map at or above the specified |try_address|. |
| 462 MACH_LOG(INFO, kr) << "mach_vm_region_recurse"; |
| 463 } |
| 464 } |
| 465 |
| 466 *stack_region_size = total_region_size; |
| 467 return region_base; |
| 468 } |
| 469 |
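| // If the ABI defines a red zone below the stack pointer (x86_64 does,
| // 32-bit x86 doesn’t), expands |*start_address| downward to capture it,
| // merging an abutting lower region into |*region_base| and |*region_size|
| // when the red zone crosses a region boundary.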
| 470 void ProcessReader::LocateRedZone(mach_vm_address_t* const start_address, |
| 471 mach_vm_address_t* const region_base, |
| 472 mach_vm_size_t* const region_size,
| 473 const unsigned int user_tag) { |
| 474 #if defined(ARCH_CPU_X86_FAMILY) |
| 475 if (Is64Bit()) { |
| 476 // x86_64 has a red zone. See AMD64 ABI 0.99.6, |
| 477 // http://www.x86-64.org/documentation/abi.pdf, section 3.2.2, “The Stack |
| 478 // Frame”. |
| 479 const mach_vm_size_t kRedZoneSize = 128; |
| 480 mach_vm_address_t red_zone_base = |
| 481 *start_address >= kRedZoneSize ? *start_address - kRedZoneSize : 0; |
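| // For example, a |*start_address| of 0x7fff5fbff000 yields a
| // |red_zone_base| of 0x7fff5fbfef80.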
| 482 bool red_zone_ok = false; |
| 483 if (red_zone_base >= *region_base) { |
| 484 // The red zone is within the region already discovered. |
| 485 red_zone_ok = true; |
| 486 } else if (red_zone_base < *region_base && user_tag == VM_MEMORY_STACK) { |
| 487 // Probe to see if there’s a region immediately below the one already |
| 488 // discovered. |
| 489 mach_vm_address_t red_zone_region_base = red_zone_base; |
| 490 mach_vm_size_t red_zone_region_size; |
| 491 natural_t red_zone_depth = 0; |
| 492 vm_prot_t red_zone_protection; |
| 493 unsigned int red_zone_user_tag; |
| 494 kern_return_t kr = MachVMRegionRecurseDeepest(task_, |
| 495 &red_zone_region_base, |
| 496 &red_zone_region_size, |
| 497 &red_zone_depth, |
| 498 &red_zone_protection, |
| 499 &red_zone_user_tag); |
| 500 if (kr != KERN_SUCCESS) { |
| 501 MACH_LOG(INFO, kr) << "mach_vm_region_recurse"; |
| 502 *start_address = *region_base; |
| 503 } else if (red_zone_region_base + red_zone_region_size == *region_base && |
| 504 (red_zone_protection & VM_PROT_READ) != 0 && |
| 505 red_zone_user_tag == user_tag) { |
| 506 // The region containing the red zone is immediately below the region |
| 507 // already found, it’s readable (not the guard region), and it has the |
| 508 // same user tag as the region already found, so merge them. |
| 509 red_zone_ok = true; |
| 510 *region_base -= red_zone_region_size; |
| 511 *region_size += red_zone_region_size; |
| 512 } |
| 513 } |
| 514 |
| 515 if (red_zone_ok) { |
| 516 // Begin capturing from the base of the red zone (but not the entire |
| 517 // region that encompasses the red zone). |
| 518 *start_address = red_zone_base; |
| 519 } else { |
| 520 // The red zone would go lower into another region in memory, but no |
| 521 // region was found. Memory can only be captured to an address as low as |
| 522 // the base address of the region already found. |
| 523 *start_address = *region_base; |
| 524 } |
| 525 } |
| 526 #endif |
| 527 } |
| 528 |
| 529 } // namespace crashpad |