Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2017 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "base/profiler/native_stack_sampler.h" | |
| 6 | |
| 7 #include <dlfcn.h> | |
| 8 #include <libkern/OSByteOrder.h> | |
| 9 #include <libunwind.h> | |
| 10 #include <mach-o/swap.h> | |
| 11 #include <mach/kern_return.h> | |
| 12 #include <mach/mach.h> | |
| 13 #include <mach/thread_act.h> | |
| 14 #include <pthread.h> | |
| 15 #include <sys/syslimits.h> | |
| 16 | |
| 17 #include <map> | |
| 18 #include <memory> | |
| 19 | |
| 20 #include "base/logging.h" | |
| 21 #include "base/macros.h" | |
| 22 #include "base/memory/ptr_util.h" | |
| 23 #include "base/strings/string_number_conversions.h" | |
| 24 | |
| 25 namespace base { | |
| 26 | |
| 27 namespace { | |
| 28 | |
| 29 // Stack walking -------------------------------------------------------------- | |
| 30 | |
| 31 // Fills |state| with |target_thread|'s context. | |
| 32 // | |
| 33 // Note that this is called while a thread is suspended. Make very very sure | |
| 34 // that no shared resources (e.g. memory allocators) are used for the duration | |
| 35 // of this function. | |
| 36 bool GetThreadState(thread_act_t target_thread, x86_thread_state64_t* state) { | |
| 37 mach_msg_type_number_t count = | |
| 38 static_cast<mach_msg_type_number_t>(x86_THREAD_STATE64_COUNT); | |
| 39 return thread_get_state(target_thread, x86_THREAD_STATE64, | |
| 40 reinterpret_cast<thread_state_t>(state), | |
| 41 &count) == KERN_SUCCESS; | |
| 42 } | |
| 43 | |
| 44 // If the value at |pointer| points to the original stack, rewrites it to point | |
| 45 // to the corresponding location in the copied stack. | |
| 46 // | |
| 47 // Note that this is called while a thread is suspended. Make very very sure | |
| 48 // that no shared resources (e.g. memory allocators) are used for the duration | |
| 49 // of this function. | |
| 50 uint64_t RewritePointerIfInOriginalStack(uint64_t* original_stack_bottom, | |
| 51 uint64_t* original_stack_top, | |
| 52 uint64_t* stack_copy_bottom, | |
| 53 uint64_t pointer) { | |
| 54 uint64_t original_stack_bottom_int = | |
| 55 reinterpret_cast<uint64_t>(original_stack_bottom); | |
| 56 uint64_t original_stack_top_int = | |
| 57 reinterpret_cast<uint64_t>(original_stack_top); | |
| 58 uint64_t stack_copy_bottom_int = | |
| 59 reinterpret_cast<uint64_t>(stack_copy_bottom); | |
| 60 | |
| 61 if ((pointer < original_stack_bottom_int) || | |
| 62 (pointer >= original_stack_top_int)) { | |
| 63 return pointer; | |
| 64 } | |
| 65 | |
| 66 return stack_copy_bottom_int + (pointer - original_stack_bottom_int); | |
| 67 } | |
| 68 | |
| 69 // Copies the stack to a buffer while rewriting possible pointers to locations | |
| 70 // within the stack to point to the corresponding locations in the copy. This is | |
| 71 // necessary to handle stack frames with dynamic stack allocation, where a | |
| 72 // pointer to the beginning of the dynamic allocation area is stored on the | |
| 73 // stack and/or in a non-volatile register. | |
| 74 // | |
| 75 // Eager rewriting of anything that looks like a pointer to the stack, as done | |
| 76 // in this function, does not adversely affect the stack unwinding. The only | |
| 77 // other values on the stack the unwinding depends on are return addresses, | |
| 78 // which should not point within the stack memory. The rewriting is guaranteed | |
| 79 // to catch all pointers because the stacks are guaranteed by the ABI to be | |
| 80 // sizeof(void*) aligned. | |
| 81 // | |
| 82 // Note that this is called while a thread is suspended. Make very very sure | |
| 83 // that no shared resources (e.g. memory allocators) are used for the duration | |
| 84 // of this function. | |
| 85 void CopyStackAndRewritePointers(void* dest, | |
| 86 void* from, | |
| 87 void* to, | |
| 88 x86_thread_state64_t* thread_state) | |
| 89 NO_SANITIZE("address") { | |
| 90 uint64_t* original_stack_bottom = static_cast<uint64_t*>(from); | |
| 91 uint64_t* original_stack_top = static_cast<uint64_t*>(to); | |
| 92 uint64_t* stack_copy_bottom = static_cast<uint64_t*>(dest); | |
| 93 | |
| 94 size_t count = original_stack_top - original_stack_bottom; | |
| 95 for (size_t pos = 0; pos < count; ++pos) { | |
| 96 stack_copy_bottom[pos] = RewritePointerIfInOriginalStack( | |
| 97 original_stack_bottom, original_stack_top, stack_copy_bottom, | |
| 98 original_stack_bottom[pos]); | |
| 99 } | |
| 100 | |
| 101 thread_state->__rbp = | |
| 102 RewritePointerIfInOriginalStack(original_stack_bottom, original_stack_top, | |
| 103 stack_copy_bottom, thread_state->__rbp); | |
| 104 thread_state->__rsp = | |
| 105 RewritePointerIfInOriginalStack(original_stack_bottom, original_stack_top, | |
| 106 stack_copy_bottom, thread_state->__rsp); | |
| 107 } | |
| 108 | |
| 109 // Walks the stack represented by |unwind_context|, calling back to the provided | |
| 110 // lambda for each frame. Returns false if an error occurred, otherwise returns | |
| 111 // true. | |
| 112 template <typename StackFrameCallback> | |
| 113 bool WalkStackFromContext(unw_context_t* unwind_context, | |
| 114 size_t* frame_count, | |
| 115 const StackFrameCallback& callback) { | |
| 116 unw_cursor_t unwind_cursor; | |
| 117 unw_init_local(&unwind_cursor, unwind_context); | |
| 118 | |
| 119 int step_result; | |
| 120 unw_word_t ip; | |
| 121 do { | |
| 122 ++(*frame_count); | |
| 123 unw_get_reg(&unwind_cursor, UNW_REG_IP, &ip); | |
| 124 | |
| 125 callback(static_cast<uintptr_t>(ip)); | |
| Mike Wittman (2017/03/03 20:22:22): This and a few other places are using uintptr_t fo | |
| Avi (use Gerrit) (2017/03/03 20:36:47): Done. | |
| 126 | |
| 127 step_result = unw_step(&unwind_cursor); | |
| 128 } while (step_result > 0); | |
| 129 | |
| 130 if (step_result != 0) | |
| 131 return false; | |
| 132 | |
| 133 return true; | |
| 134 } | |
| 135 | |
| 136 bool IsIPInValidImage(unw_context_t* unwind_context) { | |
| 137 unw_cursor_t unwind_cursor; | |
| 138 unw_init_local(&unwind_cursor, unwind_context); | |
| 139 unw_proc_info_t proc_info; | |
| 140 unw_get_proc_info(&unwind_cursor, &proc_info); | |
| 141 return proc_info.extra != 0; | |
| 142 } | |
| 143 | |
| 144 // Walks the stack represented by |thread_state|, calling back to the provided | |
| 145 // lambda for each frame. | |
| 146 template <typename StackFrameCallback> | |
| 147 void WalkStack(const x86_thread_state64_t& thread_state, | |
| 148 uint64_t stack_top, | |
| 149 const StackFrameCallback& callback) { | |
| 150 size_t frame_count = 0; | |
| 151 // This uses libunwind to walk the stack. libunwind is designed to be used by | |
| 152 // a thread to walk its own stack. This creates two problems. | |
| 153 | |
| 154 // Problem 1: There is no official way to create a unw_context other than to | |
| 155 // create it from the current state of the current thread's stack. To get | |
| 156 // around this, forge a context. A unw_context is just a copy of the 16 main | |
| 157 // registers followed by the instruction pointer, nothing more. | |
| 158 // Coincidentally, the first 17 items of the x86_thread_state64_t type are | |
| 159 // exactly those registers in exactly the same order, so just bulk copy them | |
| 160 // over. | |
| 161 unw_context_t unwind_context; | |
| 162 memcpy(&unwind_context, &thread_state, sizeof(uint64_t) * 17); | |
| 163 bool result = WalkStackFromContext(&unwind_context, &frame_count, callback); | |
| 164 | |
| 165 if (!result) | |
| 166 return; | |
| 167 | |
| 168 if (frame_count == 1) { | |
| 169 // Problem 2: Because libunwind is designed to be triggered by user code on | |
| 170 // its own thread, if it hits a library that has no unwind info for the | |
| 171 // function that is being executed, it just stops. This isn't a problem in | |
| 172 // the normal case, but in this case, it's quite possible that the stack | |
| 173 // being walked is stopped in a function that bridges to the kernel and thus | |
| 174 // is missing the unwind info. | |
| 175 // | |
| 176 // If so, cheat by scanning the stack and trying again. Only do this once, | |
| 177 // and only if the first time using libunwind fails after one frame. | |
| 178 bool ip_in_valid_image = false; | |
| 179 uint64_t& rsp = unwind_context.data[7]; | |
| 180 uint64_t& rip = unwind_context.data[16]; | |
| 181 do { | |
| 182 rip = *reinterpret_cast<uint64_t*>(rsp); // rip = *rsp | |
| 183 rsp += 8; // rsp++ | |
| 184 ip_in_valid_image = IsIPInValidImage(&unwind_context); | |
| 185 } while (!ip_in_valid_image && rsp < stack_top); | |
| 186 | |
| 187 if (ip_in_valid_image) | |
| 188 WalkStackFromContext(&unwind_context, &frame_count, callback); | |
| 189 } | |
| 190 } | |
| 191 | |
| 192 // Module identifiers --------------------------------------------------------- | |
| 193 | |
| 194 // Fills |id| with the UUID of the x86_64 Mach-O binary with the header | |
| 195 // |mach_header|. Returns false if the binary is malformed or does not contain | |
| 196 // the UUID load command. | |
| 197 bool GetUUID(const mach_header_64* mach_header, unsigned char* id) { | |
| 198 size_t offset = sizeof(mach_header_64); | |
| 199 size_t offset_limit = sizeof(mach_header_64) + mach_header->sizeofcmds; | |
| 200 for (uint32_t i = 0; (i < mach_header->ncmds) && | |
| 201 (offset + sizeof(load_command) < offset_limit); | |
| 202 ++i) { | |
| 203 const load_command* current_cmd = reinterpret_cast<const load_command*>( | |
| 204 reinterpret_cast<const uint8_t*>(mach_header) + offset); | |
| 205 | |
| 206 if (offset + current_cmd->cmdsize > offset_limit) { | |
| 207 // This command runs off the end of the command list. This is malformed. | |
| 208 return false; | |
| 209 } | |
| 210 | |
| 211 if (current_cmd->cmd == LC_UUID) { | |
| 212 if (current_cmd->cmdsize < sizeof(uuid_command)) { | |
| 213 // This "UUID command" is too small. This is malformed. | |
| 214 return false; | |
| 215 } | |
| 216 | |
| 217 const uuid_command* uuid_cmd = | |
| 218 reinterpret_cast<const uuid_command*>(current_cmd); | |
| 219 static_assert(sizeof(uuid_cmd->uuid) == sizeof(uuid_t), | |
| 220 "UUID field of UUID command should be 16 bytes."); | |
| 221 memcpy(id, &uuid_cmd->uuid, sizeof(uuid_t)); | |
| 222 return true; | |
| 223 } | |
| 224 offset += current_cmd->cmdsize; | |
| 225 } | |
| 226 return false; | |
| 227 } | |
| 228 | |
| 229 // Returns the hex encoding of a 16-byte ID for the binary loaded at | |
| 230 // |module_addr|. Returns an empty string if the UUID cannot be found at | |
| 231 // |module_addr|. | |
| 232 std::string GetUniqueId(const void* module_addr) { | |
| 233 const mach_header_64* mach_header = | |
| 234 reinterpret_cast<const mach_header_64*>(module_addr); | |
| 235 DCHECK_EQ(MH_MAGIC_64, mach_header->magic); | |
| 236 | |
| 237 unsigned char id[sizeof(uuid_t)]; | |
| 238 if (!GetUUID(mach_header, id)) | |
| 239 return ""; | |
| 240 return HexEncode(id, sizeof(uuid_t)); | |
| 241 } | |
| 242 | |
| 243 // Gets the index for the Module containing |instruction_pointer| in | |
| 244 // |modules|, adding it if it's not already present. Returns | |
| 245 // StackSamplingProfiler::Frame::kUnknownModuleIndex if no Module can be | |
| 246 // determined for |instruction_pointer|. | |
| 247 size_t GetModuleIndex(const uintptr_t instruction_pointer, | |
| 248 std::vector<StackSamplingProfiler::Module>* modules, | |
| 249 std::map<const void*, size_t>* profile_module_index) { | |
| 250 Dl_info inf; | |
| 251 if (!dladdr(reinterpret_cast<const void*>(instruction_pointer), &inf)) | |
| 252 return StackSamplingProfiler::Frame::kUnknownModuleIndex; | |
| 253 | |
| 254 auto module_index = profile_module_index->find(inf.dli_fbase); | |
| 255 if (module_index == profile_module_index->end()) { | |
| 256 StackSamplingProfiler::Module module( | |
| 257 reinterpret_cast<uintptr_t>(inf.dli_fbase), GetUniqueId(inf.dli_fbase), | |
| 258 base::FilePath(inf.dli_fname)); | |
| 259 modules->push_back(module); | |
| 260 module_index = | |
| 261 profile_module_index | |
| 262 ->insert(std::make_pair(inf.dli_fbase, modules->size() - 1)) | |
| 263 .first; | |
| 264 } | |
| 265 return module_index->second; | |
| 266 } | |
| 267 | |
| 268 // ScopedSuspendThread -------------------------------------------------------- | |
| 269 | |
| 270 // Suspends a thread for the lifetime of the object. | |
| 271 class ScopedSuspendThread { | |
| 272 public: | |
| 273 explicit ScopedSuspendThread(mach_port_t thread_port); | |
| 274 ~ScopedSuspendThread(); | |
| 275 | |
| 276 bool was_successful() const { return was_successful_; } | |
| 277 | |
| 278 private: | |
| 279 mach_port_t thread_port_; | |
| 280 bool was_successful_; | |
| 281 | |
| 282 DISALLOW_COPY_AND_ASSIGN(ScopedSuspendThread); | |
| 283 }; | |
| 284 | |
| 285 ScopedSuspendThread::ScopedSuspendThread(mach_port_t thread_port) | |
| 286 : thread_port_(thread_port), | |
| 287 was_successful_(thread_suspend(thread_port) == KERN_SUCCESS) {} | |
| 288 | |
| 289 ScopedSuspendThread::~ScopedSuspendThread() { | |
| 290 if (!was_successful_) | |
| 291 return; | |
| 292 | |
| 293 kern_return_t resume_result = thread_resume(thread_port_); | |
| 294 CHECK_EQ(KERN_SUCCESS, resume_result) << "thread_resume failed"; | |
| 295 } | |
| 296 | |
| 297 // NativeStackSamplerMac ------------------------------------------------------ | |
| 298 | |
| 299 class NativeStackSamplerMac : public NativeStackSampler { | |
| 300 public: | |
| 301 NativeStackSamplerMac(mach_port_t thread_port, | |
| 302 AnnotateCallback annotator, | |
| 303 NativeStackSamplerTestDelegate* test_delegate); | |
| 304 ~NativeStackSamplerMac() override; | |
| 305 | |
| 306 // StackSamplingProfiler::NativeStackSampler: | |
| 307 void ProfileRecordingStarting( | |
| 308 std::vector<StackSamplingProfiler::Module>* modules) override; | |
| 309 void RecordStackSample(StackSamplingProfiler::Sample* sample) override; | |
| 310 void ProfileRecordingStopped() override; | |
| 311 | |
| 312 private: | |
| 313 enum { | |
| 314 // Intended to hold the largest stack used by Chrome. The default macOS main | |
| 315 // thread stack size is 8 MB, and this allows for expansion if it occurs. | |
| 316 kStackCopyBufferSize = 12 * 1024 * 1024 | |
| 317 }; | |
| 318 | |
| 319 // Suspends the thread with |thread_port_|, copies its stack and resumes the | |
| 320 // thread, then records the stack frames and associated modules into |sample|. | |
| 321 void SuspendThreadAndRecordStack(StackSamplingProfiler::Sample* sample); | |
| 322 | |
| 323 // Weak reference: Mach port for thread being profiled. | |
| 324 mach_port_t thread_port_; | |
| 325 | |
| 326 const AnnotateCallback annotator_; | |
| 327 | |
| 328 NativeStackSamplerTestDelegate* const test_delegate_; | |
| 329 | |
| 330 // The stack base address corresponding to |thread_port_|. | |
| 331 const void* const thread_stack_base_address_; | |
| 332 | |
| 333 // Buffer to use for copies of the stack. We use the same buffer for all the | |
| 334 // samples to avoid the overhead of multiple allocations and frees. | |
| 335 const std::unique_ptr<unsigned char[]> stack_copy_buffer_; | |
| 336 | |
| 337 // Weak. Points to the modules associated with the profile being recorded | |
| 338 // between ProfileRecordingStarting() and ProfileRecordingStopped(). | |
| 339 std::vector<StackSamplingProfiler::Module>* current_modules_ = nullptr; | |
| 340 | |
| 341 // Maps a module's base address to the corresponding Module's index within | |
| 342 // current_modules_. | |
| 343 std::map<const void*, size_t> profile_module_index_; | |
| 344 | |
| 345 DISALLOW_COPY_AND_ASSIGN(NativeStackSamplerMac); | |
| 346 }; | |
| 347 | |
| 348 NativeStackSamplerMac::NativeStackSamplerMac( | |
| 349 mach_port_t thread_port, | |
| 350 AnnotateCallback annotator, | |
| 351 NativeStackSamplerTestDelegate* test_delegate) | |
| 352 : thread_port_(thread_port), | |
| 353 annotator_(annotator), | |
| 354 test_delegate_(test_delegate), | |
| 355 thread_stack_base_address_( | |
| 356 pthread_get_stackaddr_np(pthread_from_mach_thread_np(thread_port))), | |
| 357 stack_copy_buffer_(new unsigned char[kStackCopyBufferSize]) { | |
| 358 DCHECK(annotator_); | |
| 359 | |
| 360 // This class suspends threads, and those threads might be suspended in dyld. | |
| 361 // Therefore, for any dynamically linked system functions that will be used | |
| 362 // while threads are suspended, make calls to them now to make sure that | |
| 363 // they are linked up. | |
| 364 x86_thread_state64_t thread_state; | |
| 365 GetThreadState(thread_port_, &thread_state); | |
| 366 } | |
| 367 | |
| 368 NativeStackSamplerMac::~NativeStackSamplerMac() {} | |
| 369 | |
| 370 void NativeStackSamplerMac::ProfileRecordingStarting( | |
| 371 std::vector<StackSamplingProfiler::Module>* modules) { | |
| 372 current_modules_ = modules; | |
| 373 profile_module_index_.clear(); | |
| 374 } | |
| 375 | |
| 376 void NativeStackSamplerMac::RecordStackSample( | |
| 377 StackSamplingProfiler::Sample* sample) { | |
| 378 DCHECK(current_modules_); | |
| 379 | |
| 380 if (!stack_copy_buffer_) | |
| 381 return; | |
| 382 | |
| 383 SuspendThreadAndRecordStack(sample); | |
| 384 } | |
| 385 | |
| 386 void NativeStackSamplerMac::ProfileRecordingStopped() { | |
| 387 current_modules_ = nullptr; | |
| 388 } | |
| 389 | |
| 390 void NativeStackSamplerMac::SuspendThreadAndRecordStack( | |
| 391 StackSamplingProfiler::Sample* sample) { | |
| 392 x86_thread_state64_t thread_state; | |
| 393 | |
| 394 // Copy the stack. | |
| 395 | |
| 396 uint64_t new_stack_top = 0; | |
| 397 { | |
| 398 // IMPORTANT NOTE: Do not do ANYTHING in this scope that might | |
| 399 // allocate memory, including indirectly via use of DCHECK/CHECK or other | |
| 400 // logging statements. Otherwise this code can deadlock on heap locks in the | |
| 401 // default heap acquired by the target thread before it was suspended. | |
| 402 ScopedSuspendThread suspend_thread(thread_port_); | |
| 403 if (!suspend_thread.was_successful()) | |
| 404 return; | |
| 405 | |
| 406 if (!GetThreadState(thread_port_, &thread_state)) | |
| 407 return; | |
| 408 uint64_t stack_top = reinterpret_cast<uint64_t>(thread_stack_base_address_); | |
| 409 uint64_t stack_bottom = thread_state.__rsp; | |
| 410 uint64_t stack_size = stack_top - stack_bottom; | |
| 411 | |
| 412 if (stack_size > kStackCopyBufferSize) | |
| 413 return; | |
| 414 | |
| 415 (*annotator_)(sample); | |
| 416 | |
| 417 CopyStackAndRewritePointers( | |
| 418 stack_copy_buffer_.get(), reinterpret_cast<void*>(stack_bottom), | |
| 419 reinterpret_cast<void*>(stack_top), &thread_state); | |
| 420 | |
| 421 new_stack_top = | |
| 422 reinterpret_cast<uint64_t>(stack_copy_buffer_.get()) + stack_size; | |
| 423 } // ScopedSuspendThread | |
| 424 | |
| 425 if (test_delegate_) | |
| 426 test_delegate_->OnPreStackWalk(); | |
| 427 | |
| 428 // Walk the stack and record it. | |
| 429 | |
| 430 // Reserve enough memory for most stacks, to avoid repeated allocations. | |
| 431 // Approximately 99.9% of recorded stacks are 128 frames or fewer. | |
| 432 sample->frames.reserve(128); | |
| 433 | |
| 434 auto current_modules = current_modules_; | |
| 435 auto profile_module_index = &profile_module_index_; | |
| 436 WalkStack( | |
| 437 thread_state, new_stack_top, | |
| 438 [sample, current_modules, profile_module_index](uintptr_t frame_ip) { | |
| 439 sample->frames.push_back(StackSamplingProfiler::Frame( | |
| 440 frame_ip, | |
| 441 GetModuleIndex(frame_ip, current_modules, profile_module_index))); | |
| 442 }); | |
| 443 } | |
| 444 | |
| 445 } // namespace | |
| 446 | |
| 447 std::unique_ptr<NativeStackSampler> NativeStackSampler::Create( | |
| 448 PlatformThreadId thread_id, | |
| 449 AnnotateCallback annotator, | |
| 450 NativeStackSamplerTestDelegate* test_delegate) { | |
| 451 #if !defined(__x86_64__) | |
| 452 // No. | |
| 453 return nullptr; | |
| 454 #endif | |
| 455 return base::MakeUnique<NativeStackSamplerMac>(thread_id, annotator, | |
| 456 test_delegate); | |
| 457 } | |
| 458 | |
| 459 } // namespace base | |
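
For readers skimming the pointer-rewriting logic in this CL: below is a minimal standalone sketch (not part of the change) of the rewrite rule applied by `RewritePointerIfInOriginalStack()` and `CopyStackAndRewritePointers()`, run against a toy buffer instead of a real thread stack. The helper name and buffer layout are invented for illustration only.

```cpp
// Standalone illustration: copy a fake "stack" buffer word by word, and
// rewrite any word that points into the original range so that it points at
// the corresponding offset in the copy. Words outside the range (ordinary
// data, return addresses into code) are left untouched.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

uint64_t RewriteIfInRange(uint64_t value,
                          uint64_t orig_bottom,
                          uint64_t orig_top,
                          uint64_t copy_bottom) {
  if (value < orig_bottom || value >= orig_top)
    return value;  // Not a pointer into the original buffer.
  return copy_bottom + (value - orig_bottom);
}

int main() {
  // A fake 4-word "stack". Word 2 holds a pointer to word 0, standing in for
  // a frame pointer or a pointer to a dynamically allocated stack region.
  std::vector<uint64_t> original(4, 0);
  original[2] = reinterpret_cast<uint64_t>(&original[0]);

  std::vector<uint64_t> copy(original.size());
  uint64_t orig_bottom = reinterpret_cast<uint64_t>(original.data());
  uint64_t orig_top = orig_bottom + original.size() * sizeof(uint64_t);
  uint64_t copy_bottom = reinterpret_cast<uint64_t>(copy.data());

  for (size_t i = 0; i < original.size(); ++i)
    copy[i] = RewriteIfInRange(original[i], orig_bottom, orig_top, copy_bottom);

  // copy[2] now refers to copy[0] rather than original[0].
  printf("rewritten: %d\n", copy[2] == reinterpret_cast<uint64_t>(&copy[0]));
  return 0;
}
```

Running this prints `rewritten: 1`. The same alignment argument made in the CL's comments applies: because the walk inspects the buffer at pointer-size granularity and the stack is pointer-aligned per the ABI, every stored pointer into the original range is either rewritten or left alone, never straddled.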