Chromium Code Reviews
| Index: base/profiler/native_stack_sampler_mac.cc | 
| diff --git a/base/profiler/native_stack_sampler_mac.cc b/base/profiler/native_stack_sampler_mac.cc | 
| new file mode 100644 | 
| index 0000000000000000000000000000000000000000..6e823a947e2f8cf239638e67e7652bcf0c01608d | 
| --- /dev/null | 
| +++ b/base/profiler/native_stack_sampler_mac.cc | 
| @@ -0,0 +1,460 @@ | 
| +// Copyright 2017 The Chromium Authors. All rights reserved. | 
| +// Use of this source code is governed by a BSD-style license that can be | 
| +// found in the LICENSE file. | 
| + | 
| +#include "base/profiler/native_stack_sampler.h" | 
| + | 
| +#include <dlfcn.h> | 
| +#include <libkern/OSByteOrder.h> | 
| +#include <libunwind.h> | 
| +#include <mach-o/swap.h> | 
| +#include <mach/kern_return.h> | 
| +#include <mach/mach.h> | 
| +#include <mach/thread_act.h> | 
| +#include <pthread.h> | 
| +#include <sys/syslimits.h> | 
| + | 
| +#include <map> | 
| +#include <memory> | 
| + | 
| +#include "base/logging.h" | 
| +#include "base/macros.h" | 
| +#include "base/memory/ptr_util.h" | 
| +#include "base/strings/string_number_conversions.h" | 
| + | 
| +namespace base { | 
| + | 
| +namespace { | 
| + | 
| +// Stack walking -------------------------------------------------------------- | 
| + | 
| +// Fills |state| with |target_thread|'s context. | 
| +// | 
| +// Note that this is called while a thread is suspended. Make very very sure | 
| +// that no shared resources (e.g. memory allocators) are used for the duration | 
| +// of this function. | 
| +bool GetThreadState(thread_act_t target_thread, x86_thread_state64_t* state) { | 
| + mach_msg_type_number_t count = | 
| + static_cast<mach_msg_type_number_t>(x86_THREAD_STATE64_COUNT); | 
| + return thread_get_state(target_thread, x86_THREAD_STATE64, | 
| + reinterpret_cast<thread_state_t>(state), | 
| + &count) == KERN_SUCCESS; | 
| +} | 
| + | 
| +// If the value at |pointer| points to the original stack, rewrites it to point | 
| +// to the corresponding location in the copied stack. | 
| +// | 
| +// Note that this is called while a thread is suspended. Make very very sure | 
| +// that no shared resources (e.g. memory allocators) are used for the duration | 
| +// of this function. | 
| +uintptr_t RewritePointerIfInOriginalStack(uintptr_t* original_stack_bottom, | 
| + uintptr_t* original_stack_top, | 
| + uintptr_t* stack_copy_bottom, | 
| + uintptr_t pointer) { | 
| + uintptr_t original_stack_bottom_int = | 
| 
 
Robert Sesek
2017/03/15 21:57:25
(optional, here and throughout but only noted here
 
Mark Mentovai
2017/03/16 03:08:46
I see these casts here, there’s another set of cas
 
Avi (use Gerrit)
2017/03/18 03:09:27
Boy am I aware of the zillion types here :(
Somet
 
Mike Wittman
2017/03/20 19:21:30
The representational rules I tried to follow in th
 
Mike Wittman
2017/03/27 19:48:46
It's up to you if you think it's worth following t
 
Avi (use Gerrit)
2017/03/29 17:52:09
I tried modifying the pointers in NativeStackSampl
 
 | 
| + reinterpret_cast<uintptr_t>(original_stack_bottom); | 
| + uintptr_t original_stack_top_int = | 
| + reinterpret_cast<uintptr_t>(original_stack_top); | 
| + uintptr_t stack_copy_bottom_int = | 
| + reinterpret_cast<uintptr_t>(stack_copy_bottom); | 
| + | 
| + if ((pointer < original_stack_bottom_int) || | 
| + (pointer >= original_stack_top_int)) { | 
| + return pointer; | 
| + } | 
| + | 
| + return stack_copy_bottom_int + (pointer - original_stack_bottom_int); | 
| +} | 
| + | 
| +// Copies the stack to a buffer while rewriting possible pointers to locations | 
| +// within the stack to point to the corresponding locations in the copy. This is | 
| +// necessary to handle stack frames with dynamic stack allocation, where a | 
| +// pointer to the beginning of the dynamic allocation area is stored on the | 
| +// stack and/or in a non-volatile register. | 
| +// | 
| +// Eager rewriting of anything that looks like a pointer to the stack, as done | 
| +// in this function, does not adversely affect the stack unwinding. The only | 
| +// other values on the stack the unwinding depends on are return addresses, | 
| +// which should not point within the stack memory. The rewriting is guaranteed | 
| +// to catch all pointers because the stacks are guaranteed by the ABI to be | 
| +// sizeof(void*) aligned. | 
| +// | 
| +// Note that this is called while a thread is suspended. Make very very sure | 
| +// that no shared resources (e.g. memory allocators) are used for the duration | 
| +// of this function. | 
| +void CopyStackAndRewritePointers(void* dest, | 
| + void* from, | 
| + void* to, | 
| + x86_thread_state64_t* thread_state) | 
| + NO_SANITIZE("address") { | 
| + uintptr_t* original_stack_bottom = static_cast<uintptr_t*>(from); | 
| + uintptr_t* original_stack_top = static_cast<uintptr_t*>(to); | 
| + uintptr_t* stack_copy_bottom = static_cast<uintptr_t*>(dest); | 
| + | 
| + size_t count = original_stack_top - original_stack_bottom; | 
| + for (size_t pos = 0; pos < count; ++pos) { | 
| + stack_copy_bottom[pos] = RewritePointerIfInOriginalStack( | 
| + original_stack_bottom, original_stack_top, stack_copy_bottom, | 
| + original_stack_bottom[pos]); | 
| + } | 
| + | 
| + thread_state->__rbp = | 
| + RewritePointerIfInOriginalStack(original_stack_bottom, original_stack_top, | 
| + stack_copy_bottom, thread_state->__rbp); | 
| + thread_state->__rsp = | 
| + RewritePointerIfInOriginalStack(original_stack_bottom, original_stack_top, | 
| + stack_copy_bottom, thread_state->__rsp); | 
| 
 
Mark Mentovai
2017/03/16 03:08:45
Unwind info may reference and recover rbx, r12, r1
 
Avi (use Gerrit)
2017/03/18 03:09:27
Done.
 
 | 
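Avi's "Done." fix isn't visible in this snapshot of the diff. A sketch of what rewriting the remaining callee-saved registers might look like, reusing RewritePointerIfInOriginalStack (the register list follows Mark's note; not the landed code):

    // Hypothetical sketch: unwind info may also recover rbx and r12-r15, so
    // give them the same treatment as __rbp/__rsp above.
    uint64_t* const callee_saved[] = {&thread_state->__rbx,
                                      &thread_state->__r12,
                                      &thread_state->__r13,
                                      &thread_state->__r14,
                                      &thread_state->__r15};
    for (uint64_t* reg : callee_saved) {
      *reg = RewritePointerIfInOriginalStack(
          original_stack_bottom, original_stack_top, stack_copy_bottom, *reg);
    }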
| +} | 
| + | 
| +// Walks the stack represented by |unwind_context|, calling back to the provided | 
| +// lambda for each frame. Returns false if an error occurred, otherwise returns | 
| +// true. | 
| +template <typename StackFrameCallback> | 
| 
 
Mark Mentovai
2017/03/16 03:08:46
Is this template thing just papering over the fact
 
Avi (use Gerrit)
2017/03/18 03:09:27
Absolutely.
Using a template like this is the mos
 
 | 
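Avi's reply is cut off; the usual rationale is that each capturing lambda has its own unnamed closure type, which cannot decay to a function pointer, while std::function would type-erase and possibly heap-allocate. A minimal illustration of the constraint (hypothetical usage):

    size_t frame_count = 0;
    unw_context_t context;  // assume forged from thread state, as below
    std::vector<uintptr_t> frames;
    auto on_frame = [&frames](uintptr_t ip) { frames.push_back(ip); };
    // void (*fn)(uintptr_t) = on_frame;  // ill-formed: |on_frame| captures
    WalkStackFromContext(&context, &frame_count, on_frame);  // template: OK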
| +bool WalkStackFromContext(unw_context_t* unwind_context, | 
| + size_t* frame_count, | 
| + const StackFrameCallback& callback) { | 
| + unw_cursor_t unwind_cursor; | 
| + unw_init_local(&unwind_cursor, unwind_context); | 
| + | 
| + int step_result; | 
| + unw_word_t ip; | 
| + do { | 
| + ++(*frame_count); | 
| 
 
Mark Mentovai
2017/03/16 03:08:46
It may be prudent to set a frame limit here to pre
 
Mike Wittman
2017/03/16 16:42:24
This is probably a good idea as a backstop. On Win
 
Avi (use Gerrit)
2017/03/18 03:09:27
Mark, would you be OK with this being in a followu
 
Mark Mentovai
2017/03/27 18:24:55
Certainly.
 
 | 
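The backstop was deferred to a follow-up; a sketch of the idea against this loop (the constant's name and value are illustrative only):

    // Cap the walk to defend against unwind cycles on corrupt stacks.
    constexpr size_t kMaxStackFrames = 256;
    do {
      if (*frame_count >= kMaxStackFrames)
        return false;
      ++(*frame_count);
      unw_get_reg(&unwind_cursor, UNW_REG_IP, &ip);
      callback(static_cast<uintptr_t>(ip));
      step_result = unw_step(&unwind_cursor);
    } while (step_result > 0);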
| + unw_get_reg(&unwind_cursor, UNW_REG_IP, &ip); | 
| + | 
| + callback(static_cast<uintptr_t>(ip)); | 
| + | 
| + step_result = unw_step(&unwind_cursor); | 
| + } while (step_result > 0); | 
| + | 
| + if (step_result != 0) | 
| + return false; | 
| + | 
| + return true; | 
| +} | 
| + | 
| +// Returns whether the instruction pointer in |unwind_context| lies within a | 
| +// loaded image. (libunwind reports the containing image via proc_info.extra, | 
| +// which is 0 when the IP is not in any known module.) | 
| +bool IsIPInValidImage(unw_context_t* unwind_context) { | 
| + unw_cursor_t unwind_cursor; | 
| + unw_init_local(&unwind_cursor, unwind_context); | 
| + unw_proc_info_t proc_info; | 
| + unw_get_proc_info(&unwind_cursor, &proc_info); | 
| + return proc_info.extra != 0; | 
| +} | 
| + | 
| +// Walks the stack represented by |thread_state|, calling back to the provided | 
| +// lambda for each frame. | 
| +template <typename StackFrameCallback> | 
| +void WalkStack(const x86_thread_state64_t& thread_state, | 
| + uintptr_t stack_top, | 
| + const StackFrameCallback& callback) { | 
| + size_t frame_count = 0; | 
| + // This uses libunwind to walk the stack. libunwind is designed to be used for | 
| + // a thread to walk its own stack. This creates two problems. | 
| + | 
| + // Problem 1: There is no official way to create a unw_context other than to | 
| + // create it from the current state of the current thread's stack. To get | 
| + // around this, forge a context. A unw_context is just a copy of the 16 main | 
| + // registers followed by the instruction pointer, nothing more. | 
| + // Coincidentally, the first 17 items of the x86_thread_state64_t type are | 
| + // exactly those registers in exactly the same order, so just bulk copy them | 
| + // over. | 
| + unw_context_t unwind_context; | 
| + memcpy(&unwind_context, &thread_state, sizeof(uintptr_t) * 17); | 
| + bool result = WalkStackFromContext(&unwind_context, &frame_count, callback); | 
| + | 
| + if (!result) | 
| + return; | 
| + | 
| + if (frame_count == 1) { | 
| + // Problem 2: Because libunwind is designed to be triggered by user code on | 
| + // their own thread, if it hits a library that has no unwind info for the | 
| + // function that is being executed, it just stops. This isn't a problem in | 
| + // the normal case, but in this case, it's quite possible that the stack | 
| + // being walked is stopped in a function that bridges to the kernel and thus | 
| + // is missing the unwind info. | 
| + // | 
| + // If so, cheat by scanning the stack and trying again. Only do this once, | 
| 
 
Robert Sesek
2017/03/15 21:57:25
This isn't quite scanning and more of an unwind vi
 
Mark Mentovai
2017/03/16 03:08:45
Robert Sesek wrote:
 
Mark Mentovai
2017/03/16 03:08:46
“Only do this once” makes it sound like you might
 
Avi (use Gerrit)
2017/03/18 03:09:27
Rewording.
 
 | 
| + // and only if the first time using libunwind fails after one frame. | 
| + bool ip_in_valid_image = false; | 
| + auto& rsp = unwind_context.data[7]; | 
| + auto& rip = unwind_context.data[16]; | 
| + do { | 
| + rip = *reinterpret_cast<uintptr_t*>(rsp); // rip = *rsp | 
| + rsp += 8; // rsp++ | 
| 
 
Robert Sesek
2017/03/15 21:57:26
8 -> sizeof(uintptr_t) to make it a little more po
 
Mark Mentovai
2017/03/16 16:53:46
One more thought for right here…
Things are only
 
Avi (use Gerrit)
2017/03/18 03:09:27
Done.
 
 | 
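The "Done." presumably swaps the literal for the pointer size, roughly:

    rip = *reinterpret_cast<uintptr_t*>(rsp);  // rip = *rsp
    rsp += sizeof(uintptr_t);                  // rsp++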
| + ip_in_valid_image = IsIPInValidImage(&unwind_context); | 
| + } while (!ip_in_valid_image && rsp < stack_top); | 
| 
 
Mark Mentovai
2017/03/16 03:08:45
You should set some upper bound for scanning, not
 
Avi (use Gerrit)
2017/03/18 03:09:27
Done.
 
 | 
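A sketch of the scan bound Mark asked for; kMaxScanWords is a hypothetical name and value, not the landed constant:

    constexpr uint64_t kMaxScanWords = 32;
    const uint64_t scan_end = rsp + kMaxScanWords * sizeof(uintptr_t);
    do {
      rip = *reinterpret_cast<uintptr_t*>(rsp);
      rsp += sizeof(uintptr_t);
      ip_in_valid_image = IsIPInValidImage(&unwind_context);
    } while (!ip_in_valid_image && rsp < stack_top && rsp < scan_end);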
| + | 
| + if (ip_in_valid_image) | 
| + WalkStackFromContext(&unwind_context, &frame_count, callback); | 
| 
 
Mark Mentovai
2017/03/16 03:08:46
If this looks fishy and you tried scanning, you mi
 
Avi (use Gerrit)
2017/03/18 03:09:27
I don't understand what you mean here.
 
 | 
| + } | 
| +} | 
| + | 
| +// Module identifiers --------------------------------------------------------- | 
| + | 
| +// Fills |id| with the UUID of the x86_64 Mach-O binary with the header | 
| +// |mach_header|. Returns false if the binary is malformed or does not contain | 
| +// the UUID load command. | 
| +bool GetUUID(const mach_header_64* mach_header, unsigned char* id) { | 
| 
 
Mark Mentovai
2017/03/16 03:08:46
Seems like id is really a uuid_t
 
Avi (use Gerrit)
2017/03/18 03:09:27
Done.
 
 | 
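The "Done." most likely changes the out-parameter to uuid_t (the 16-byte unsigned char array typedef from <uuid/uuid.h>), roughly:

    bool GetUUID(const mach_header_64* mach_header, uuid_t id);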
| + size_t offset = sizeof(mach_header_64); | 
| + size_t offset_limit = sizeof(mach_header_64) + mach_header->sizeofcmds; | 
| + for (uint32_t i = 0; (i < mach_header->ncmds) && | 
| + (offset + sizeof(load_command) < offset_limit); | 
| + ++i) { | 
| + const load_command* current_cmd = reinterpret_cast<const load_command*>( | 
| + reinterpret_cast<const uint8_t*>(mach_header) + offset); | 
| + | 
| + if (offset + current_cmd->cmdsize > offset_limit) { | 
| + // This command runs off the end of the command list. This is malformed. | 
| + return false; | 
| + } | 
| + | 
| + if (current_cmd->cmd == LC_UUID) { | 
| + if (current_cmd->cmdsize < sizeof(uuid_command)) { | 
| + // This "UUID command" is too small. This is malformed. | 
| + return false; | 
| + } | 
| + | 
| + const uuid_command* uuid_cmd = | 
| + reinterpret_cast<const uuid_command*>(current_cmd); | 
| + static_assert(sizeof(uuid_cmd->uuid) == sizeof(uuid_t), | 
| + "UUID field of UUID command should be 16 bytes."); | 
| + memcpy(id, &uuid_cmd->uuid, sizeof(uuid_t)); | 
| + return true; | 
| + } | 
| + offset += current_cmd->cmdsize; | 
| + } | 
| + return false; | 
| +} | 
| + | 
| +// Returns the hex encoding of a 16-byte ID for the binary loaded at | 
| +// |module_addr|. Returns an empty string if the UUID cannot be found at | 
| +// |module_addr|. | 
| +std::string GetUniqueId(const void* module_addr) { | 
| 
 
Mark Mentovai
2017/03/16 03:08:45
I can’t tell the difference between GetUUID and Ge
 
Avi (use Gerrit)
2017/03/18 03:09:27
Done.
 
 | 
| + const mach_header_64* mach_header = | 
| + reinterpret_cast<const mach_header_64*>(module_addr); | 
| + DCHECK_EQ(MH_MAGIC_64, mach_header->magic); | 
| + | 
| + unsigned char id[sizeof(uuid_t)]; | 
| + if (!GetUUID(mach_header, id)) | 
| + return ""; | 
| + return HexEncode(id, sizeof(uuid_t)); | 
| +} | 
| + | 
| +// Gets the index for the Module containing |instruction_pointer| in | 
| +// |modules|, adding it if it's not already present. Returns | 
| +// StackSamplingProfiler::Frame::kUnknownModuleIndex if no Module can be | 
| +// determined for |module|. | 
| +size_t GetModuleIndex(const uintptr_t instruction_pointer, | 
| + std::vector<StackSamplingProfiler::Module>* modules, | 
| + std::map<const void*, size_t>* profile_module_index) { | 
| + Dl_info inf; | 
| + if (!dladdr(reinterpret_cast<const void*>(instruction_pointer), &inf)) | 
| + return StackSamplingProfiler::Frame::kUnknownModuleIndex; | 
| + | 
| + auto module_index = profile_module_index->find(inf.dli_fbase); | 
| + if (module_index == profile_module_index->end()) { | 
| + StackSamplingProfiler::Module module( | 
| + reinterpret_cast<uintptr_t>(inf.dli_fbase), GetUniqueId(inf.dli_fbase), | 
| + base::FilePath(inf.dli_fname)); | 
| + modules->push_back(module); | 
| + module_index = | 
| + profile_module_index | 
| + ->insert(std::make_pair(inf.dli_fbase, modules->size() - 1)) | 
| + .first; | 
| + } | 
| + return module_index->second; | 
| +} | 
| + | 
| +// ScopedSuspendThread -------------------------------------------------------- | 
| + | 
| +// Suspends a thread for the lifetime of the object. | 
| +class ScopedSuspendThread { | 
| + public: | 
| + explicit ScopedSuspendThread(mach_port_t thread_port); | 
| + ~ScopedSuspendThread(); | 
| + | 
| + bool was_successful() const { return was_successful_; } | 
| + | 
| + private: | 
| + mach_port_t thread_port_; | 
| + bool was_successful_; | 
| + | 
| + DISALLOW_COPY_AND_ASSIGN(ScopedSuspendThread); | 
| +}; | 
| + | 
| +ScopedSuspendThread::ScopedSuspendThread(mach_port_t thread_port) | 
| 
 
Mark Mentovai
2017/03/16 03:08:46
No reason to define these out-of-line, they’re sho
 
Avi (use Gerrit)
2017/03/18 03:09:27
Done.
 
 | 
| + : thread_port_(thread_port), | 
| 
 
Mark Mentovai
2017/03/16 03:08:46
Conserve:
    : thread_port_(thread_suspend(threa
 
Avi (use Gerrit)
2017/03/18 03:09:27
Done.
 
 | 
| + was_successful_(thread_suspend(thread_port) == KERN_SUCCESS) {} | 
| + | 
| +ScopedSuspendThread::~ScopedSuspendThread() { | 
| + if (!was_successful_) | 
| + return; | 
| + | 
| + kern_return_t resume_result = thread_resume(thread_port_); | 
| + CHECK_EQ(KERN_SUCCESS, resume_result) << "thread_resume failed"; | 
| 
 
Robert Sesek
2017/03/15 21:57:25
MACH_CHECK here would log the Mach error too.
 
Avi (use Gerrit)
2017/03/18 03:09:27
Done.
 
 | 
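Folding in Mark's inline-definition suggestion and Robert's MACH_CHECK note, the class might end up roughly as follows (MACH_CHECK is from base/mac/mach_logging.h and appends the Mach error to the log; a sketch, not the landed code):

    class ScopedSuspendThread {
     public:
      explicit ScopedSuspendThread(mach_port_t thread_port)
          : thread_port_(thread_port),
            was_successful_(thread_suspend(thread_port) == KERN_SUCCESS) {}

      ~ScopedSuspendThread() {
        if (!was_successful_)
          return;
        kern_return_t kr = thread_resume(thread_port_);
        MACH_CHECK(kr == KERN_SUCCESS, kr) << "thread_resume";
      }

      bool was_successful() const { return was_successful_; }

     private:
      mach_port_t thread_port_;
      bool was_successful_;

      DISALLOW_COPY_AND_ASSIGN(ScopedSuspendThread);
    };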
| +} | 
| + | 
| +// NativeStackSamplerMac ------------------------------------------------------ | 
| + | 
| +class NativeStackSamplerMac : public NativeStackSampler { | 
| + public: | 
| + NativeStackSamplerMac(mach_port_t thread_port, | 
| + AnnotateCallback annotator, | 
| + NativeStackSamplerTestDelegate* test_delegate); | 
| + ~NativeStackSamplerMac() override; | 
| + | 
| + // StackSamplingProfiler::NativeStackSampler: | 
| + void ProfileRecordingStarting( | 
| + std::vector<StackSamplingProfiler::Module>* modules) override; | 
| + void RecordStackSample(StackSamplingProfiler::Sample* sample) override; | 
| + void ProfileRecordingStopped() override; | 
| + | 
| + private: | 
| + enum { | 
| 
 
Mark Mentovai
2017/03/16 03:08:46
constexpr size_t
 
Avi (use Gerrit)
2017/03/18 03:09:27
This is in parallel to the windows version.
 
 | 
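Mark's suggested spelling, for comparison (per Avi's reply, the CL kept the enum for parity with the Windows sampler):

    static constexpr size_t kStackCopyBufferSize = 12 * 1024 * 1024;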
| + // Intended to hold the largest stack used by Chrome. The default macOS main | 
| + // thread stack size is 8 MB, and this allows for expansion if it occurs. | 
| + kStackCopyBufferSize = 12 * 1024 * 1024 | 
| + }; | 
| + | 
| + // Suspends the thread with |thread_port_|, copies its stack and resumes the | 
| + // thread, then records the stack frames and associated modules into |sample|. | 
| + void SuspendThreadAndRecordStack(StackSamplingProfiler::Sample* sample); | 
| + | 
| + // Weak reference: Mach port for thread being profiled. | 
| + mach_port_t thread_port_; | 
| + | 
| + const AnnotateCallback annotator_; | 
| + | 
| + NativeStackSamplerTestDelegate* const test_delegate_; | 
| + | 
| + // The stack base address corresponding to |thread_handle_|. | 
| + const void* const thread_stack_base_address_; | 
| + | 
| + // Buffer to use for copies of the stack. We use the same buffer for all the | 
| + // samples to avoid the overhead of multiple allocations and frees. | 
| + const std::unique_ptr<unsigned char[]> stack_copy_buffer_; | 
| + | 
| + // Weak. Points to the modules associated with the profile being recorded | 
| + // between ProfileRecordingStarting() and ProfileRecordingStopped(). | 
| + std::vector<StackSamplingProfiler::Module>* current_modules_ = nullptr; | 
| + | 
| + // Maps a module's base address to the corresponding Module's index within | 
| + // current_modules_. | 
| + std::map<const void*, size_t> profile_module_index_; | 
| + | 
| + DISALLOW_COPY_AND_ASSIGN(NativeStackSamplerMac); | 
| +}; | 
| + | 
| +NativeStackSamplerMac::NativeStackSamplerMac( | 
| + mach_port_t thread_port, | 
| + AnnotateCallback annotator, | 
| + NativeStackSamplerTestDelegate* test_delegate) | 
| + : thread_port_(thread_port), | 
| + annotator_(annotator), | 
| + test_delegate_(test_delegate), | 
| + thread_stack_base_address_( | 
| + pthread_get_stackaddr_np(pthread_from_mach_thread_np(thread_port))), | 
| + stack_copy_buffer_(new unsigned char[kStackCopyBufferSize]) { | 
| + DCHECK(annotator_); | 
| + | 
| + // This class suspends threads, and those threads might be suspended in dyld. | 
| + // Therefore, for all the system functions that might be linked in dynamically | 
| + // that are used while threads are suspended, make calls to them to make sure | 
| + // that they are linked up. | 
| + x86_thread_state64_t thread_state; | 
| + GetThreadState(thread_port_, &thread_state); | 
| +} | 
| + | 
| +NativeStackSamplerMac::~NativeStackSamplerMac() {} | 
| + | 
| +void NativeStackSamplerMac::ProfileRecordingStarting( | 
| + std::vector<StackSamplingProfiler::Module>* modules) { | 
| + current_modules_ = modules; | 
| + profile_module_index_.clear(); | 
| +} | 
| + | 
| +void NativeStackSamplerMac::RecordStackSample( | 
| + StackSamplingProfiler::Sample* sample) { | 
| + DCHECK(current_modules_); | 
| + | 
| + if (!stack_copy_buffer_) | 
| 
 
Mark Mentovai
2017/03/16 03:08:45
The OOM killer should take care of this, we should
 
Avi (use Gerrit)
2017/03/18 03:09:28
parallel to windows version.
 
 | 
| + return; | 
| + | 
| + SuspendThreadAndRecordStack(sample); | 
| +} | 
| + | 
| +void NativeStackSamplerMac::ProfileRecordingStopped() { | 
| + current_modules_ = nullptr; | 
| +} | 
| + | 
| +void NativeStackSamplerMac::SuspendThreadAndRecordStack( | 
| + StackSamplingProfiler::Sample* sample) { | 
| + x86_thread_state64_t thread_state; | 
| + | 
| + // Copy the stack. | 
| + | 
| + uintptr_t new_stack_top = 0; | 
| + { | 
| + // IMPORTANT NOTE: Do not do ANYTHING in this scope that might | 
| + // allocate memory, including indirectly via use of DCHECK/CHECK or other | 
| + // logging statements. Otherwise this code can deadlock on heap locks in the | 
| + // default heap acquired by the target thread before it was suspended. | 
| + ScopedSuspendThread suspend_thread(thread_port_); | 
| + if (!suspend_thread.was_successful()) | 
| + return; | 
| + | 
| + if (!GetThreadState(thread_port_, &thread_state)) | 
| + return; | 
| + uintptr_t stack_top = | 
| + reinterpret_cast<uintptr_t>(thread_stack_base_address_); | 
| + uintptr_t stack_bottom = thread_state.__rsp; | 
| + uintptr_t stack_size = stack_top - stack_bottom; | 
| 
 
Mark Mentovai
2017/03/16 03:08:46
Normally I’d say to DCHECK_LE(stack_bottom, stack_
 
Avi (use Gerrit)
2017/03/18 03:09:27
Done.
 
 | 
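Mark's comment is truncated; given the no-allocation rule in this scope (a firing DCHECK logs, and logging can allocate), the "Done." was presumably an early return rather than an assert. Illustrative guess:

    // Can't DCHECK_LE(stack_bottom, stack_top) while the target thread is
    // suspended, since a failing DCHECK could allocate; bail out instead.
    if (stack_top < stack_bottom)
      return;
    uintptr_t stack_size = stack_top - stack_bottom;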
| + | 
| + if (stack_size > kStackCopyBufferSize) | 
| 
 
Robert Sesek
2017/03/15 21:57:25
We may want monitoring here in case the room-to-gr
 
Mike Wittman
2017/03/15 22:09:56
We should be able to detect this server-side if we
 
Avi (use Gerrit)
2017/03/18 03:09:27
Acknowledged.
 
 | 
| + return; | 
| + | 
| + (*annotator_)(sample); | 
| + | 
| + CopyStackAndRewritePointers( | 
| + stack_copy_buffer_.get(), reinterpret_cast<void*>(stack_bottom), | 
| + reinterpret_cast<void*>(stack_top), &thread_state); | 
| + | 
| + new_stack_top = | 
| + reinterpret_cast<uintptr_t>(stack_copy_buffer_.get()) + stack_size; | 
| + } // ScopedSuspendThread | 
| + | 
| + if (test_delegate_) | 
| + test_delegate_->OnPreStackWalk(); | 
| + | 
| + // Walk the stack and record it. | 
| + | 
| + // Reserve enough memory for most stacks, to avoid repeated allocations. | 
| + // Approximately 99.9% of recorded stacks are 128 frames or fewer. | 
| + sample->frames.reserve(128); | 
| + | 
| + auto* current_modules = current_modules_; | 
| + auto* profile_module_index = &profile_module_index_; | 
| + WalkStack( | 
| + thread_state, new_stack_top, | 
| + [sample, current_modules, profile_module_index](uintptr_t frame_ip) { | 
| + sample->frames.push_back(StackSamplingProfiler::Frame( | 
| + frame_ip, | 
| + GetModuleIndex(frame_ip, current_modules, profile_module_index))); | 
| + }); | 
| +} | 
| + | 
| +} // namespace | 
| + | 
| +std::unique_ptr<NativeStackSampler> NativeStackSampler::Create( | 
| + PlatformThreadId thread_id, | 
| + AnnotateCallback annotator, | 
| + NativeStackSamplerTestDelegate* test_delegate) { | 
| +#if !defined(__x86_64__) | 
| + // No. | 
| 
 
Robert Sesek
2017/03/15 21:57:25
NOTIMPLEMENTED() maybe?
 
Mark Mentovai
2017/03/16 03:08:45
#error is best
 
Avi (use Gerrit)
2017/03/18 03:09:27
Windows silently returns a nullptr. Mike, WDYT?
 
Mike Wittman
2017/03/20 19:21:30
The calling code expects NativeStackSampler::Creat
 
 | 
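For reference, Robert's suggestion would read roughly as below (NOTIMPLEMENTED() is from base/logging.h); Mark's alternative is a compile-time #error, and Mike's truncated reply concerns what callers of Create() expect back:

    #if !defined(__x86_64__)
      NOTIMPLEMENTED();  // Only 64-bit Intel macOS is supported.
      return nullptr;
    #endif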
| + return nullptr; | 
| +#endif | 
| + return base::MakeUnique<NativeStackSamplerMac>(thread_id, annotator, | 
| + test_delegate); | 
| +} | 
| + | 
| +} // namespace base |