Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <objbase.h> | 5 #include <objbase.h> |
| 6 #include <windows.h> | 6 #include <windows.h> |
| 7 | 7 |
| 8 #include <map> | 8 #include <map> |
| 9 #include <utility> | 9 #include <utility> |
| 10 | 10 |
| 11 #include "base/containers/hash_tables.h" | |
| 11 #include "base/logging.h" | 12 #include "base/logging.h" |
| 13 #include "base/memory/singleton.h" | |
| 12 #include "base/profiler/native_stack_sampler.h" | 14 #include "base/profiler/native_stack_sampler.h" |
| 15 #include "base/stl_util.h" | |
| 13 #include "base/strings/string_util.h" | 16 #include "base/strings/string_util.h" |
| 14 #include "base/strings/stringprintf.h" | 17 #include "base/strings/stringprintf.h" |
| 15 #include "base/strings/utf_string_conversions.h" | 18 #include "base/strings/utf_string_conversions.h" |
| 16 #include "base/time/time.h" | 19 #include "base/time/time.h" |
| 17 #include "base/win/pe_image.h" | 20 #include "base/win/pe_image.h" |
| 18 #include "base/win/scoped_handle.h" | 21 #include "base/win/scoped_handle.h" |
| 19 | 22 |
| 20 namespace base { | 23 namespace base { |
| 21 | 24 |
| 25 // LeafUnwindBlacklist -------------------------------------------------------- | |
| 26 | |
| 22 namespace { | 27 namespace { |
| 23 | 28 |
| 29 // Records modules that are known to have functions that violate the Microsoft | |
| 30 // x64 calling convention and would be dangerous to manually unwind if | |
| 31 // encountered as the last frame on the call stack. | |
| 32 class LeafUnwindBlacklist { | |
| 33 public: | |
| 34 static LeafUnwindBlacklist* GetInstance(); | |
| 35 | |
| 36 // These functions do not allocate memory and are safe to call between | |
| 37 // SuspendThread and ResumeThread. | |
| 38 void RecordModuleForBlacklist(const void* module); | |
| 39 bool IsBlacklisted(const void* module) const; | |
| 40 | |
| 41 // Allocates memory. Must be invoked only after ResumeThread, otherwise | |
| 42 // RecordStack will eventually deadlock on a heap lock. | |
| 43 void AddRecordedModuleToBlacklistIfPresent(); | |
| 44 | |
| 45 private: | |
| 46 friend struct DefaultSingletonTraits<LeafUnwindBlacklist>; | |
| 47 | |
| 48 LeafUnwindBlacklist(); | |
| 49 ~LeafUnwindBlacklist(); | |
| 50 | |
| 51 // Holding location for a possible pending blacklisted module. We can't add | |
| 52 // the module to |blacklisted_modules_| between SuspendThread and ResumeThread | |
| 53 // because it may allocate memory and deadlock, so we store it here | |
| 54 // temporarily then move it to |blacklisted_modules_| after ResumeThread. | |
| 55 const void* pending_blacklisted_module_; | |
|
Nico
2015/08/15 01:44:18
I found it a bit confusing that this ephemeral val
Mike Wittman
2015/08/16 00:54:12
I suppose we could treat this as an out argument o
| |
| 56 | |
| 57 // The set of modules known to have functions that violate the Microsoft x64 | |
| 58 // calling convention. | |
| 59 base::hash_set<const void*> blacklisted_modules_; | |
| 60 | |
| 61 DISALLOW_COPY_AND_ASSIGN(LeafUnwindBlacklist); | |
| 62 }; | |
| 63 | |
| 64 // static | |
| 65 LeafUnwindBlacklist* LeafUnwindBlacklist::GetInstance() { | |
| 66 // Leaky for shutdown performance. | |
| 67 return Singleton<LeafUnwindBlacklist, | |
| 68 LeakySingletonTraits<LeafUnwindBlacklist>>::get(); | |
| 69 } | |
| 70 | |
| 71 void LeafUnwindBlacklist::RecordModuleForBlacklist(const void* module) { | |
| 72 CHECK(!pending_blacklisted_module_); | |
| 73 pending_blacklisted_module_ = module; | |
| 74 } | |
| 75 | |
| 76 bool LeafUnwindBlacklist::IsBlacklisted(const void* module) const { | |
| 77 return ContainsKey(blacklisted_modules_, module); | |
| 78 } | |
| 79 | |
| 80 void LeafUnwindBlacklist::AddRecordedModuleToBlacklistIfPresent() { | |
| 81 if (pending_blacklisted_module_) | |
| 82 blacklisted_modules_.insert(pending_blacklisted_module_); | |
| 83 pending_blacklisted_module_ = nullptr; | |
| 84 } | |
| 85 | |
| 86 LeafUnwindBlacklist::LeafUnwindBlacklist() | |
| 87 : pending_blacklisted_module_(nullptr) { | |
| 88 } | |
| 89 | |
| 90 LeafUnwindBlacklist::~LeafUnwindBlacklist() { | |
| 91 } | |
| 92 | |
| 93 // Stack recording functions -------------------------------------------------- | |
| 94 | |
| 24 // Walks the stack represented by |context| from the current frame downwards, | 95 // Walks the stack represented by |context| from the current frame downwards, |
| 25 // recording the instruction pointers for each frame in |instruction_pointers|. | 96 // recording the instruction pointers for each frame in |instruction_pointers|. |
| 26 int RecordStack(CONTEXT* context, | 97 int RecordStack(CONTEXT* context, |
| 27 int max_stack_size, | 98 int max_stack_size, |
| 28 const void* instruction_pointers[], | 99 const void* instruction_pointers[], |
| 29 bool* last_frame_is_unknown_function) { | 100 LeafUnwindBlacklist* leaf_unwind_blacklist) { |
| 30 #ifdef _WIN64 | 101 #ifdef _WIN64 |
| 31 *last_frame_is_unknown_function = false; | 102 bool unwind_info_present_for_all_frames = true; |
| 32 | 103 |
| 33 int i = 0; | 104 int i = 0; |
| 34 for (; (i < max_stack_size) && context->Rip; ++i) { | 105 for (; (i < max_stack_size) && context->Rip; ++i) { |
| 35 // Try to look up unwind metadata for the current function. | 106 // Try to look up unwind metadata for the current function. |
| 36 ULONG64 image_base; | 107 ULONG64 image_base; |
| 37 PRUNTIME_FUNCTION runtime_function = | 108 PRUNTIME_FUNCTION runtime_function = |
| 38 RtlLookupFunctionEntry(context->Rip, &image_base, nullptr); | 109 RtlLookupFunctionEntry(context->Rip, &image_base, nullptr); |
| 39 | 110 |
| 40 instruction_pointers[i] = reinterpret_cast<const void*>(context->Rip); | 111 instruction_pointers[i] = reinterpret_cast<const void*>(context->Rip); |
| 41 | 112 |
| 42 if (runtime_function) { | 113 if (runtime_function) { |
| 43 KNONVOLATILE_CONTEXT_POINTERS nvcontext = {}; | 114 KNONVOLATILE_CONTEXT_POINTERS nvcontext = {}; |
| 44 void* handler_data; | 115 void* handler_data; |
| 45 ULONG64 establisher_frame; | 116 ULONG64 establisher_frame; |
| 46 RtlVirtualUnwind(0, image_base, context->Rip, runtime_function, context, | 117 RtlVirtualUnwind(0, image_base, context->Rip, runtime_function, context, |
| 47 &handler_data, &establisher_frame, &nvcontext); | 118 &handler_data, &establisher_frame, &nvcontext); |
| 48 } else { | 119 } else { |
| 49 // If we don't have a RUNTIME_FUNCTION, then in theory this should be a | 120 // RtlLookupFunctionEntry didn't find unwind information. This could mean |
| 50 // leaf function whose frame contains only a return address, at | 121 // the code at the instruction pointer is in: |
| 51 // RSP. However, crash data also indicates that some third party libraries | |
| 52 // do not provide RUNTIME_FUNCTION information for non-leaf functions. We | |
| 53 // could manually unwind the stack in the former case, but attempting to | |
| 54 // do so in the latter case would produce wrong results and likely crash, | |
| 55 // so just bail out. | |
| 56 // | 122 // |
| 57 // Ad hoc runs with instrumentation show that ~5% of stack traces end with | 123 // 1. a true leaf function (i.e. a function that neither calls a function, |
| 58 // a valid leaf function. To avoid selectively omitting these traces it | 124 // nor allocates any stack space itself) in which case the return |
| 59 // makes sense to ultimately try to distinguish these two cases and | 125 // address is at RSP, or |
| 60 // selectively unwind the stack for legitimate leaf functions. For the | 126 // |
| 61 // purposes of avoiding crashes though, just ignore them all for now. | 127 // 2. a function that doesn't adhere to the Microsoft x64 calling |
| 62 return i; | 128 // convention, either by not providing the required unwind information, |
| 129 // or by not having the prologue or epilogue required for unwinding; | |
| 130 // this case has been observed in crash data in injected third party | |
| 131 // DLLs. | |
| 132 // | |
| 133 // In valid code, case 1 can only occur (by definition) as the last frame | |
| 134 // on the stack. This happens in about 5% of observed stacks and can | |
|
Nico
2015/08/15 01:44:18
This seems surprisingly high to me.
Mike Wittman
2015/08/16 00:54:12
That's the rough average I saw profiling startups
| |
| 135 // easily be unwound by popping RSP and using it as the next frame's | |
| 136 // instruction pointer. | |
| 137 // | |
| 138 // Case 2 can occur anywhere on the stack, and attempting to unwind the | |
| 139 // stack will result in treating whatever value happens to be on the stack | |
| 140 // at RSP as the next frame's instruction pointer. This is certainly wrong | |
| 141 // and very likely to lead to crashing by dereferencing invalid pointers in | |
| 142 // the next RtlVirtualUnwind call. | |
| 143 // | |
| 144 // If we see case 2 at a location not the last frame, and all the previous | |
| 145 // frames had valid unwind information, then this is definitely bad code. | |
| 146 // We blacklist the module as untrustworthy for unwinding if we encounter a | |
|
Nico
2015/08/15 01:44:18
Is this blacklisting useful? You're expecting some
Mike Wittman
2015/08/16 00:54:11
That's correct, but the blacklisting should substa
Nico
2015/08/16 01:32:00
Ok, fair enough.
| |
| 147 // function in it that doesn't have unwind information. | |
| 148 | |
| 149 if (i == 0) { | |
| 150 // We are at the end of the stack. It's very likely that we're in case 1 | |
| 151 // since the vast majority of code adheres to the Microsoft x64 calling | |
| 152 // convention. But there's a small chance we might be unlucky and be in | |
| 153 // case 2. If this module is known to have bad code according to the | |
| 154 // leaf unwind blacklist, stop here, otherwise manually unwind. | |
| 155 if (leaf_unwind_blacklist->IsBlacklisted( | |
| 156 reinterpret_cast<const void*>(image_base))) { | |
| 157 return i + 1; | |
| 158 } | |
| 159 | |
| 160 context->Rip = context->Rsp; | |
| 161 context->Rsp += 8; | |
| 162 unwind_info_present_for_all_frames = false; | |
| 163 } else { | |
| 164 // We're not at the end of the stack. This frame is untrustworthy and we | |
| 165 // can't safely unwind from here. | |
| 166 if (unwind_info_present_for_all_frames) { | |
| 167 // Unwind information was present for all previous frames, so we can | |
| 168 // be confident this is case 2. Record the module to be blacklisted. | |
| 169 leaf_unwind_blacklist->RecordModuleForBlacklist( | |
| 170 reinterpret_cast<const void*>(image_base)); | |
| 171 } else { | |
| 172 // We started off on a function without unwind information. It's very | |
| 173 // likely that all frames up to this point have been good, and this | |
| 174 // frame is case 2. But it's possible that the initial frame was case | |
| 175 // 2 but hadn't been blacklisted yet, and we've started to go off into | |
| 176 // the weeds. Since we can't be sure, just bail out without | |
| 177 // blacklisting the module; chances are we'll later encounter the same | |
| 178 // function on a stack with full unwind information. | |
| 179 } | |
| 180 return i + 1; | |
| 181 } | |
| 63 } | 182 } |
| 64 } | 183 } |
| 65 return i; | 184 return i; |
| 66 #else | 185 #else |
| 67 return 0; | 186 return 0; |
| 68 #endif | 187 #endif |
| 69 } | 188 } |
| 70 | 189 |
| 71 // Fills in |module_handles| corresponding to the pointers to code in | 190 // Fills in |module_handles| corresponding to the pointers to code in |
| 72 // |addresses|. The module handles are returned with reference counts | 191 // |addresses|. The module handles are returned with reference counts |
| 73 // incremented and should be freed with FreeModuleHandles. See note in | 192 // incremented and should be freed with FreeModuleHandles. See note in |
| 74 // SuspendThreadAndRecordStack for why |addresses| and |module_handles| are | 193 // SuspendThreadAndRecordStack for why |addresses| and |module_handles| are |
| 75 // arrays. | 194 // arrays. |
| 76 void FindModuleHandlesForAddresses(const void* const addresses[], | 195 void FindModuleHandlesForAddresses(const void* const addresses[], |
| 77 HMODULE module_handles[], int stack_depth, | 196 HMODULE module_handles[], int stack_depth) { |
| 78 bool last_frame_is_unknown_function) { | 197 for (int i = 0; i < stack_depth; ++i) { |
| 79 const int module_frames = | |
| 80 last_frame_is_unknown_function ? stack_depth - 1 : stack_depth; | |
| 81 for (int i = 0; i < module_frames; ++i) { | |
| 82 HMODULE module_handle = NULL; | 198 HMODULE module_handle = NULL; |
| 83 if (GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, | 199 if (GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, |
| 84 reinterpret_cast<LPCTSTR>(addresses[i]), | 200 reinterpret_cast<LPCTSTR>(addresses[i]), |
| 85 &module_handle)) { | 201 &module_handle)) { |
| 86 // HMODULE actually represents the base address of the module, so we can | 202 // HMODULE actually represents the base address of the module, so we can |
| 87 // use it directly as an address. | 203 // use it directly as an address. |
| 88 DCHECK_LE(reinterpret_cast<const void*>(module_handle), addresses[i]); | 204 DCHECK_LE(reinterpret_cast<const void*>(module_handle), addresses[i]); |
| 89 module_handles[i] = module_handle; | 205 module_handles[i] = module_handle; |
| 90 } | 206 } |
| 91 } | 207 } |
| (...skipping 29 matching lines...) Expand all Loading... | |
| 121 std::wstring build_id; | 237 std::wstring build_id; |
| 122 int result = | 238 int result = |
| 123 ::StringFromGUID2(guid, WriteInto(&build_id, kGUIDSize), kGUIDSize); | 239 ::StringFromGUID2(guid, WriteInto(&build_id, kGUIDSize), kGUIDSize); |
| 124 if (result != kGUIDSize) | 240 if (result != kGUIDSize) |
| 125 return std::string(); | 241 return std::string(); |
| 126 RemoveChars(build_id, L"{}-", &build_id); | 242 RemoveChars(build_id, L"{}-", &build_id); |
| 127 build_id += StringPrintf(L"%d", age); | 243 build_id += StringPrintf(L"%d", age); |
| 128 return WideToUTF8(build_id); | 244 return WideToUTF8(build_id); |
| 129 } | 245 } |
| 130 | 246 |
| 247 // ScopedDisablePriorityBoost ------------------------------------------------- | |
| 248 | |
| 131 // Disables priority boost on a thread for the lifetime of the object. | 249 // Disables priority boost on a thread for the lifetime of the object. |
| 132 class ScopedDisablePriorityBoost { | 250 class ScopedDisablePriorityBoost { |
| 133 public: | 251 public: |
| 134 ScopedDisablePriorityBoost(HANDLE thread_handle); | 252 ScopedDisablePriorityBoost(HANDLE thread_handle); |
| 135 ~ScopedDisablePriorityBoost(); | 253 ~ScopedDisablePriorityBoost(); |
| 136 | 254 |
| 137 private: | 255 private: |
| 138 HANDLE thread_handle_; | 256 HANDLE thread_handle_; |
| 139 BOOL got_previous_boost_state_; | 257 BOOL got_previous_boost_state_; |
| 140 BOOL boost_state_was_disabled_; | 258 BOOL boost_state_was_disabled_; |
| (...skipping 21 matching lines...) Expand all Loading... | |
| 162 // Suspends the thread with |thread_handle|, records the stack into | 280 // Suspends the thread with |thread_handle|, records the stack into |
| 163 // |instruction_pointers|, then resumes the thread. Returns the size of the | 281 // |instruction_pointers|, then resumes the thread. Returns the size of the |
| 164 // stack. | 282 // stack. |
| 165 // | 283 // |
| 166 // IMPORTANT NOTE: No heap allocations may occur between SuspendThread and | 284 // IMPORTANT NOTE: No heap allocations may occur between SuspendThread and |
| 167 // ResumeThread. Otherwise this code can deadlock on heap locks acquired by the | 285 // ResumeThread. Otherwise this code can deadlock on heap locks acquired by the |
| 168 // target thread before it was suspended. This is why we pass instruction | 286 // target thread before it was suspended. This is why we pass instruction |
| 169 // pointers and module handles as preallocated arrays rather than vectors, since | 287 // pointers and module handles as preallocated arrays rather than vectors, since |
| 170 // vectors make it too easy to subtly allocate memory. | 288 // vectors make it too easy to subtly allocate memory. |
| 171 int SuspendThreadAndRecordStack(HANDLE thread_handle, int max_stack_size, | 289 int SuspendThreadAndRecordStack(HANDLE thread_handle, int max_stack_size, |
| 172 const void* instruction_pointers[], | 290 const void* instruction_pointers[]) { |
| 173 bool* last_frame_is_unknown_function) { | |
| 174 if (::SuspendThread(thread_handle) == -1) | 291 if (::SuspendThread(thread_handle) == -1) |
| 175 return 0; | 292 return 0; |
| 176 | 293 |
| 177 int stack_depth = 0; | 294 int stack_depth = 0; |
| 178 CONTEXT thread_context = {0}; | 295 CONTEXT thread_context = {0}; |
| 179 thread_context.ContextFlags = CONTEXT_FULL; | 296 thread_context.ContextFlags = CONTEXT_FULL; |
| 180 if (::GetThreadContext(thread_handle, &thread_context)) { | 297 if (::GetThreadContext(thread_handle, &thread_context)) { |
| 181 stack_depth = RecordStack(&thread_context, max_stack_size, | 298 stack_depth = RecordStack(&thread_context, max_stack_size, |
| 182 instruction_pointers, | 299 instruction_pointers, |
| 183 last_frame_is_unknown_function); | 300 LeafUnwindBlacklist::GetInstance()); |
| 184 } | 301 } |
| 185 | 302 |
| 186 // Disable the priority boost that the thread would otherwise receive on | 303 // Disable the priority boost that the thread would otherwise receive on |
| 187 // resume. We do this to avoid artificially altering the dynamics of the | 304 // resume. We do this to avoid artificially altering the dynamics of the |
| 188 // executing application any more than we already are by suspending and | 305 // executing application any more than we already are by suspending and |
| 189 // resuming the thread. | 306 // resuming the thread. |
| 190 // | 307 // |
| 191 // Note that this can racily disable a priority boost that otherwise would | 308 // Note that this can racily disable a priority boost that otherwise would |
| 192 // have been given to the thread, if the thread is waiting on other wait | 309 // have been given to the thread, if the thread is waiting on other wait |
| 193 // conditions at the time of SuspendThread and those conditions are satisfied | 310 // conditions at the time of SuspendThread and those conditions are satisfied |
| 194 // before priority boost is reenabled. The measured length of this window is | 311 // before priority boost is reenabled. The measured length of this window is |
| 195 // ~100us, so this should occur fairly rarely. | 312 // ~100us, so this should occur fairly rarely. |
| 196 ScopedDisablePriorityBoost disable_priority_boost(thread_handle); | 313 ScopedDisablePriorityBoost disable_priority_boost(thread_handle); |
| 197 bool resume_thread_succeeded = ::ResumeThread(thread_handle) != -1; | 314 bool resume_thread_succeeded = ::ResumeThread(thread_handle) != -1; |
| 198 CHECK(resume_thread_succeeded) << "ResumeThread failed: " << GetLastError(); | 315 CHECK(resume_thread_succeeded) << "ResumeThread failed: " << GetLastError(); |
| 199 | 316 |
| 317 LeafUnwindBlacklist::GetInstance()->AddRecordedModuleToBlacklistIfPresent(); | |
| 318 | |
| 200 return stack_depth; | 319 return stack_depth; |
| 201 } | 320 } |
| 202 | 321 |
| 322 // NativeStackSamplerWin ------------------------------------------------------ | |
| 323 | |
| 203 class NativeStackSamplerWin : public NativeStackSampler { | 324 class NativeStackSamplerWin : public NativeStackSampler { |
| 204 public: | 325 public: |
| 205 explicit NativeStackSamplerWin(win::ScopedHandle thread_handle); | 326 explicit NativeStackSamplerWin(win::ScopedHandle thread_handle); |
| 206 ~NativeStackSamplerWin() override; | 327 ~NativeStackSamplerWin() override; |
| 207 | 328 |
| 208 // StackSamplingProfiler::NativeStackSampler: | 329 // StackSamplingProfiler::NativeStackSampler: |
| 209 void ProfileRecordingStarting( | 330 void ProfileRecordingStarting( |
| 210 std::vector<StackSamplingProfiler::Module>* modules) override; | 331 std::vector<StackSamplingProfiler::Module>* modules) override; |
| 211 void RecordStackSample(StackSamplingProfiler::Sample* sample) override; | 332 void RecordStackSample(StackSamplingProfiler::Sample* sample) override; |
| 212 void ProfileRecordingStopped() override; | 333 void ProfileRecordingStopped() override; |
| (...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 257 } | 378 } |
| 258 | 379 |
| 259 void NativeStackSamplerWin::RecordStackSample( | 380 void NativeStackSamplerWin::RecordStackSample( |
| 260 StackSamplingProfiler::Sample* sample) { | 381 StackSamplingProfiler::Sample* sample) { |
| 261 DCHECK(current_modules_); | 382 DCHECK(current_modules_); |
| 262 | 383 |
| 263 const int max_stack_size = 64; | 384 const int max_stack_size = 64; |
| 264 const void* instruction_pointers[max_stack_size] = {0}; | 385 const void* instruction_pointers[max_stack_size] = {0}; |
| 265 HMODULE module_handles[max_stack_size] = {0}; | 386 HMODULE module_handles[max_stack_size] = {0}; |
| 266 | 387 |
| 267 bool last_frame_is_unknown_function = false; | 388 int stack_depth = SuspendThreadAndRecordStack(thread_handle_.Get(), |
| 268 int stack_depth = SuspendThreadAndRecordStack( | 389 max_stack_size, |
| 269 thread_handle_.Get(), max_stack_size, instruction_pointers, | 390 instruction_pointers); |
| 270 &last_frame_is_unknown_function); | |
| 271 FindModuleHandlesForAddresses(instruction_pointers, module_handles, | 391 FindModuleHandlesForAddresses(instruction_pointers, module_handles, |
| 272 stack_depth, last_frame_is_unknown_function); | 392 stack_depth); |
| 273 CopyToSample(instruction_pointers, module_handles, stack_depth, sample, | 393 CopyToSample(instruction_pointers, module_handles, stack_depth, sample, |
| 274 current_modules_); | 394 current_modules_); |
| 275 FreeModuleHandles(stack_depth, module_handles); | 395 FreeModuleHandles(stack_depth, module_handles); |
| 276 } | 396 } |
| 277 | 397 |
| 278 void NativeStackSamplerWin::ProfileRecordingStopped() { | 398 void NativeStackSamplerWin::ProfileRecordingStopped() { |
| 279 current_modules_ = nullptr; | 399 current_modules_ = nullptr; |
| 280 } | 400 } |
| 281 | 401 |
| 282 // static | 402 // static |
| (...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 348 | 468 |
| 349 if (thread_handle) { | 469 if (thread_handle) { |
| 350 return scoped_ptr<NativeStackSampler>(new NativeStackSamplerWin( | 470 return scoped_ptr<NativeStackSampler>(new NativeStackSamplerWin( |
| 351 win::ScopedHandle(thread_handle))); | 471 win::ScopedHandle(thread_handle))); |
| 352 } | 472 } |
| 353 #endif | 473 #endif |
| 354 return scoped_ptr<NativeStackSampler>(); | 474 return scoped_ptr<NativeStackSampler>(); |
| 355 } | 475 } |
| 356 | 476 |
| 357 } // namespace base | 477 } // namespace base |
| OLD | NEW |