| OLD | NEW |
| 1 // Copyright 2016 the V8 project authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "src/libsampler/v8-sampler.h" | |
| 6 | |
| 7 #if V8_OS_POSIX && !V8_OS_CYGWIN | |
| 8 | |
| 9 #define USE_SIGNALS | |
| 10 | |
| 11 #include <errno.h> | |
| 12 #include <pthread.h> | |
| 13 #include <signal.h> | |
| 14 #include <sys/time.h> | |
| 15 | |
| 16 #if !V8_OS_QNX && !V8_OS_NACL && !V8_OS_AIX | |
| 17 #include <sys/syscall.h> // NOLINT | |
| 18 #endif | |
| 19 | |
| 20 #if V8_OS_MACOSX | |
| 21 #include <mach/mach.h> | |
| 22 // OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h> | |
| 23 // and is a typedef for struct sigcontext. There is no uc_mcontext. | |
| 24 #elif (!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) && \ | |
| 25 !V8_OS_OPENBSD && !V8_OS_NACL | |
| 26 #include <ucontext.h> | |
| 27 #endif | |
| 28 | |
| 29 #include <unistd.h> | |
| 30 | |
| 31 // GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'. | |
| 32 // Old versions of the C library's <signal.h> didn't define the type. | |
| 33 #if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \ | |
| 34 (defined(__arm__) || defined(__aarch64__)) && \ | |
| 35 !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT) | |
| 36 #include <asm/sigcontext.h> // NOLINT | |
| 37 #endif | |
| 38 | |
| 39 #elif V8_OS_WIN || V8_OS_CYGWIN | |
| 40 | |
| 41 #include "src/base/win32-headers.h" | |
| 42 | |
| 43 #endif | |
| 44 | |
| 45 #include <algorithm> | |
| 46 #include <map> | |
| 47 #include <vector> | |
| 48 | |
| 49 #include "src/base/atomic-utils.h" | |
| 50 #include "src/base/hashmap.h" | |
| 51 #include "src/base/platform/platform.h" | |
| 52 | |
| 53 #if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) | |
| 54 | |
| 55 // Not all versions of Android's C library provide ucontext_t. | |
| 56 // Detect this and provide custom but compatible definitions. Note that these | |
| 57 // follow the GLibc naming convention to access register values from | |
| 58 // mcontext_t. | |
| 59 // | |
| 60 // See http://code.google.com/p/android/issues/detail?id=34784 | |
| 61 | |
| 62 #if defined(__arm__) | |
| 63 | |
| 64 typedef struct sigcontext mcontext_t; | |
| 65 | |
| 66 typedef struct ucontext { | |
| 67 uint32_t uc_flags; | |
| 68 struct ucontext* uc_link; | |
| 69 stack_t uc_stack; | |
| 70 mcontext_t uc_mcontext; | |
| 71 // Other fields are not used by V8, don't define them here. | |
| 72 } ucontext_t; | |
| 73 | |
| 74 #elif defined(__aarch64__) | |
| 75 | |
| 76 typedef struct sigcontext mcontext_t; | |
| 77 | |
| 78 typedef struct ucontext { | |
| 79 uint64_t uc_flags; | |
| 80 struct ucontext* uc_link; | |
| 81 stack_t uc_stack; | |
| 82 mcontext_t uc_mcontext; | |
| 83 // Other fields are not used by V8, don't define them here. | |
| 84 } ucontext_t; | |
| 85 | |
| 86 #elif defined(__mips__) | |
| 87 // MIPS version of sigcontext, for Android bionic. | |
| 88 typedef struct { | |
| 89 uint32_t regmask; | |
| 90 uint32_t status; | |
| 91 uint64_t pc; | |
| 92 uint64_t gregs[32]; | |
| 93 uint64_t fpregs[32]; | |
| 94 uint32_t acx; | |
| 95 uint32_t fpc_csr; | |
| 96 uint32_t fpc_eir; | |
| 97 uint32_t used_math; | |
| 98 uint32_t dsp; | |
| 99 uint64_t mdhi; | |
| 100 uint64_t mdlo; | |
| 101 uint32_t hi1; | |
| 102 uint32_t lo1; | |
| 103 uint32_t hi2; | |
| 104 uint32_t lo2; | |
| 105 uint32_t hi3; | |
| 106 uint32_t lo3; | |
| 107 } mcontext_t; | |
| 108 | |
| 109 typedef struct ucontext { | |
| 110 uint32_t uc_flags; | |
| 111 struct ucontext* uc_link; | |
| 112 stack_t uc_stack; | |
| 113 mcontext_t uc_mcontext; | |
| 114 // Other fields are not used by V8, don't define them here. | |
| 115 } ucontext_t; | |
| 116 | |
| 117 #elif defined(__i386__) | |
| 118 // x86 version for Android. | |
| 119 typedef struct { | |
| 120 uint32_t gregs[19]; | |
| 121 void* fpregs; | |
| 122 uint32_t oldmask; | |
| 123 uint32_t cr2; | |
| 124 } mcontext_t; | |
| 125 | |
| 126 typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks | |
| 127 typedef struct ucontext { | |
| 128 uint32_t uc_flags; | |
| 129 struct ucontext* uc_link; | |
| 130 stack_t uc_stack; | |
| 131 mcontext_t uc_mcontext; | |
| 132 // Other fields are not used by V8, don't define them here. | |
| 133 } ucontext_t; | |
| 134 enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 }; | |
| 135 | |
| 136 #elif defined(__x86_64__) | |
| 137 // x64 version for Android. | |
| 138 typedef struct { | |
| 139 uint64_t gregs[23]; | |
| 140 void* fpregs; | |
| 141 uint64_t __reserved1[8]; | |
| 142 } mcontext_t; | |
| 143 | |
| 144 typedef struct ucontext { | |
| 145 uint64_t uc_flags; | |
| 146 struct ucontext* uc_link; | |
| 147 stack_t uc_stack; | |
| 148 mcontext_t uc_mcontext; | |
| 149 // Other fields are not used by V8, don't define them here. | |
| 150 } ucontext_t; | |
| 151 enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 }; | |
| 152 #endif | |
| 153 | |
| 154 #endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) | |
| 155 | |
| 156 | |
| 157 namespace v8 { | |
| 158 namespace sampler { | |
| 159 | |
| 160 namespace { | |
| 161 | |
| 162 #if defined(USE_SIGNALS) | |
| 163 typedef std::vector<Sampler*> SamplerList; | |
| 164 typedef SamplerList::iterator SamplerListIterator; | |
| 165 typedef base::AtomicValue<bool> AtomicMutex; | |
| 166 | |
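| // AtomicGuard acquires the AtomicMutex flag via compare-and-swap. With | |
| // is_blocking == false it makes a single attempt and the caller must check | |
| // is_success(); the signal handler uses this mode so it never blocks. | |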
| 167 class AtomicGuard { | |
| 168 public: | |
| 169 explicit AtomicGuard(AtomicMutex* atomic, bool is_blocking = true) | |
| 170 : atomic_(atomic), is_success_(false) { | |
| 171 do { | |
| 172 // Use Acquire_Load to gain mutual exclusion. | |
| 173 USE(atomic_->Value()); | |
| 174 is_success_ = atomic_->TrySetValue(false, true); | |
| 175 } while (is_blocking && !is_success_); | |
| 176 } | |
| 177 | |
| 178 bool is_success() const { return is_success_; } | |
| 179 | |
| 180 ~AtomicGuard() { | |
| 181 if (!is_success_) return; | |
| 182 atomic_->SetValue(false); | |
| 183 } | |
| 184 | |
| 185 private: | |
| 186 AtomicMutex* const atomic_; | |
| 187 bool is_success_; | |
| 188 }; | |
| 189 | |
| 190 // Returns key for hash map. | |
| 191 void* ThreadKey(pthread_t thread_id) { | |
| 192 return reinterpret_cast<void*>(thread_id); | |
| 193 } | |
| 194 | |
| 195 // Returns hash value for hash map. | |
| 196 uint32_t ThreadHash(pthread_t thread_id) { | |
| 197 #if V8_OS_MACOSX | |
| 198 return static_cast<uint32_t>(reinterpret_cast<intptr_t>(thread_id)); | |
| 199 #else | |
| 200 return static_cast<uint32_t>(thread_id); | |
| 201 #endif | |
| 202 } | |
| 203 | |
| 204 #endif // USE_SIGNALS | |
| 205 | |
| 206 } // namespace | |
| 207 | |
| 208 #if defined(USE_SIGNALS) | |
| 209 | |
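| // Remembers the thread on which the sampler was created (the VM thread); | |
| // Sampler::DoSample() later directs SIGPROF at this thread. | |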
| 210 class Sampler::PlatformData { | |
| 211 public: | |
| 212 PlatformData() : vm_tid_(pthread_self()) {} | |
| 213 pthread_t vm_tid() const { return vm_tid_; } | |
| 214 | |
| 215 private: | |
| 216 pthread_t vm_tid_; | |
| 217 }; | |
| 218 | |
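| // Maps pthread ids to the list of samplers registered for that thread, so | |
| // the signal handler can find the samplers attached to the thread it | |
| // interrupted. | |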
| 219 class SamplerManager { | |
| 220 public: | |
| 221 SamplerManager() : sampler_map_(base::HashMap::PointersMatch) {} | |
| 222 | |
| 223 void AddSampler(Sampler* sampler) { | |
| 224 AtomicGuard atomic_guard(&samplers_access_counter_); | |
| 225 DCHECK(sampler->IsActive() || !sampler->IsRegistered()); | |
| 226 // Add sampler into map if needed. | |
| 227 pthread_t thread_id = sampler->platform_data()->vm_tid(); | |
| 228 base::HashMap::Entry* entry = | |
| 229 sampler_map_.LookupOrInsert(ThreadKey(thread_id), | |
| 230 ThreadHash(thread_id)); | |
| 231 DCHECK(entry != nullptr); | |
| 232 if (entry->value == nullptr) { | |
| 233 SamplerList* samplers = new SamplerList(); | |
| 234 samplers->push_back(sampler); | |
| 235 entry->value = samplers; | |
| 236 } else { | |
| 237 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value); | |
| 238 bool exists = false; | |
| 239 for (SamplerListIterator iter = samplers->begin(); | |
| 240 iter != samplers->end(); ++iter) { | |
| 241 if (*iter == sampler) { | |
| 242 exists = true; | |
| 243 break; | |
| 244 } | |
| 245 } | |
| 246 if (!exists) { | |
| 247 samplers->push_back(sampler); | |
| 248 } | |
| 249 } | |
| 250 } | |
| 251 | |
| 252 void RemoveSampler(Sampler* sampler) { | |
| 253 AtomicGuard atomic_guard(&samplers_access_counter_); | |
| 254 DCHECK(sampler->IsActive() || sampler->IsRegistered()); | |
| 255 // Remove sampler from map. | |
| 256 pthread_t thread_id = sampler->platform_data()->vm_tid(); | |
| 257 void* thread_key = ThreadKey(thread_id); | |
| 258 uint32_t thread_hash = ThreadHash(thread_id); | |
| 259 base::HashMap::Entry* entry = sampler_map_.Lookup(thread_key, thread_hash); | |
| 260 DCHECK(entry != nullptr); | |
| 261 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value); | |
| 262 for (SamplerListIterator iter = samplers->begin(); iter != samplers->end(); | |
| 263 ++iter) { | |
| 264 if (*iter == sampler) { | |
| 265 samplers->erase(iter); | |
| 266 break; | |
| 267 } | |
| 268 } | |
| 269 if (samplers->empty()) { | |
| 270 sampler_map_.Remove(thread_key, thread_hash); | |
| 271 delete samplers; | |
| 272 } | |
| 273 } | |
| 274 | |
| 275 #if defined(USE_SIGNALS) | |
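| // Runs in the SIGPROF handler on the sampled thread: looks up the samplers | |
| // registered for the current thread and lets each one take a stack sample. | |
| // Uses a non-blocking AtomicGuard because a signal handler must not block. | |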
| 276 void DoSample(const v8::RegisterState& state) { | |
| 277 AtomicGuard atomic_guard(&SamplerManager::samplers_access_counter_, false); | |
| 278 if (!atomic_guard.is_success()) return; | |
| 279 pthread_t thread_id = pthread_self(); | |
| 280 base::HashMap::Entry* entry = | |
| 281 sampler_map_.Lookup(ThreadKey(thread_id), ThreadHash(thread_id)); | |
| 282 if (!entry) return; | |
| 283 SamplerList& samplers = *static_cast<SamplerList*>(entry->value); | |
| 284 | |
| 285 for (size_t i = 0; i < samplers.size(); ++i) { | |
| 286 Sampler* sampler = samplers[i]; | |
| 287 Isolate* isolate = sampler->isolate(); | |
| 288 // We require a fully initialized and entered isolate. | |
| 289 if (isolate == nullptr || !isolate->IsInUse()) continue; | |
| 290 if (v8::Locker::IsActive() && !Locker::IsLocked(isolate)) continue; | |
| 291 sampler->SampleStack(state); | |
| 292 } | |
| 293 } | |
| 294 #endif | |
| 295 | |
| 296 static SamplerManager* instance() { return instance_.Pointer(); } | |
| 297 | |
| 298 private: | |
| 299 base::HashMap sampler_map_; | |
| 300 static AtomicMutex samplers_access_counter_; | |
| 301 static base::LazyInstance<SamplerManager>::type instance_; | |
| 302 }; | |
| 303 | |
| 304 AtomicMutex SamplerManager::samplers_access_counter_; | |
| 305 base::LazyInstance<SamplerManager>::type SamplerManager::instance_ = | |
| 306 LAZY_INSTANCE_INITIALIZER; | |
| 307 | |
| 308 #elif V8_OS_WIN || V8_OS_CYGWIN | |
| 309 | |
| 310 // ---------------------------------------------------------------------------- | |
| 311 // Win32 profiler support. On Cygwin we use the same sampler implementation as | |
| 312 // on Win32. | |
| 313 | |
| 314 class Sampler::PlatformData { | |
| 315 public: | |
| 316 // Get a handle to the calling thread. This is the thread that we are | |
| 317 // going to profile. We need to make a copy of the handle because we are | |
| 318 // going to use it in the sampler thread. Using GetThreadHandle() will | |
| 319 // not work in this case. We're using OpenThread because DuplicateHandle | |
| 320 // for some reason doesn't work in Chrome's sandbox. | |
| 321 PlatformData() | |
| 322 : profiled_thread_(OpenThread(THREAD_GET_CONTEXT | | |
| 323 THREAD_SUSPEND_RESUME | | |
| 324 THREAD_QUERY_INFORMATION, | |
| 325 false, | |
| 326 GetCurrentThreadId())) {} | |
| 327 | |
| 328 ~PlatformData() { | |
| 329 if (profiled_thread_ != nullptr) { | |
| 330 CloseHandle(profiled_thread_); | |
| 331 profiled_thread_ = nullptr; | |
| 332 } | |
| 333 } | |
| 334 | |
| 335 HANDLE profiled_thread() { return profiled_thread_; } | |
| 336 | |
| 337 private: | |
| 338 HANDLE profiled_thread_; | |
| 339 }; | |
| 340 #endif // USE_SIGNALS | |
| 341 | |
| 342 | |
| 343 #if defined(USE_SIGNALS) | |
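| // Installs the process-wide SIGPROF handler when the first client appears | |
| // and restores the previous handler when the last client goes away. | |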
| 344 class SignalHandler { | |
| 345 public: | |
| 346 static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); } | |
| 347 static void TearDown() { | |
| 348 delete mutex_; | |
| 349 mutex_ = nullptr; | |
| 350 } | |
| 351 | |
| 352 static void IncreaseSamplerCount() { | |
| 353 base::LockGuard<base::Mutex> lock_guard(mutex_); | |
| 354 if (++client_count_ == 1) Install(); | |
| 355 } | |
| 356 | |
| 357 static void DecreaseSamplerCount() { | |
| 358 base::LockGuard<base::Mutex> lock_guard(mutex_); | |
| 359 if (--client_count_ == 0) Restore(); | |
| 360 } | |
| 361 | |
| 362 static bool Installed() { | |
| 363 base::LockGuard<base::Mutex> lock_guard(mutex_); | |
| 364 return signal_handler_installed_; | |
| 365 } | |
| 366 | |
| 367 private: | |
| 368 static void Install() { | |
| 369 #if !V8_OS_NACL | |
| 370 struct sigaction sa; | |
| 371 sa.sa_sigaction = &HandleProfilerSignal; | |
| 372 sigemptyset(&sa.sa_mask); | |
| 373 #if V8_OS_QNX | |
| 374 sa.sa_flags = SA_SIGINFO; | |
| 375 #else | |
| 376 sa.sa_flags = SA_RESTART | SA_SIGINFO; | |
| 377 #endif | |
| 378 signal_handler_installed_ = | |
| 379 (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0); | |
| 380 #endif // !V8_OS_NACL | |
| 381 } | |
| 382 | |
| 383 static void Restore() { | |
| 384 #if !V8_OS_NACL | |
| 385 if (signal_handler_installed_) { | |
| 386 sigaction(SIGPROF, &old_signal_handler_, 0); | |
| 387 signal_handler_installed_ = false; | |
| 388 } | |
| 389 #endif | |
| 390 } | |
| 391 | |
| 392 #if !V8_OS_NACL | |
| 393 static void FillRegisterState(void* context, RegisterState* regs); | |
| 394 static void HandleProfilerSignal(int signal, siginfo_t* info, void* context); | |
| 395 #endif | |
| 396 // Protects the process-wide state below. | |
| 397 static base::Mutex* mutex_; | |
| 398 static int client_count_; | |
| 399 static bool signal_handler_installed_; | |
| 400 static struct sigaction old_signal_handler_; | |
| 401 }; | |
| 402 | |
| 403 base::Mutex* SignalHandler::mutex_ = nullptr; | |
| 404 int SignalHandler::client_count_ = 0; | |
| 405 struct sigaction SignalHandler::old_signal_handler_; | |
| 406 bool SignalHandler::signal_handler_installed_ = false; | |
| 407 | |
| 408 | |
| 409 // As Native Client does not support signal handling, profiling is disabled. | |
| 410 #if !V8_OS_NACL | |
| 411 void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info, | |
| 412 void* context) { | |
| 413 USE(info); | |
| 414 if (signal != SIGPROF) return; | |
| 415 v8::RegisterState state; | |
| 416 FillRegisterState(context, &state); | |
| 417 SamplerManager::instance()->DoSample(state); | |
| 418 } | |
| 419 | |
| 420 void SignalHandler::FillRegisterState(void* context, RegisterState* state) { | |
| 421 // Extracting the sample from the context is extremely machine dependent. | |
| 422 ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context); | |
| 423 #if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390))) | |
| 424 mcontext_t& mcontext = ucontext->uc_mcontext; | |
| 425 #endif | |
| 426 #if V8_OS_LINUX | |
| 427 #if V8_HOST_ARCH_IA32 | |
| 428 state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_EIP]); | |
| 429 state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_ESP]); | |
| 430 state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_EBP]); | |
| 431 #elif V8_HOST_ARCH_X64 | |
| 432 state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_RIP]); | |
| 433 state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_RSP]); | |
| 434 state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_RBP]); | |
| 435 #elif V8_HOST_ARCH_ARM | |
| 436 #if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4) | |
| 437 // Old GLibc ARM versions used a gregs[] array to access the register | |
| 438 // values from mcontext_t. | |
| 439 state->pc = reinterpret_cast<void*>(mcontext.gregs[R15]); | |
| 440 state->sp = reinterpret_cast<void*>(mcontext.gregs[R13]); | |
| 441 state->fp = reinterpret_cast<void*>(mcontext.gregs[R11]); | |
| 442 #else | |
| 443 state->pc = reinterpret_cast<void*>(mcontext.arm_pc); | |
| 444 state->sp = reinterpret_cast<void*>(mcontext.arm_sp); | |
| 445 state->fp = reinterpret_cast<void*>(mcontext.arm_fp); | |
| 446 #endif // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4) | |
| 447 #elif V8_HOST_ARCH_ARM64 | |
| 448 state->pc = reinterpret_cast<void*>(mcontext.pc); | |
| 449 state->sp = reinterpret_cast<void*>(mcontext.sp); | |
| 450 // FP is an alias for x29. | |
| 451 state->fp = reinterpret_cast<void*>(mcontext.regs[29]); | |
| 452 #elif V8_HOST_ARCH_MIPS | |
| 453 state->pc = reinterpret_cast<void*>(mcontext.pc); | |
| 454 state->sp = reinterpret_cast<void*>(mcontext.gregs[29]); | |
| 455 state->fp = reinterpret_cast<void*>(mcontext.gregs[30]); | |
| 456 #elif V8_HOST_ARCH_MIPS64 | |
| 457 state->pc = reinterpret_cast<void*>(mcontext.pc); | |
| 458 state->sp = reinterpret_cast<void*>(mcontext.gregs[29]); | |
| 459 state->fp = reinterpret_cast<void*>(mcontext.gregs[30]); | |
| 460 #elif V8_HOST_ARCH_PPC | |
| 461 state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip); | |
| 462 state->sp = | |
| 463 reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R1]); | |
| 464 state->fp = | |
| 465 reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R31]); | |
| 466 #elif V8_HOST_ARCH_S390 | |
| 467 #if V8_TARGET_ARCH_32_BIT | |
| 468 // A 31-bit target sets bit 0 (MSB) of the PSW to denote the addressing | |
| 469 // mode. This bit needs to be masked out to resolve the actual address. | |
| 470 state->pc = | |
| 471 reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF); | |
| 472 #else | |
| 473 state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr); | |
| 474 #endif // V8_TARGET_ARCH_32_BIT | |
| 475 state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[15]); | |
| 476 state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[11]); | |
| 477 #endif // V8_HOST_ARCH_* | |
| 478 #elif V8_OS_MACOSX | |
| 479 #if V8_HOST_ARCH_X64 | |
| 480 #if __DARWIN_UNIX03 | |
| 481 state->pc = reinterpret_cast<void*>(mcontext->__ss.__rip); | |
| 482 state->sp = reinterpret_cast<void*>(mcontext->__ss.__rsp); | |
| 483 state->fp = reinterpret_cast<void*>(mcontext->__ss.__rbp); | |
| 484 #else // !__DARWIN_UNIX03 | |
| 485 state->pc = reinterpret_cast<void*>(mcontext->ss.rip); | |
| 486 state->sp = reinterpret_cast<void*>(mcontext->ss.rsp); | |
| 487 state->fp = reinterpret_cast<void*>(mcontext->ss.rbp); | |
| 488 #endif // __DARWIN_UNIX03 | |
| 489 #elif V8_HOST_ARCH_IA32 | |
| 490 #if __DARWIN_UNIX03 | |
| 491 state->pc = reinterpret_cast<void*>(mcontext->__ss.__eip); | |
| 492 state->sp = reinterpret_cast<void*>(mcontext->__ss.__esp); | |
| 493 state->fp = reinterpret_cast<void*>(mcontext->__ss.__ebp); | |
| 494 #else // !__DARWIN_UNIX03 | |
| 495 state->pc = reinterpret_cast<void*>(mcontext->ss.eip); | |
| 496 state->sp = reinterpret_cast<void*>(mcontext->ss.esp); | |
| 497 state->fp = reinterpret_cast<void*>(mcontext->ss.ebp); | |
| 498 #endif // __DARWIN_UNIX03 | |
| 499 #endif // V8_HOST_ARCH_IA32 | |
| 500 #elif V8_OS_FREEBSD | |
| 501 #if V8_HOST_ARCH_IA32 | |
| 502 state->pc = reinterpret_cast<void*>(mcontext.mc_eip); | |
| 503 state->sp = reinterpret_cast<void*>(mcontext.mc_esp); | |
| 504 state->fp = reinterpret_cast<void*>(mcontext.mc_ebp); | |
| 505 #elif V8_HOST_ARCH_X64 | |
| 506 state->pc = reinterpret_cast<void*>(mcontext.mc_rip); | |
| 507 state->sp = reinterpret_cast<void*>(mcontext.mc_rsp); | |
| 508 state->fp = reinterpret_cast<void*>(mcontext.mc_rbp); | |
| 509 #elif V8_HOST_ARCH_ARM | |
| 510 state->pc = reinterpret_cast<void*>(mcontext.mc_r15); | |
| 511 state->sp = reinterpret_cast<void*>(mcontext.mc_r13); | |
| 512 state->fp = reinterpret_cast<void*>(mcontext.mc_r11); | |
| 513 #endif // V8_HOST_ARCH_* | |
| 514 #elif V8_OS_NETBSD | |
| 515 #if V8_HOST_ARCH_IA32 | |
| 516 state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_EIP]); | |
| 517 state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_ESP]); | |
| 518 state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_EBP]); | |
| 519 #elif V8_HOST_ARCH_X64 | |
| 520 state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_RIP]); | |
| 521 state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RSP]); | |
| 522 state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RBP]); | |
| 523 #endif // V8_HOST_ARCH_* | |
| 524 #elif V8_OS_OPENBSD | |
| 525 #if V8_HOST_ARCH_IA32 | |
| 526 state->pc = reinterpret_cast<void*>(ucontext->sc_eip); | |
| 527 state->sp = reinterpret_cast<void*>(ucontext->sc_esp); | |
| 528 state->fp = reinterpret_cast<void*>(ucontext->sc_ebp); | |
| 529 #elif V8_HOST_ARCH_X64 | |
| 530 state->pc = reinterpret_cast<void*>(ucontext->sc_rip); | |
| 531 state->sp = reinterpret_cast<void*>(ucontext->sc_rsp); | |
| 532 state->fp = reinterpret_cast<void*>(ucontext->sc_rbp); | |
| 533 #endif // V8_HOST_ARCH_* | |
| 534 #elif V8_OS_SOLARIS | |
| 535 state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_PC]); | |
| 536 state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_SP]); | |
| 537 state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_FP]); | |
| 538 #elif V8_OS_QNX | |
| 539 #if V8_HOST_ARCH_IA32 | |
| 540 state->pc = reinterpret_cast<void*>(mcontext.cpu.eip); | |
| 541 state->sp = reinterpret_cast<void*>(mcontext.cpu.esp); | |
| 542 state->fp = reinterpret_cast<void*>(mcontext.cpu.ebp); | |
| 543 #elif V8_HOST_ARCH_ARM | |
| 544 state->pc = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_PC]); | |
| 545 state->sp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_SP]); | |
| 546 state->fp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_FP]); | |
| 547 #endif // V8_HOST_ARCH_* | |
| 548 #elif V8_OS_AIX | |
| 549 state->pc = reinterpret_cast<void*>(mcontext.jmp_context.iar); | |
| 550 state->sp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[1]); | |
| 551 state->fp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[31]); | |
| 552 #endif // V8_OS_AIX | |
| 553 } | |
| 554 | |
| 555 #endif // !V8_OS_NACL | |
| 556 | |
| 557 #endif // USE_SIGNALS | |
| 558 | |
| 559 | |
| 560 void Sampler::SetUp() { | |
| 561 #if defined(USE_SIGNALS) | |
| 562 SignalHandler::SetUp(); | |
| 563 #endif | |
| 564 } | |
| 565 | |
| 566 | |
| 567 void Sampler::TearDown() { | |
| 568 #if defined(USE_SIGNALS) | |
| 569 SignalHandler::TearDown(); | |
| 570 #endif | |
| 571 } | |
| 572 | |
| 573 Sampler::Sampler(Isolate* isolate) | |
| 574 : is_counting_samples_(false), | |
| 575 js_sample_count_(0), | |
| 576 external_sample_count_(0), | |
| 577 isolate_(isolate), | |
| 578 profiling_(false), | |
| 579 has_processing_thread_(false), | |
| 580 active_(false), | |
| 581 registered_(false) { | |
| 582 data_ = new PlatformData; | |
| 583 } | |
| 584 | |
| 585 Sampler::~Sampler() { | |
| 586 DCHECK(!IsActive()); | |
| 587 #if defined(USE_SIGNALS) | |
| 588 if (IsRegistered()) { | |
| 589 SamplerManager::instance()->RemoveSampler(this); | |
| 590 } | |
| 591 #endif | |
| 592 delete data_; | |
| 593 } | |
| 594 | |
| 595 void Sampler::Start() { | |
| 596 DCHECK(!IsActive()); | |
| 597 SetActive(true); | |
| 598 #if defined(USE_SIGNALS) | |
| 599 SamplerManager::instance()->AddSampler(this); | |
| 600 #endif | |
| 601 } | |
| 602 | |
| 603 | |
| 604 void Sampler::Stop() { | |
| 605 #if defined(USE_SIGNALS) | |
| 606 SamplerManager::instance()->RemoveSampler(this); | |
| 607 #endif | |
| 608 DCHECK(IsActive()); | |
| 609 SetActive(false); | |
| 610 SetRegistered(false); | |
| 611 } | |
| 612 | |
| 613 | |
| 614 void Sampler::IncreaseProfilingDepth() { | |
| 615 base::NoBarrier_AtomicIncrement(&profiling_, 1); | |
| 616 #if defined(USE_SIGNALS) | |
| 617 SignalHandler::IncreaseSamplerCount(); | |
| 618 #endif | |
| 619 } | |
| 620 | |
| 621 | |
| 622 void Sampler::DecreaseProfilingDepth() { | |
| 623 #if defined(USE_SIGNALS) | |
| 624 SignalHandler::DecreaseSamplerCount(); | |
| 625 #endif | |
| 626 base::NoBarrier_AtomicIncrement(&profiling_, -1); | |
| 627 } | |
| 628 | |
| 629 | |
| 630 #if defined(USE_SIGNALS) | |
| 631 | |
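| // Registers this sampler on demand, then sends SIGPROF to the VM thread; | |
| // the actual stack capture happens in SignalHandler::HandleProfilerSignal. | |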
| 632 void Sampler::DoSample() { | |
| 633 if (!SignalHandler::Installed()) return; | |
| 634 if (!IsActive() && !IsRegistered()) { | |
| 635 SamplerManager::instance()->AddSampler(this); | |
| 636 SetRegistered(true); | |
| 637 } | |
| 638 pthread_kill(platform_data()->vm_tid(), SIGPROF); | |
| 639 } | |
| 640 | |
| 641 #elif V8_OS_WIN || V8_OS_CYGWIN | |
| 642 | |
| 643 void Sampler::DoSample() { | |
| 644 HANDLE profiled_thread = platform_data()->profiled_thread(); | |
| 645 if (profiled_thread == nullptr) return; | |
| 646 | |
| 647 const DWORD kSuspendFailed = static_cast<DWORD>(-1); | |
| 648 if (SuspendThread(profiled_thread) == kSuspendFailed) return; | |
| 649 | |
| 650 // Context used for sampling the register state of the profiled thread. | |
| 651 CONTEXT context; | |
| 652 memset(&context, 0, sizeof(context)); | |
| 653 context.ContextFlags = CONTEXT_FULL; | |
| 654 if (GetThreadContext(profiled_thread, &context) != 0) { | |
| 655 v8::RegisterState state; | |
| 656 #if V8_HOST_ARCH_X64 | |
| 657 state.pc = reinterpret_cast<void*>(context.Rip); | |
| 658 state.sp = reinterpret_cast<void*>(context.Rsp); | |
| 659 state.fp = reinterpret_cast<void*>(context.Rbp); | |
| 660 #else | |
| 661 state.pc = reinterpret_cast<void*>(context.Eip); | |
| 662 state.sp = reinterpret_cast<void*>(context.Esp); | |
| 663 state.fp = reinterpret_cast<void*>(context.Ebp); | |
| 664 #endif | |
| 665 SampleStack(state); | |
| 666 } | |
| 667 ResumeThread(profiled_thread); | |
| 668 } | |
| 669 | |
| 670 #endif // USE_SIGNALS | |
| 671 | |
| 672 } // namespace sampler | |
| 673 } // namespace v8 | |
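For orientation, here is a minimal usage sketch of the API implemented above. It assumes a concrete subclass may override the virtual `SampleStack(const v8::RegisterState&)` that this file invokes, and that the embedder drives sampling itself; `MySampler`, `Profile()`, and the fixed 100-iteration loop are illustrative names and numbers, not part of V8.

```cpp
// Hedged sketch only: MySampler and Profile() are hypothetical helpers.
#include "src/libsampler/v8-sampler.h"

class MySampler : public v8::sampler::Sampler {
 public:
  explicit MySampler(v8::Isolate* isolate) : Sampler(isolate) {}
  void SampleStack(const v8::RegisterState& regs) override {
    // Record regs.pc / regs.sp / regs.fp into the profile here.
  }
};

void Profile(v8::Isolate* isolate) {
  v8::sampler::Sampler::SetUp();     // create the process-wide state
  MySampler sampler(isolate);
  sampler.IncreaseProfilingDepth();  // installs the SIGPROF handler on POSIX
  sampler.Start();
  for (int i = 0; i < 100; ++i) {
    sampler.DoSample();              // interrupt the VM thread and sample it
    // ...sleep roughly one sampling interval here...
  }
  sampler.Stop();
  sampler.DecreaseProfilingDepth();
  v8::sampler::Sampler::TearDown();
}
```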