OLD | NEW |
(Empty) | |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "src/profiler/sampler.h" |
| 6 |
| 7 #if V8_OS_POSIX && !V8_OS_CYGWIN |
| 8 |
| 9 #define USE_SIGNALS |
| 10 |
| 11 #include <errno.h> |
| 12 #include <pthread.h> |
| 13 #include <signal.h> |
| 14 #include <sys/time.h> |
| 15 |
| 16 #if !V8_OS_QNX && !V8_OS_NACL && !V8_OS_AIX |
| 17 #include <sys/syscall.h> // NOLINT |
| 18 #endif |
| 19 |
| 20 #if V8_OS_MACOSX |
| 21 #include <mach/mach.h> |
| 22 // OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h> |
| 23 // and is a typedef for struct sigcontext. There is no uc_mcontext. |
| 24 #elif(!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) && \ |
| 25 !V8_OS_OPENBSD && !V8_OS_NACL |
| 26 #include <ucontext.h> |
| 27 #endif |
| 28 |
| 29 #include <unistd.h> |
| 30 |
| 31 // GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'. |
| 32 // Old versions of the C library <signal.h> didn't define the type. |
| 33 #if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \ |
| 34 (defined(__arm__) || defined(__aarch64__)) && \ |
| 35 !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT) |
| 36 #include <asm/sigcontext.h> // NOLINT |
| 37 #endif |
| 38 |
| 39 #elif V8_OS_WIN || V8_OS_CYGWIN |
| 40 |
| 41 #include "src/base/win32-headers.h" |
| 42 |
| 43 #endif |
| 44 |
| 45 #include "src/base/atomic-utils.h" |
| 46 #include "src/base/platform/platform.h" |
| 47 #include "src/profiler/cpu-profiler-inl.h" |
| 48 #include "src/profiler/tick-sample.h" |
| 49 #include "src/simulator.h" |
| 50 #include "src/v8threads.h" |
| 51 |
| 52 |
| 53 #if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) |
| 54 |
| 55 // Not all versions of Android's C library provide ucontext_t. |
| 56 // Detect this and provide custom but compatible definitions. Note that these |
| 57 // follow the GLibc naming convention to access register values from |
| 58 // mcontext_t. |
| 59 // |
| 60 // See http://code.google.com/p/android/issues/detail?id=34784 |
| 61 |
| 62 #if defined(__arm__) |
| 63 |
| 64 typedef struct sigcontext mcontext_t; |
| 65 |
| 66 typedef struct ucontext { |
| 67 uint32_t uc_flags; |
| 68 struct ucontext* uc_link; |
| 69 stack_t uc_stack; |
| 70 mcontext_t uc_mcontext; |
| 71 // Other fields are not used by V8, don't define them here. |
| 72 } ucontext_t; |
| 73 |
| 74 #elif defined(__aarch64__) |
| 75 |
| 76 typedef struct sigcontext mcontext_t; |
| 77 |
| 78 typedef struct ucontext { |
| 79 uint64_t uc_flags; |
| 80 struct ucontext *uc_link; |
| 81 stack_t uc_stack; |
| 82 mcontext_t uc_mcontext; |
| 83 // Other fields are not used by V8, don't define them here. |
| 84 } ucontext_t; |
| 85 |
| 86 #elif defined(__mips__) |
| 87 // MIPS version of sigcontext, for Android bionic. |
| 88 typedef struct { |
| 89 uint32_t regmask; |
| 90 uint32_t status; |
| 91 uint64_t pc; |
| 92 uint64_t gregs[32]; |
| 93 uint64_t fpregs[32]; |
| 94 uint32_t acx; |
| 95 uint32_t fpc_csr; |
| 96 uint32_t fpc_eir; |
| 97 uint32_t used_math; |
| 98 uint32_t dsp; |
| 99 uint64_t mdhi; |
| 100 uint64_t mdlo; |
| 101 uint32_t hi1; |
| 102 uint32_t lo1; |
| 103 uint32_t hi2; |
| 104 uint32_t lo2; |
| 105 uint32_t hi3; |
| 106 uint32_t lo3; |
| 107 } mcontext_t; |
| 108 |
| 109 typedef struct ucontext { |
| 110 uint32_t uc_flags; |
| 111 struct ucontext* uc_link; |
| 112 stack_t uc_stack; |
| 113 mcontext_t uc_mcontext; |
| 114 // Other fields are not used by V8, don't define them here. |
| 115 } ucontext_t; |
| 116 |
| 117 #elif defined(__i386__) |
| 118 // x86 version for Android. |
| 119 typedef struct { |
| 120 uint32_t gregs[19]; |
| 121 void* fpregs; |
| 122 uint32_t oldmask; |
| 123 uint32_t cr2; |
| 124 } mcontext_t; |
| 125 |
| 126 typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks |
| 127 typedef struct ucontext { |
| 128 uint32_t uc_flags; |
| 129 struct ucontext* uc_link; |
| 130 stack_t uc_stack; |
| 131 mcontext_t uc_mcontext; |
| 132 // Other fields are not used by V8, don't define them here. |
| 133 } ucontext_t; |
| 134 enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 }; |
| 135 |
| 136 #elif defined(__x86_64__) |
| 137 // x64 version for Android. |
| 138 typedef struct { |
| 139 uint64_t gregs[23]; |
| 140 void* fpregs; |
| 141 uint64_t __reserved1[8]; |
| 142 } mcontext_t; |
| 143 |
| 144 typedef struct ucontext { |
| 145 uint64_t uc_flags; |
| 146 struct ucontext *uc_link; |
| 147 stack_t uc_stack; |
| 148 mcontext_t uc_mcontext; |
| 149 // Other fields are not used by V8, don't define them here. |
| 150 } ucontext_t; |
| 151 enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 }; |
| 152 #endif |
| 153 |
| 154 #endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) |
| 155 |
| 156 |
| 157 namespace v8 { |
| 158 namespace internal { |
| 159 |
| 160 namespace { |
| 161 |
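| // Platform-independent part of the per-sampler data: remembers the ThreadId |
| // of the thread on which the sampler was created (the profiled thread). |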
| 162 class PlatformDataCommon : public Malloced { |
| 163 public: |
| 164 PlatformDataCommon() : profiled_thread_id_(ThreadId::Current()) {} |
| 165 ThreadId profiled_thread_id() { return profiled_thread_id_; } |
| 166 |
| 167 protected: |
| 168 ~PlatformDataCommon() {} |
| 169 |
| 170 private: |
| 171 ThreadId profiled_thread_id_; |
| 172 }; |
| 173 |
| 174 |
| 175 typedef List<Sampler*> SamplerList; |
| 176 |
| 177 #if defined(USE_SIGNALS) |
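| // Spinlock-style scoped guard over an AtomicValue<int>. The constructor |
| // tries to flip the value from 0 to 1 (spinning while is_block is true) and |
| // the destructor releases it if it was acquired. The non-blocking mode lets |
| // the signal handler give up instead of spinning on a lock held by the code |
| // it interrupted. |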
| 178 class AtomicGuard { |
| 179 public: |
| 180 explicit AtomicGuard(base::AtomicValue<int>* atomic, bool is_block = true) |
| 181 : atomic_(atomic), |
| 182 is_success_(false) { |
| 183 do { |
| 184 // Use Acquire_Load to gain mutual exclusion. |
| 185 USE(atomic_->Value()); |
| 186 is_success_ = atomic_->TrySetValue(0, 1); |
| 187 } while (is_block && !is_success_); |
| 188 } |
| 189 |
| 190 bool is_success() { return is_success_; } |
| 191 |
| 192 ~AtomicGuard() { |
| 193 if (is_success_) { |
| 194 atomic_->SetValue(0); |
| 195 } |
| 196 atomic_ = NULL; |
| 197 } |
| 198 |
| 199 private: |
| 200 base::AtomicValue<int>* atomic_; |
| 201 bool is_success_; |
| 202 }; |
| 203 |
| 204 |
| 205 // Returns key for hash map. |
| 206 void* ThreadKey(pthread_t thread_id) { |
| 207 return reinterpret_cast<void*>(thread_id); |
| 208 } |
| 209 |
| 210 |
| 211 // Returns hash value for hash map. |
| 212 uint32_t ThreadHash(pthread_t thread_id) { |
| 213 #if V8_OS_MACOSX |
| 214 return static_cast<uint32_t>(reinterpret_cast<intptr_t>(thread_id)); |
| 215 #else |
| 216 return static_cast<uint32_t>(thread_id); |
| 217 #endif |
| 218 } |
| 219 #endif // USE_SIGNALS |
| 220 |
| 221 } // namespace |
| 222 |
| 223 #if defined(USE_SIGNALS) |
| 224 |
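| // On signal-based platforms the only per-thread datum needed is the |
| // pthread_t of the profiled (VM) thread, used as the SIGPROF target and as |
| // the key into the thread-to-samplers map. |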
| 225 class Sampler::PlatformData : public PlatformDataCommon { |
| 226 public: |
| 227 PlatformData() : vm_tid_(pthread_self()) {} |
| 228 pthread_t vm_tid() const { return vm_tid_; } |
| 229 |
| 230 private: |
| 231 pthread_t vm_tid_; |
| 232 }; |
| 233 |
| 234 #elif V8_OS_WIN || V8_OS_CYGWIN |
| 235 |
| 236 // ---------------------------------------------------------------------------- |
| 237 // Win32 profiler support. On Cygwin we use the same sampler implementation as |
| 238 // on Win32. |
| 239 |
| 240 class Sampler::PlatformData : public PlatformDataCommon { |
| 241 public: |
| 242 // Get a handle to the calling thread. This is the thread that we are |
| 243 // going to profile. We need to make a copy of the handle because we are |
| 244 // going to use it in the sampler thread. Using GetThreadHandle() will |
| 245 // not work in this case. We're using OpenThread because DuplicateHandle |
| 246 // for some reason doesn't work in Chrome's sandbox. |
| 247 PlatformData() |
| 248 : profiled_thread_(OpenThread(THREAD_GET_CONTEXT | |
| 249 THREAD_SUSPEND_RESUME | |
| 250 THREAD_QUERY_INFORMATION, |
| 251 false, |
| 252 GetCurrentThreadId())) {} |
| 253 |
| 254 ~PlatformData() { |
| 255 if (profiled_thread_ != NULL) { |
| 256 CloseHandle(profiled_thread_); |
| 257 profiled_thread_ = NULL; |
| 258 } |
| 259 } |
| 260 |
| 261 HANDLE profiled_thread() { return profiled_thread_; } |
| 262 |
| 263 private: |
| 264 HANDLE profiled_thread_; |
| 265 }; |
| 266 #endif |
| 267 |
| 268 |
| 269 #if defined(USE_SIGNALS) |
| 270 |
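| // Process-wide SIGPROF management. Clients are reference-counted: the |
| // handler is installed when the count goes from 0 to 1 and the previous |
| // handler is restored when it drops back to 0. |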
| 271 class SignalHandler : public AllStatic { |
| 272 public: |
| 273 static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); } |
| 274 static void TearDown() { delete mutex_; mutex_ = NULL; } |
| 275 |
| 276 static void IncreaseSamplerCount() { |
| 277 base::LockGuard<base::Mutex> lock_guard(mutex_); |
| 278 if (++client_count_ == 1) Install(); |
| 279 } |
| 280 |
| 281 static void DecreaseSamplerCount() { |
| 282 base::LockGuard<base::Mutex> lock_guard(mutex_); |
| 283 if (--client_count_ == 0) Restore(); |
| 284 } |
| 285 |
| 286 static bool Installed() { |
| 287 return signal_handler_installed_; |
| 288 } |
| 289 |
| 290 #if !V8_OS_NACL |
| 291 static void CollectSample(void* context, Sampler* sampler); |
| 292 #endif |
| 293 |
| 294 private: |
| 295 static void Install() { |
| 296 #if !V8_OS_NACL |
| 297 struct sigaction sa; |
| 298 sa.sa_sigaction = &HandleProfilerSignal; |
| 299 sigemptyset(&sa.sa_mask); |
| 300 #if V8_OS_QNX |
| 301 sa.sa_flags = SA_SIGINFO; |
| 302 #else |
| 303 sa.sa_flags = SA_RESTART | SA_SIGINFO; |
| 304 #endif |
| 305 signal_handler_installed_ = |
| 306 (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0); |
| 307 #endif |
| 308 } |
| 309 |
| 310 static void Restore() { |
| 311 #if !V8_OS_NACL |
| 312 if (signal_handler_installed_) { |
| 313 sigaction(SIGPROF, &old_signal_handler_, 0); |
| 314 signal_handler_installed_ = false; |
| 315 } |
| 316 #endif |
| 317 } |
| 318 |
| 319 #if !V8_OS_NACL |
| 320 static void HandleProfilerSignal(int signal, siginfo_t* info, void* context); |
| 321 #endif |
| 322 // Protects the process-wide state below. |
| 323 static base::Mutex* mutex_; |
| 324 static int client_count_; |
| 325 static bool signal_handler_installed_; |
| 326 static struct sigaction old_signal_handler_; |
| 327 }; |
| 328 |
| 329 |
| 330 base::Mutex* SignalHandler::mutex_ = NULL; |
| 331 int SignalHandler::client_count_ = 0; |
| 332 struct sigaction SignalHandler::old_signal_handler_; |
| 333 bool SignalHandler::signal_handler_installed_ = false; |
| 334 |
| 335 |
| 336 // As Native Client does not support signal handling, profiling is disabled. |
| 337 #if !V8_OS_NACL |
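| // Reads pc, sp and fp for the interrupted thread out of the signal's |
| // ucontext (or from the simulator) and forwards them to the sampler. |
| // Called from HandleProfilerSignal on the interrupted thread. |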
| 338 void SignalHandler::CollectSample(void* context, Sampler* sampler) { |
| 339 if (sampler == NULL || (!sampler->IsProfiling() && |
| 340 !sampler->IsRegistered())) { |
| 341 return; |
| 342 } |
| 343 Isolate* isolate = sampler->isolate(); |
| 344 |
| 345 // We require a fully initialized and entered isolate. |
| 346 if (isolate == NULL || !isolate->IsInUse()) return; |
| 347 |
| 348 if (v8::Locker::IsActive() && |
| 349 !isolate->thread_manager()->IsLockedByCurrentThread()) { |
| 350 return; |
| 351 } |
| 352 |
| 353 v8::RegisterState state; |
| 354 |
| 355 #if defined(USE_SIMULATOR) |
| 356 if (!SimulatorHelper::FillRegisters(isolate, &state)) return; |
| 357 #else |
| 358 // Extracting the sample from the context is extremely machine dependent. |
| 359 ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context); |
| 360 #if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390))) |
| 361 mcontext_t& mcontext = ucontext->uc_mcontext; |
| 362 #endif |
| 363 #if V8_OS_LINUX |
| 364 #if V8_HOST_ARCH_IA32 |
| 365 state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]); |
| 366 state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]); |
| 367 state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]); |
| 368 #elif V8_HOST_ARCH_X64 |
| 369 state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]); |
| 370 state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]); |
| 371 state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]); |
| 372 #elif V8_HOST_ARCH_ARM |
| 373 #if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4) |
| 374 // Old GLibc ARM versions used a gregs[] array to access the register |
| 375 // values from mcontext_t. |
| 376 state.pc = reinterpret_cast<Address>(mcontext.gregs[R15]); |
| 377 state.sp = reinterpret_cast<Address>(mcontext.gregs[R13]); |
| 378 state.fp = reinterpret_cast<Address>(mcontext.gregs[R11]); |
| 379 #else |
| 380 state.pc = reinterpret_cast<Address>(mcontext.arm_pc); |
| 381 state.sp = reinterpret_cast<Address>(mcontext.arm_sp); |
| 382 state.fp = reinterpret_cast<Address>(mcontext.arm_fp); |
| 383 #endif // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4) |
| 384 #elif V8_HOST_ARCH_ARM64 |
| 385 state.pc = reinterpret_cast<Address>(mcontext.pc); |
| 386 state.sp = reinterpret_cast<Address>(mcontext.sp); |
| 387 // FP is an alias for x29. |
| 388 state.fp = reinterpret_cast<Address>(mcontext.regs[29]); |
| 389 #elif V8_HOST_ARCH_MIPS |
| 390 state.pc = reinterpret_cast<Address>(mcontext.pc); |
| 391 state.sp = reinterpret_cast<Address>(mcontext.gregs[29]); |
| 392 state.fp = reinterpret_cast<Address>(mcontext.gregs[30]); |
| 393 #elif V8_HOST_ARCH_MIPS64 |
| 394 state.pc = reinterpret_cast<Address>(mcontext.pc); |
| 395 state.sp = reinterpret_cast<Address>(mcontext.gregs[29]); |
| 396 state.fp = reinterpret_cast<Address>(mcontext.gregs[30]); |
| 397 #elif V8_HOST_ARCH_PPC |
| 398 state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->nip); |
| 399 state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R1]); |
| 400 state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R31]); |
| 401 #elif V8_HOST_ARCH_S390 |
| 402 #if V8_TARGET_ARCH_32_BIT |
| 403 // A 31-bit target has bit 0 (the MSB) of the PSW address set to denote the |
| 404 // addressing mode. This bit must be masked out to recover the actual address. |
| 405 state.pc = |
| 406 reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF); |
| 407 #else |
| 408 state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr); |
| 409 #endif // V8_TARGET_ARCH_32_BIT |
| 410 state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[15]); |
| 411 state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[11]); |
| 412 #endif // V8_HOST_ARCH_* |
| 413 #elif V8_OS_MACOSX |
| 414 #if V8_HOST_ARCH_X64 |
| 415 #if __DARWIN_UNIX03 |
| 416 state.pc = reinterpret_cast<Address>(mcontext->__ss.__rip); |
| 417 state.sp = reinterpret_cast<Address>(mcontext->__ss.__rsp); |
| 418 state.fp = reinterpret_cast<Address>(mcontext->__ss.__rbp); |
| 419 #else // !__DARWIN_UNIX03 |
| 420 state.pc = reinterpret_cast<Address>(mcontext->ss.rip); |
| 421 state.sp = reinterpret_cast<Address>(mcontext->ss.rsp); |
| 422 state.fp = reinterpret_cast<Address>(mcontext->ss.rbp); |
| 423 #endif // __DARWIN_UNIX03 |
| 424 #elif V8_HOST_ARCH_IA32 |
| 425 #if __DARWIN_UNIX03 |
| 426 state.pc = reinterpret_cast<Address>(mcontext->__ss.__eip); |
| 427 state.sp = reinterpret_cast<Address>(mcontext->__ss.__esp); |
| 428 state.fp = reinterpret_cast<Address>(mcontext->__ss.__ebp); |
| 429 #else // !__DARWIN_UNIX03 |
| 430 state.pc = reinterpret_cast<Address>(mcontext->ss.eip); |
| 431 state.sp = reinterpret_cast<Address>(mcontext->ss.esp); |
| 432 state.fp = reinterpret_cast<Address>(mcontext->ss.ebp); |
| 433 #endif // __DARWIN_UNIX03 |
| 434 #endif // V8_HOST_ARCH_IA32 |
| 435 #elif V8_OS_FREEBSD |
| 436 #if V8_HOST_ARCH_IA32 |
| 437 state.pc = reinterpret_cast<Address>(mcontext.mc_eip); |
| 438 state.sp = reinterpret_cast<Address>(mcontext.mc_esp); |
| 439 state.fp = reinterpret_cast<Address>(mcontext.mc_ebp); |
| 440 #elif V8_HOST_ARCH_X64 |
| 441 state.pc = reinterpret_cast<Address>(mcontext.mc_rip); |
| 442 state.sp = reinterpret_cast<Address>(mcontext.mc_rsp); |
| 443 state.fp = reinterpret_cast<Address>(mcontext.mc_rbp); |
| 444 #elif V8_HOST_ARCH_ARM |
| 445 state.pc = reinterpret_cast<Address>(mcontext.mc_r15); |
| 446 state.sp = reinterpret_cast<Address>(mcontext.mc_r13); |
| 447 state.fp = reinterpret_cast<Address>(mcontext.mc_r11); |
| 448 #endif // V8_HOST_ARCH_* |
| 449 #elif V8_OS_NETBSD |
| 450 #if V8_HOST_ARCH_IA32 |
| 451 state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]); |
| 452 state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]); |
| 453 state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]); |
| 454 #elif V8_HOST_ARCH_X64 |
| 455 state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]); |
| 456 state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]); |
| 457 state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]); |
| 458 #endif // V8_HOST_ARCH_* |
| 459 #elif V8_OS_OPENBSD |
| 460 #if V8_HOST_ARCH_IA32 |
| 461 state.pc = reinterpret_cast<Address>(ucontext->sc_eip); |
| 462 state.sp = reinterpret_cast<Address>(ucontext->sc_esp); |
| 463 state.fp = reinterpret_cast<Address>(ucontext->sc_ebp); |
| 464 #elif V8_HOST_ARCH_X64 |
| 465 state.pc = reinterpret_cast<Address>(ucontext->sc_rip); |
| 466 state.sp = reinterpret_cast<Address>(ucontext->sc_rsp); |
| 467 state.fp = reinterpret_cast<Address>(ucontext->sc_rbp); |
| 468 #endif // V8_HOST_ARCH_* |
| 469 #elif V8_OS_SOLARIS |
| 470 state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]); |
| 471 state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]); |
| 472 state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]); |
| 473 #elif V8_OS_QNX |
| 474 #if V8_HOST_ARCH_IA32 |
| 475 state.pc = reinterpret_cast<Address>(mcontext.cpu.eip); |
| 476 state.sp = reinterpret_cast<Address>(mcontext.cpu.esp); |
| 477 state.fp = reinterpret_cast<Address>(mcontext.cpu.ebp); |
| 478 #elif V8_HOST_ARCH_ARM |
| 479 state.pc = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_PC]); |
| 480 state.sp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_SP]); |
| 481 state.fp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_FP]); |
| 482 #endif // V8_HOST_ARCH_* |
| 483 #elif V8_OS_AIX |
| 484 state.pc = reinterpret_cast<Address>(mcontext.jmp_context.iar); |
| 485 state.sp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[1]); |
| 486 state.fp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[31]); |
| 487 #endif // V8_OS_AIX |
| 488 #endif // USE_SIMULATOR |
| 489 sampler->SampleStack(state); |
| 490 } |
| 491 #endif // V8_OS_NACL |
| 492 |
| 493 #endif // USE_SIGNALS |
| 494 |
| 495 |
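| // Background thread that drives sampling: roughly every interval_ |
| // milliseconds it either sends SIGPROF to each registered thread |
| // (signal-based platforms) or samples each active sampler directly |
| // (Windows/Cygwin). It exits once no samplers remain. |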
| 496 class SamplerThread : public base::Thread { |
| 497 public: |
| 498 static const int kSamplerThreadStackSize = 64 * KB; |
| 499 |
| 500 explicit SamplerThread(int interval) |
| 501 : Thread(base::Thread::Options("SamplerThread", kSamplerThreadStackSize)), |
| 502 interval_(interval) {} |
| 503 |
| 504 static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); } |
| 505 static void TearDown() { delete mutex_; mutex_ = NULL; } |
| 506 |
| 507 static void AddActiveSampler(Sampler* sampler) { |
| 508 bool need_to_start = false; |
| 509 base::LockGuard<base::Mutex> lock_guard(mutex_); |
| 510 if (instance_ == NULL) { |
| 511 // Start a thread that will send SIGPROF signals to VM threads |
| 512 // when CPU profiling is enabled. |
| 513 instance_ = new SamplerThread(sampler->interval()); |
| 514 need_to_start = true; |
| 515 } |
| 516 |
| 517 DCHECK(sampler->IsActive()); |
| 518 DCHECK(instance_->interval_ == sampler->interval()); |
| 519 |
| 520 #if defined(USE_SIGNALS) |
| 521 AddSampler(sampler); |
| 522 #else |
| 523 DCHECK(!instance_->active_samplers_.Contains(sampler)); |
| 524 instance_->active_samplers_.Add(sampler); |
| 525 #endif // USE_SIGNALS |
| 526 |
| 527 if (need_to_start) instance_->StartSynchronously(); |
| 528 } |
| 529 |
| 530 static void RemoveSampler(Sampler* sampler) { |
| 531 SamplerThread* instance_to_remove = NULL; |
| 532 { |
| 533 base::LockGuard<base::Mutex> lock_guard(mutex_); |
| 534 |
| 535 DCHECK(sampler->IsActive() || sampler->IsRegistered()); |
| 536 #if defined(USE_SIGNALS) |
| 537 { |
| 538 AtomicGuard atomic_guard(&sampler_list_access_counter_); |
| 539 // Remove sampler from map. |
| 540 pthread_t thread_id = sampler->platform_data()->vm_tid(); |
| 541 void* thread_key = ThreadKey(thread_id); |
| 542 uint32_t thread_hash = ThreadHash(thread_id); |
| 543 HashMap::Entry* entry = |
| 544 thread_id_to_samplers_.Get().Lookup(thread_key, thread_hash); |
| 545 DCHECK(entry != NULL); |
| 546 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value); |
| 547 samplers->RemoveElement(sampler); |
| 548 if (samplers->is_empty()) { |
| 549 thread_id_to_samplers_.Pointer()->Remove(thread_key, thread_hash); |
| 550 delete samplers; |
| 551 } |
| 552 if (thread_id_to_samplers_.Get().occupancy() == 0) { |
| 553 instance_to_remove = instance_; |
| 554 instance_ = NULL; |
| 555 } |
| 556 } |
| 557 #else |
| 558 bool removed = instance_->active_samplers_.RemoveElement(sampler); |
| 559 DCHECK(removed); |
| 560 USE(removed); |
| 561 |
| 562 // We cannot delete the instance immediately as we need to Join() the |
| 563 // thread, but we are holding mutex_ and the thread may try to acquire it. |
| 564 if (instance_->active_samplers_.is_empty()) { |
| 565 instance_to_remove = instance_; |
| 566 instance_ = NULL; |
| 567 } |
| 568 #endif // USE_SIGNALS |
| 569 } |
| 570 |
| 571 if (!instance_to_remove) return; |
| 572 instance_to_remove->Join(); |
| 573 delete instance_to_remove; |
| 574 } |
| 575 |
| 576 // Unlike AddActiveSampler, this method only adds a sampler, |
| 577 // but won't start the sampler thread. |
| 578 static void RegisterSampler(Sampler* sampler) { |
| 579 base::LockGuard<base::Mutex> lock_guard(mutex_); |
| 580 #if defined(USE_SIGNALS) |
| 581 AddSampler(sampler); |
| 582 #endif // USE_SIGNALS |
| 583 } |
| 584 |
| 585 // Implement Thread::Run(). |
| 586 virtual void Run() { |
| 587 while (true) { |
| 588 { |
| 589 base::LockGuard<base::Mutex> lock_guard(mutex_); |
| 590 #if defined(USE_SIGNALS) |
| 591 if (thread_id_to_samplers_.Get().occupancy() == 0) break; |
| 592 if (SignalHandler::Installed()) { |
| 593 for (HashMap::Entry *p = thread_id_to_samplers_.Get().Start(); |
| 594 p != NULL; p = thread_id_to_samplers_.Get().Next(p)) { |
| 595 #if V8_OS_AIX && V8_TARGET_ARCH_PPC64 |
| 596 // On AIX64 we cannot cast (void*) directly to pthread_t, which is |
| 597 // an unsigned int (4 bytes). |
| 598 pthread_t thread_id = reinterpret_cast<intptr_t>(p->key); |
| 599 #else |
| 600 pthread_t thread_id = reinterpret_cast<pthread_t>(p->key); |
| 601 #endif |
| 602 pthread_kill(thread_id, SIGPROF); |
| 603 } |
| 604 } |
| 605 #else |
| 606 if (active_samplers_.is_empty()) break; |
| 607 // When CPU profiling is enabled, both JavaScript and C++ code are |
| 608 // profiled. We must not suspend. |
| 609 for (int i = 0; i < active_samplers_.length(); ++i) { |
| 610 Sampler* sampler = active_samplers_.at(i); |
| 611 if (!sampler->IsProfiling()) continue; |
| 612 sampler->DoSample(); |
| 613 } |
| 614 #endif // USE_SIGNALS |
| 615 } |
| 616 base::OS::Sleep(base::TimeDelta::FromMilliseconds(interval_)); |
| 617 } |
| 618 } |
| 619 |
| 620 private: |
| 621 // Protects the process-wide state below. |
| 622 static base::Mutex* mutex_; |
| 623 static SamplerThread* instance_; |
| 624 |
| 625 const int interval_; |
| 626 |
| 627 #if defined(USE_SIGNALS) |
| 628 struct HashMapCreateTrait { |
| 629 static void Construct(HashMap* allocated_ptr) { |
| 630 new (allocated_ptr) HashMap(HashMap::PointersMatch); |
| 631 } |
| 632 }; |
| 633 friend class SignalHandler; |
| 634 static base::LazyInstance<HashMap, HashMapCreateTrait>::type |
| 635 thread_id_to_samplers_; |
| 636 static base::AtomicValue<int> sampler_list_access_counter_; |
| 637 static void AddSampler(Sampler* sampler) { |
| 638 AtomicGuard atomic_guard(&sampler_list_access_counter_); |
| 639 // Add sampler into map if needed. |
| 640 pthread_t thread_id = sampler->platform_data()->vm_tid(); |
| 641 HashMap::Entry *entry = |
| 642 thread_id_to_samplers_.Pointer()->LookupOrInsert(ThreadKey(thread_id), |
| 643 ThreadHash(thread_id)); |
| 644 if (entry->value == NULL) { |
| 645 SamplerList* samplers = new SamplerList(); |
| 646 samplers->Add(sampler); |
| 647 entry->value = samplers; |
| 648 } else { |
| 649 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value); |
| 650 if (!samplers->Contains(sampler)) { |
| 651 samplers->Add(sampler); |
| 652 } |
| 653 } |
| 654 } |
| 655 #else |
| 656 SamplerList active_samplers_; |
| 657 #endif // USE_SIGNALS |
| 658 |
| 659 DISALLOW_COPY_AND_ASSIGN(SamplerThread); |
| 660 }; |
| 661 |
| 662 |
| 663 base::Mutex* SamplerThread::mutex_ = NULL; |
| 664 SamplerThread* SamplerThread::instance_ = NULL; |
| 665 #if defined(USE_SIGNALS) |
| 666 base::LazyInstance<HashMap, SamplerThread::HashMapCreateTrait>::type |
| 667 SamplerThread::thread_id_to_samplers_ = LAZY_INSTANCE_INITIALIZER; |
| 668 base::AtomicValue<int> SamplerThread::sampler_list_access_counter_(0); |
| 669 |
| 670 // As Native Client does not support signal handling, profiling is disabled. |
| 671 #if !V8_OS_NACL |
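| // Runs on the profiled thread when SIGPROF arrives. Uses a non-blocking |
| // AtomicGuard so the handler never spins on a lock held by the code it |
| // interrupted, then collects a sample for every sampler registered for |
| // this thread. |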
| 672 void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info, |
| 673 void* context) { |
| 674 USE(info); |
| 675 if (signal != SIGPROF) return; |
| 676 AtomicGuard atomic_guard(&SamplerThread::sampler_list_access_counter_, false); |
| 677 if (!atomic_guard.is_success()) return; |
| 678 pthread_t thread_id = pthread_self(); |
| 679 HashMap::Entry* entry = |
| 680 SamplerThread::thread_id_to_samplers_.Pointer()->Lookup( |
| 681 ThreadKey(thread_id), ThreadHash(thread_id)); |
| 682 if (entry == NULL) |
| 683 return; |
| 684 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value); |
| 685 for (int i = 0; i < samplers->length(); ++i) { |
| 686 Sampler* sampler = samplers->at(i); |
| 687 CollectSample(context, sampler); |
| 688 } |
| 689 } |
| 690 #endif // !V8_OS_NACL |
| 691 #endif // USE_SIGNALS |
| 692 |
| 693 |
| 694 void Sampler::SetUp() { |
| 695 #if defined(USE_SIGNALS) |
| 696 SignalHandler::SetUp(); |
| 697 #endif |
| 698 SamplerThread::SetUp(); |
| 699 } |
| 700 |
| 701 |
| 702 void Sampler::TearDown() { |
| 703 SamplerThread::TearDown(); |
| 704 #if defined(USE_SIGNALS) |
| 705 SignalHandler::TearDown(); |
| 706 #endif |
| 707 } |
| 708 |
| 709 Sampler::Sampler(Isolate* isolate, int interval) |
| 710 : isolate_(isolate), |
| 711 interval_(interval), |
| 712 profiling_(false), |
| 713 has_processing_thread_(false), |
| 714 active_(false), |
| 715 registered_(false), |
| 716 is_counting_samples_(false), |
| 717 js_sample_count_(0), |
| 718 external_sample_count_(0) { |
| 719 data_ = new PlatformData; |
| 720 } |
| 721 |
| 722 Sampler::~Sampler() { |
| 723 DCHECK(!IsActive()); |
| 724 if (IsRegistered()) { |
| 725 SamplerThread::RemoveSampler(this); |
| 726 } |
| 727 delete data_; |
| 728 } |
| 729 |
| 730 void Sampler::Start() { |
| 731 DCHECK(!IsActive()); |
| 732 SetActive(true); |
| 733 SamplerThread::AddActiveSampler(this); |
| 734 } |
| 735 |
| 736 |
| 737 void Sampler::Stop() { |
| 738 DCHECK(IsActive()); |
| 739 SamplerThread::RemoveSampler(this); |
| 740 SetActive(false); |
| 741 SetRegistered(false); |
| 742 } |
| 743 |
| 744 |
| 745 void Sampler::IncreaseProfilingDepth() { |
| 746 base::NoBarrier_AtomicIncrement(&profiling_, 1); |
| 747 #if defined(USE_SIGNALS) |
| 748 SignalHandler::IncreaseSamplerCount(); |
| 749 #endif |
| 750 } |
| 751 |
| 752 |
| 753 void Sampler::DecreaseProfilingDepth() { |
| 754 #if defined(USE_SIGNALS) |
| 755 SignalHandler::DecreaseSamplerCount(); |
| 756 #endif |
| 757 base::NoBarrier_AtomicIncrement(&profiling_, -1); |
| 758 } |
| 759 |
| 760 |
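| // Turns the captured register state into a TickSample and hands it to |
| // Tick(). Uses the profiler's shared sample buffer when one is available, |
| // otherwise falls back to a local temporary. |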
| 761 void Sampler::SampleStack(const v8::RegisterState& state) { |
| 762 TickSample* sample = isolate_->cpu_profiler()->StartTickSample(); |
| 763 TickSample sample_obj; |
| 764 if (sample == NULL) sample = &sample_obj; |
| 765 sample->Init(isolate_, state, TickSample::kIncludeCEntryFrame, true); |
| 766 if (is_counting_samples_ && !sample->timestamp.IsNull()) { |
| 767 if (sample->state == JS) ++js_sample_count_; |
| 768 if (sample->state == EXTERNAL) ++external_sample_count_; |
| 769 } |
| 770 Tick(sample); |
| 771 if (sample != &sample_obj) { |
| 772 isolate_->cpu_profiler()->FinishTickSample(); |
| 773 } |
| 774 } |
| 775 |
| 776 |
| 777 #if defined(USE_SIGNALS) |
| 778 |
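| // On signal-based platforms sampling is asynchronous: DoSample registers |
| // the sampler with the SamplerThread if necessary and sends SIGPROF to the |
| // profiled thread; the registers are captured by the signal handler on |
| // that thread. |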
| 779 void Sampler::DoSample() { |
| 780 if (!SignalHandler::Installed()) return; |
| 781 if (!IsActive() && !IsRegistered()) { |
| 782 SamplerThread::RegisterSampler(this); |
| 783 SetRegistered(true); |
| 784 } |
| 785 pthread_kill(platform_data()->vm_tid(), SIGPROF); |
| 786 } |
| 787 |
| 788 #elif V8_OS_WIN || V8_OS_CYGWIN |
| 789 |
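| // On Windows and Cygwin sampling is synchronous: the profiled thread is |
| // suspended, its register state is read with GetThreadContext, the stack |
| // is sampled, and the thread is resumed. |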
| 790 void Sampler::DoSample() { |
| 791 HANDLE profiled_thread = platform_data()->profiled_thread(); |
| 792 if (profiled_thread == NULL) return; |
| 793 |
| 794 const DWORD kSuspendFailed = static_cast<DWORD>(-1); |
| 795 if (SuspendThread(profiled_thread) == kSuspendFailed) return; |
| 796 |
| 797 // Context used for sampling the register state of the profiled thread. |
| 798 CONTEXT context; |
| 799 memset(&context, 0, sizeof(context)); |
| 800 context.ContextFlags = CONTEXT_FULL; |
| 801 if (GetThreadContext(profiled_thread, &context) != 0) { |
| 802 v8::RegisterState state; |
| 803 #if defined(USE_SIMULATOR) |
| 804 if (!SimulatorHelper::FillRegisters(isolate(), &state)) { |
| 805 ResumeThread(profiled_thread); |
| 806 return; |
| 807 } |
| 808 #else |
| 809 #if V8_HOST_ARCH_X64 |
| 810 state.pc = reinterpret_cast<Address>(context.Rip); |
| 811 state.sp = reinterpret_cast<Address>(context.Rsp); |
| 812 state.fp = reinterpret_cast<Address>(context.Rbp); |
| 813 #else |
| 814 state.pc = reinterpret_cast<Address>(context.Eip); |
| 815 state.sp = reinterpret_cast<Address>(context.Esp); |
| 816 state.fp = reinterpret_cast<Address>(context.Ebp); |
| 817 #endif |
| 818 #endif // USE_SIMULATOR |
| 819 SampleStack(state); |
| 820 } |
| 821 ResumeThread(profiled_thread); |
| 822 } |
| 823 |
| 824 #endif // USE_SIGNALS |
| 825 |
| 826 |
| 827 } // namespace internal |
| 828 } // namespace v8 |