| Index: src/libsampler/v8-sampler.cc |
| diff --git a/src/profiler/sampler.cc b/src/libsampler/v8-sampler.cc |
| similarity index 37% |
| copy from src/profiler/sampler.cc |
| copy to src/libsampler/v8-sampler.cc |
| index 860fc34378025c27af77cddb25998e8526017754..c6d0889305f3b5dbce5ee3d9e738943ed8b79adb 100644 |
| --- a/src/profiler/sampler.cc |
| +++ b/src/libsampler/v8-sampler.cc |
| @@ -1,8 +1,8 @@ |
| -// Copyright 2013 the V8 project authors. All rights reserved. |
| +// Copyright 2016 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| -#include "src/profiler/sampler.h" |
|
jochen (gone - plz use gerrit)
2016/05/04 07:29:07
please add a DEPS file to this directory that cont
lpy
2016/05/05 22:24:17
Will do after finishing moving atomic-utils.h into base/.
|
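(For reference, a directory-level DEPS file of the kind requested here is just an include allowlist that checkdeps enforces for src/libsampler; the exact rules below are an illustrative guess, not part of this CL:

  include_rules = [
    "-src",
    "+src/base",
    "+src/libsampler",
  ]

With this in place, any #include from src/libsampler that is not covered by a "+" rule is rejected at presubmit time.)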
| +#include "src/libsampler/v8-sampler.h" |
| #if V8_OS_POSIX && !V8_OS_CYGWIN |
| @@ -44,13 +44,9 @@ |
| #include "src/atomic-utils.h" |
|
jochen (gone - plz use gerrit)
2016/05/04 07:29:07
I think this file can be moved to src/base
lpy
2016/05/05 22:24:17
atomic-utils.h will be moved into base/ in this CL
|
| #include "src/base/platform/platform.h" |
| -#include "src/flags.h" |
| -#include "src/frames-inl.h" |
| -#include "src/log.h" |
| -#include "src/profiler/cpu-profiler-inl.h" |
| -#include "src/simulator.h" |
| -#include "src/v8threads.h" |
| -#include "src/vm-state-inl.h" |
| +#include "src/hashmap.h" |
| +#include "src/list-inl.h" |
|
jochen (gone - plz use gerrit)
2016/05/04 07:29:07
please use stl container instead
lpy
2016/05/05 22:24:17
Done.
|
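(A sketch of what the reviewer is asking for, using the standard library instead of src/list-inl.h; the helper names are hypothetical and only illustrate the replacement calls:

  #include <algorithm>
  #include <vector>

  typedef std::vector<Sampler*> SamplerList;

  // Replaces List::Contains + List::Add when registering a sampler.
  void AddIfAbsent(SamplerList* samplers, Sampler* sampler) {
    if (std::find(samplers->begin(), samplers->end(), sampler) ==
        samplers->end()) {
      samplers->push_back(sampler);
    }
  }

  // Replaces List::RemoveElement when unregistering a sampler.
  void RemoveFromList(SamplerList* samplers, Sampler* sampler) {
    samplers->erase(
        std::remove(samplers->begin(), samplers->end(), sampler),
        samplers->end());
  }

SamplerList::empty() then stands in for List::is_empty() in RemoveSampler below.)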
| +#include "src/isolate.h" |
|
jochen (gone - plz use gerrit)
2016/05/04 07:29:07
we should only use v8::Isolate via include/v8.h
lpy
2016/05/05 22:24:17
Done.
|
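(The point, roughly: inside src/libsampler the isolate should be the opaque embedder type from include/v8.h, not i::Isolate pulled in through src/isolate.h. A hypothetical sketch of the header side under that constraint:

  #include "include/v8.h"  // only the public v8::Isolate is visible here

  namespace v8 {
  namespace sampler {

  class Sampler {
   public:
    explicit Sampler(v8::Isolate* isolate);  // matches the constructor below
    v8::Isolate* isolate() const { return isolate_; }

   private:
    v8::Isolate* isolate_;  // opaque pointer; V8 internals stay behind src/
  };

  }  // namespace sampler
  }  // namespace v8
)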
| #if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) |
| @@ -158,91 +154,29 @@ enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 }; |
| namespace v8 { |
| -namespace internal { |
| +namespace sampler { |
| namespace { |
| -class PlatformDataCommon : public Malloced { |
| +class PlatformDataCommon : public i::Malloced { |
| public: |
| - PlatformDataCommon() : profiled_thread_id_(ThreadId::Current()) {} |
| - ThreadId profiled_thread_id() { return profiled_thread_id_; } |
| + PlatformDataCommon() : profiled_thread_id_(i::ThreadId::Current()) {} |
| + i::ThreadId profiled_thread_id() { return profiled_thread_id_; } |
| protected: |
| ~PlatformDataCommon() {} |
| private: |
| - ThreadId profiled_thread_id_; |
| + i::ThreadId profiled_thread_id_; |
| }; |
| -bool IsSamePage(byte* ptr1, byte* ptr2) { |
| - const uint32_t kPageSize = 4096; |
| - uintptr_t mask = ~static_cast<uintptr_t>(kPageSize - 1); |
| - return (reinterpret_cast<uintptr_t>(ptr1) & mask) == |
| - (reinterpret_cast<uintptr_t>(ptr2) & mask); |
| -} |
| - |
| - |
| -// Check if the code at specified address could potentially be a |
| -// frame setup code. |
| -bool IsNoFrameRegion(Address address) { |
| - struct Pattern { |
| - int bytes_count; |
| - byte bytes[8]; |
| - int offsets[4]; |
| - }; |
| - byte* pc = reinterpret_cast<byte*>(address); |
| - static Pattern patterns[] = { |
| -#if V8_HOST_ARCH_IA32 |
| - // push %ebp |
| - // mov %esp,%ebp |
| - {3, {0x55, 0x89, 0xe5}, {0, 1, -1}}, |
| - // pop %ebp |
| - // ret N |
| - {2, {0x5d, 0xc2}, {0, 1, -1}}, |
| - // pop %ebp |
| - // ret |
| - {2, {0x5d, 0xc3}, {0, 1, -1}}, |
| -#elif V8_HOST_ARCH_X64 |
| - // pushq %rbp |
| - // movq %rsp,%rbp |
| - {4, {0x55, 0x48, 0x89, 0xe5}, {0, 1, -1}}, |
| - // popq %rbp |
| - // ret N |
| - {2, {0x5d, 0xc2}, {0, 1, -1}}, |
| - // popq %rbp |
| - // ret |
| - {2, {0x5d, 0xc3}, {0, 1, -1}}, |
| -#endif |
| - {0, {}, {}} |
| - }; |
| - for (Pattern* pattern = patterns; pattern->bytes_count; ++pattern) { |
| - for (int* offset_ptr = pattern->offsets; *offset_ptr != -1; ++offset_ptr) { |
| - int offset = *offset_ptr; |
| - if (!offset || IsSamePage(pc, pc - offset)) { |
| - MSAN_MEMORY_IS_INITIALIZED(pc - offset, pattern->bytes_count); |
| - if (!memcmp(pc - offset, pattern->bytes, pattern->bytes_count)) |
| - return true; |
| - } else { |
| - // It is not safe to examine bytes on another page as it might not be |
| - // allocated thus causing a SEGFAULT. |
| - // Check the pattern part that's on the same page and |
| - // pessimistically assume it could be the entire pattern match. |
| - MSAN_MEMORY_IS_INITIALIZED(pc, pattern->bytes_count - offset); |
| - if (!memcmp(pc, pattern->bytes + offset, pattern->bytes_count - offset)) |
| - return true; |
| - } |
| - } |
| - } |
| - return false; |
| -} |
| - |
| -typedef List<Sampler*> SamplerList; |
| - |
| #if defined(USE_SIGNALS) |
| +typedef internal::List<Sampler*> SamplerList; |
| + |
| class AtomicGuard { |
| public: |
| - explicit AtomicGuard(AtomicValue<int>* atomic, bool is_block = true) |
| + explicit AtomicGuard(i::AtomicValue<int>* atomic, bool is_block = true) |
| : atomic_(atomic), |
| is_success_(false) { |
| do { |
| @@ -262,7 +196,7 @@ class AtomicGuard { |
| } |
| private: |
| - AtomicValue<int>* atomic_; |
| + i::AtomicValue<int>* atomic_; |
| bool is_success_; |
| }; |
| @@ -296,6 +230,65 @@ class Sampler::PlatformData : public PlatformDataCommon { |
| pthread_t vm_tid_; |
| }; |
| + |
| +class SamplerManager { |
| + public: |
| + static void AddSampler(Sampler* sampler) { |
| + AtomicGuard atomic_guard(&samplers_access_counter_); |
| + DCHECK(sampler->IsActive()); |
| + // Add sampler into map if needed. |
| + pthread_t thread_id = sampler->platform_data()->vm_tid(); |
| + i::HashMap::Entry *entry = |
| + thread_id_to_samplers_.Pointer()->LookupOrInsert(ThreadKey(thread_id), |
| + ThreadHash(thread_id)); |
| + if (entry->value == NULL) { |
| + SamplerList* samplers = new SamplerList(); |
| + samplers->Add(sampler); |
| + entry->value = samplers; |
| + } else { |
| + SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value); |
| + if (!samplers->Contains(sampler)) { |
| + samplers->Add(sampler); |
| + } |
| + } |
| + } |
| + |
| + static void RemoveSampler(Sampler* sampler) { |
| + AtomicGuard atomic_guard(&samplers_access_counter_); |
| + DCHECK(sampler->IsActive()); |
| + // Remove sampler from map. |
| + pthread_t thread_id = sampler->platform_data()->vm_tid(); |
| + void* thread_key = ThreadKey(thread_id); |
| + uint32_t thread_hash = ThreadHash(thread_id); |
| + i::HashMap::Entry* entry = |
| + thread_id_to_samplers_.Get().Lookup(thread_key, thread_hash); |
| + DCHECK(entry != NULL); |
| + SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value); |
| + samplers->RemoveElement(sampler); |
| + if (samplers->is_empty()) { |
| + thread_id_to_samplers_.Pointer()->Remove(thread_key, thread_hash); |
| + delete samplers; |
| + } |
| + } |
| + |
| + private: |
| + struct HashMapCreateTrait { |
| + static void Construct(internal::HashMap* allocated_ptr) { |
| + new (allocated_ptr) internal::HashMap(internal::HashMap::PointersMatch); |
| + } |
| + }; |
| + friend class SignalHandler; |
| + static base::LazyInstance<internal::HashMap, HashMapCreateTrait>::type |
| + thread_id_to_samplers_; |
| + static i::AtomicValue<int> samplers_access_counter_; |
| +}; |
| + |
| + |
| +base::LazyInstance<i::HashMap, SamplerManager::HashMapCreateTrait>::type |
| + SamplerManager::thread_id_to_samplers_ = LAZY_INSTANCE_INITIALIZER; |
| +i::AtomicValue<int> SamplerManager::samplers_access_counter_(0); |
| + |
| + |
| #elif V8_OS_WIN || V8_OS_CYGWIN |
| // ---------------------------------------------------------------------------- |
| @@ -328,67 +321,11 @@ class Sampler::PlatformData : public PlatformDataCommon { |
| private: |
| HANDLE profiled_thread_; |
| }; |
| -#endif |
| - |
| - |
| -#if defined(USE_SIMULATOR) |
| -bool SimulatorHelper::FillRegisters(Isolate* isolate, |
| - v8::RegisterState* state) { |
| - Simulator *simulator = isolate->thread_local_top()->simulator_; |
| - // Check if there is active simulator. |
| - if (simulator == NULL) return false; |
| -#if V8_TARGET_ARCH_ARM |
| - if (!simulator->has_bad_pc()) { |
| - state->pc = reinterpret_cast<Address>(simulator->get_pc()); |
| - } |
| - state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp)); |
| - state->fp = reinterpret_cast<Address>(simulator->get_register( |
| - Simulator::r11)); |
| -#elif V8_TARGET_ARCH_ARM64 |
| - state->pc = reinterpret_cast<Address>(simulator->pc()); |
| - state->sp = reinterpret_cast<Address>(simulator->sp()); |
| - state->fp = reinterpret_cast<Address>(simulator->fp()); |
| -#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 |
| - if (!simulator->has_bad_pc()) { |
| - state->pc = reinterpret_cast<Address>(simulator->get_pc()); |
| - } |
| - state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp)); |
| - state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp)); |
| -#elif V8_TARGET_ARCH_PPC |
| - if (!simulator->has_bad_pc()) { |
| - state->pc = reinterpret_cast<Address>(simulator->get_pc()); |
| - } |
| - state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp)); |
| - state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp)); |
| -#elif V8_TARGET_ARCH_S390 |
| - if (!simulator->has_bad_pc()) { |
| - state->pc = reinterpret_cast<Address>(simulator->get_pc()); |
| - } |
| - state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp)); |
| - state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp)); |
| -#endif |
| - if (state->sp == 0 || state->fp == 0) { |
| - // It possible that the simulator is interrupted while it is updating |
| - // the sp or fp register. ARM64 simulator does this in two steps: |
| - // first setting it to zero and then setting it to the new value. |
| - // Bailout if sp/fp doesn't contain the new value. |
| - // |
| - // FIXME: The above doesn't really solve the issue. |
| - // If a 64-bit target is executed on a 32-bit host even the final |
| - // write is non-atomic, so it might obtain a half of the result. |
| - // Moreover as long as the register set code uses memcpy (as of now), |
| - // it is not guaranteed to be atomic even when both host and target |
| - // are of same bitness. |
| - return false; |
| - } |
| - return true; |
| -} |
| -#endif // USE_SIMULATOR |
| +#endif // USE_SIGNALS |
| #if defined(USE_SIGNALS) |
| - |
| -class SignalHandler : public AllStatic { |
| +class SignalHandler : public i::AllStatic { |
| public: |
| static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); } |
| static void TearDown() { delete mutex_; mutex_ = NULL; } |
| @@ -407,10 +344,6 @@ class SignalHandler : public AllStatic { |
| return signal_handler_installed_; |
| } |
| -#if !V8_OS_NACL |
| - static void CollectSample(void* context, Sampler* sampler); |
| -#endif |
| - |
| private: |
| static void Install() { |
| #if !V8_OS_NACL |
| @@ -424,7 +357,7 @@ class SignalHandler : public AllStatic { |
| #endif |
| signal_handler_installed_ = |
| (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0); |
| -#endif |
| +#endif // !V8_OS_NACL |
| } |
| static void Restore() { |
| @@ -437,6 +370,7 @@ class SignalHandler : public AllStatic { |
| } |
| #if !V8_OS_NACL |
| + static void FillRegisterState(void* context, RegisterState* regs); |
| static void HandleProfilerSignal(int signal, siginfo_t* info, void* context); |
| #endif |
| // Protects the process wide state below. |
| @@ -455,26 +389,41 @@ bool SignalHandler::signal_handler_installed_ = false; |
| // As Native Client does not support signal handling, profiling is disabled. |
| #if !V8_OS_NACL |
| -void SignalHandler::CollectSample(void* context, Sampler* sampler) { |
| - if (sampler == NULL || (!sampler->IsProfiling() && |
| - !sampler->IsRegistered())) { |
| +void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info, |
| + void* context) { |
| + USE(info); |
| + if (signal != SIGPROF) return; |
| + AtomicGuard atomic_guard(&SamplerManager::samplers_access_counter_, false); |
| + if (!atomic_guard.is_success()) return; |
| + pthread_t thread_id = pthread_self(); |
| + i::HashMap::Entry* entry = |
| + SamplerManager::thread_id_to_samplers_.Pointer()->Lookup( |
| + ThreadKey(thread_id), ThreadHash(thread_id)); |
| + if (entry == NULL) |
| return; |
| - } |
| - Isolate* isolate = sampler->isolate(); |
| + SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value); |
| + v8::RegisterState state; |
| + SignalHandler::FillRegisterState(context, &state); |
| - // We require a fully initialized and entered isolate. |
| - if (isolate == NULL || !isolate->IsInUse()) return; |
| + for (int i = 0; i < samplers->length(); ++i) { |
| + Sampler* sampler = samplers->at(i); |
| + if (sampler == NULL || !sampler->IsProfiling()) { |
| + return; |
| + } |
| + Isolate* isolate = sampler->isolate(); |
| - if (v8::Locker::IsActive() && |
| - !isolate->thread_manager()->IsLockedByCurrentThread()) { |
| - return; |
| - } |
| + // We require a fully initialized and entered isolate. |
| + if (isolate == NULL || !isolate->IsInUse()) return; |
| - v8::RegisterState state; |
| + if (v8::Locker::IsActive() && !Locker::IsLocked(isolate)) { |
| + return; |
| + } |
| -#if defined(USE_SIMULATOR) |
| - if (!SimulatorHelper::FillRegisters(isolate, &state)) return; |
| -#else |
| + sampler->SampleStack(state); |
| + } |
| +} |
| + |
| +void SignalHandler::FillRegisterState(void* context, RegisterState* state) { |
| // Extracting the sample from the context is extremely machine dependent. |
| ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context); |
| #if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390))) |
| @@ -482,474 +431,177 @@ void SignalHandler::CollectSample(void* context, Sampler* sampler) { |
| #endif |
| #if V8_OS_LINUX |
| #if V8_HOST_ARCH_IA32 |
| - state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]); |
| - state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]); |
| - state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]); |
| + state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_EIP]); |
| + state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_ESP]); |
| + state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_EBP]); |
| #elif V8_HOST_ARCH_X64 |
| - state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]); |
| - state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]); |
| - state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]); |
| + state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_RIP]); |
| + state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_RSP]); |
| + state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_RBP]); |
| #elif V8_HOST_ARCH_ARM |
| #if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4) |
| // Old GLibc ARM versions used a gregs[] array to access the register |
| // values from mcontext_t. |
| - state.pc = reinterpret_cast<Address>(mcontext.gregs[R15]); |
| - state.sp = reinterpret_cast<Address>(mcontext.gregs[R13]); |
| - state.fp = reinterpret_cast<Address>(mcontext.gregs[R11]); |
| + state->pc = reinterpret_cast<void*>(mcontext.gregs[R15]); |
| + state->sp = reinterpret_cast<void*>(mcontext.gregs[R13]); |
| + state->fp = reinterpret_cast<void*>(mcontext.gregs[R11]); |
| #else |
| - state.pc = reinterpret_cast<Address>(mcontext.arm_pc); |
| - state.sp = reinterpret_cast<Address>(mcontext.arm_sp); |
| - state.fp = reinterpret_cast<Address>(mcontext.arm_fp); |
| + state->pc = reinterpret_cast<void*>(mcontext.arm_pc); |
| + state->sp = reinterpret_cast<void*>(mcontext.arm_sp); |
| + state->fp = reinterpret_cast<void*>(mcontext.arm_fp); |
| #endif // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4) |
| #elif V8_HOST_ARCH_ARM64 |
| - state.pc = reinterpret_cast<Address>(mcontext.pc); |
| - state.sp = reinterpret_cast<Address>(mcontext.sp); |
| + state->pc = reinterpret_cast<void*>(mcontext.pc); |
| + state->sp = reinterpret_cast<void*>(mcontext.sp); |
| // FP is an alias for x29. |
| - state.fp = reinterpret_cast<Address>(mcontext.regs[29]); |
| + state->fp = reinterpret_cast<void*>(mcontext.regs[29]); |
| #elif V8_HOST_ARCH_MIPS |
| - state.pc = reinterpret_cast<Address>(mcontext.pc); |
| - state.sp = reinterpret_cast<Address>(mcontext.gregs[29]); |
| - state.fp = reinterpret_cast<Address>(mcontext.gregs[30]); |
| + state->pc = reinterpret_cast<void*>(mcontext.pc); |
| + state->sp = reinterpret_cast<void*>(mcontext.gregs[29]); |
| + state->fp = reinterpret_cast<void*>(mcontext.gregs[30]); |
| #elif V8_HOST_ARCH_MIPS64 |
| - state.pc = reinterpret_cast<Address>(mcontext.pc); |
| - state.sp = reinterpret_cast<Address>(mcontext.gregs[29]); |
| - state.fp = reinterpret_cast<Address>(mcontext.gregs[30]); |
| + state->pc = reinterpret_cast<void*>(mcontext.pc); |
| + state->sp = reinterpret_cast<void*>(mcontext.gregs[29]); |
| + state->fp = reinterpret_cast<void*>(mcontext.gregs[30]); |
| #elif V8_HOST_ARCH_PPC |
| - state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->nip); |
| - state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R1]); |
| - state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R31]); |
| + state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip); |
| + state->sp = |
| + reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R1]); |
| + state->fp = |
| + reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R31]); |
| #elif V8_HOST_ARCH_S390 |
| #if V8_TARGET_ARCH_32_BIT |
| // 31-bit target will have bit 0 (MSB) of the PSW set to denote addressing |
| // mode. This bit needs to be masked out to resolve actual address. |
| - state.pc = |
| - reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF); |
| + state->pc = |
| + reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF); |
| #else |
| - state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr); |
| + state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr); |
| #endif // V8_TARGET_ARCH_32_BIT |
| - state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[15]); |
| - state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[11]); |
| + state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[15]); |
| + state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[11]); |
| #endif // V8_HOST_ARCH_* |
| #elif V8_OS_MACOSX |
| #if V8_HOST_ARCH_X64 |
| #if __DARWIN_UNIX03 |
| - state.pc = reinterpret_cast<Address>(mcontext->__ss.__rip); |
| - state.sp = reinterpret_cast<Address>(mcontext->__ss.__rsp); |
| - state.fp = reinterpret_cast<Address>(mcontext->__ss.__rbp); |
| + state->pc = reinterpret_cast<void*>(mcontext->__ss.__rip); |
| + state->sp = reinterpret_cast<void*>(mcontext->__ss.__rsp); |
| + state->fp = reinterpret_cast<void*>(mcontext->__ss.__rbp); |
| #else // !__DARWIN_UNIX03 |
| - state.pc = reinterpret_cast<Address>(mcontext->ss.rip); |
| - state.sp = reinterpret_cast<Address>(mcontext->ss.rsp); |
| - state.fp = reinterpret_cast<Address>(mcontext->ss.rbp); |
| + state->pc = reinterpret_cast<void*>(mcontext->ss.rip); |
| + state->sp = reinterpret_cast<void*>(mcontext->ss.rsp); |
| + state->fp = reinterpret_cast<void*>(mcontext->ss.rbp); |
| #endif // __DARWIN_UNIX03 |
| #elif V8_HOST_ARCH_IA32 |
| #if __DARWIN_UNIX03 |
| - state.pc = reinterpret_cast<Address>(mcontext->__ss.__eip); |
| - state.sp = reinterpret_cast<Address>(mcontext->__ss.__esp); |
| - state.fp = reinterpret_cast<Address>(mcontext->__ss.__ebp); |
| + state->pc = reinterpret_cast<void*>(mcontext->__ss.__eip); |
| + state->sp = reinterpret_cast<void*>(mcontext->__ss.__esp); |
| + state->fp = reinterpret_cast<void*>(mcontext->__ss.__ebp); |
| #else // !__DARWIN_UNIX03 |
| - state.pc = reinterpret_cast<Address>(mcontext->ss.eip); |
| - state.sp = reinterpret_cast<Address>(mcontext->ss.esp); |
| - state.fp = reinterpret_cast<Address>(mcontext->ss.ebp); |
| + state->pc = reinterpret_cast<void*>(mcontext->ss.eip); |
| + state->sp = reinterpret_cast<void*>(mcontext->ss.esp); |
| + state->fp = reinterpret_cast<void*>(mcontext->ss.ebp); |
| #endif // __DARWIN_UNIX03 |
| #endif // V8_HOST_ARCH_IA32 |
| #elif V8_OS_FREEBSD |
| #if V8_HOST_ARCH_IA32 |
| - state.pc = reinterpret_cast<Address>(mcontext.mc_eip); |
| - state.sp = reinterpret_cast<Address>(mcontext.mc_esp); |
| - state.fp = reinterpret_cast<Address>(mcontext.mc_ebp); |
| + state->pc = reinterpret_cast<void*>(mcontext.mc_eip); |
| + state->sp = reinterpret_cast<void*>(mcontext.mc_esp); |
| + state->fp = reinterpret_cast<void*>(mcontext.mc_ebp); |
| #elif V8_HOST_ARCH_X64 |
| - state.pc = reinterpret_cast<Address>(mcontext.mc_rip); |
| - state.sp = reinterpret_cast<Address>(mcontext.mc_rsp); |
| - state.fp = reinterpret_cast<Address>(mcontext.mc_rbp); |
| + state->pc = reinterpret_cast<void*>(mcontext.mc_rip); |
| + state->sp = reinterpret_cast<void*>(mcontext.mc_rsp); |
| + state->fp = reinterpret_cast<void*>(mcontext.mc_rbp); |
| #elif V8_HOST_ARCH_ARM |
| - state.pc = reinterpret_cast<Address>(mcontext.mc_r15); |
| - state.sp = reinterpret_cast<Address>(mcontext.mc_r13); |
| - state.fp = reinterpret_cast<Address>(mcontext.mc_r11); |
| + state->pc = reinterpret_cast<void*>(mcontext.mc_r15); |
| + state->sp = reinterpret_cast<void*>(mcontext.mc_r13); |
| + state->fp = reinterpret_cast<void*>(mcontext.mc_r11); |
| #endif // V8_HOST_ARCH_* |
| #elif V8_OS_NETBSD |
| #if V8_HOST_ARCH_IA32 |
| - state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]); |
| - state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]); |
| - state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]); |
| + state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_EIP]); |
| + state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_ESP]); |
| + state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_EBP]); |
| #elif V8_HOST_ARCH_X64 |
| - state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]); |
| - state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]); |
| - state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]); |
| + state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_RIP]); |
| + state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RSP]); |
| + state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RBP]); |
| #endif // V8_HOST_ARCH_* |
| #elif V8_OS_OPENBSD |
| #if V8_HOST_ARCH_IA32 |
| - state.pc = reinterpret_cast<Address>(ucontext->sc_eip); |
| - state.sp = reinterpret_cast<Address>(ucontext->sc_esp); |
| - state.fp = reinterpret_cast<Address>(ucontext->sc_ebp); |
| + state->pc = reinterpret_cast<void*>(ucontext->sc_eip); |
| + state->sp = reinterpret_cast<void*>(ucontext->sc_esp); |
| + state->fp = reinterpret_cast<void*>(ucontext->sc_ebp); |
| #elif V8_HOST_ARCH_X64 |
| - state.pc = reinterpret_cast<Address>(ucontext->sc_rip); |
| - state.sp = reinterpret_cast<Address>(ucontext->sc_rsp); |
| - state.fp = reinterpret_cast<Address>(ucontext->sc_rbp); |
| + state->pc = reinterpret_cast<void*>(ucontext->sc_rip); |
| + state->sp = reinterpret_cast<void*>(ucontext->sc_rsp); |
| + state->fp = reinterpret_cast<void*>(ucontext->sc_rbp); |
| #endif // V8_HOST_ARCH_* |
| #elif V8_OS_SOLARIS |
| - state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]); |
| - state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]); |
| - state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]); |
| + state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_PC]); |
| + state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_SP]); |
| + state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_FP]); |
| #elif V8_OS_QNX |
| #if V8_HOST_ARCH_IA32 |
| - state.pc = reinterpret_cast<Address>(mcontext.cpu.eip); |
| - state.sp = reinterpret_cast<Address>(mcontext.cpu.esp); |
| - state.fp = reinterpret_cast<Address>(mcontext.cpu.ebp); |
| + state->pc = reinterpret_cast<void*>(mcontext.cpu.eip); |
| + state->sp = reinterpret_cast<void*>(mcontext.cpu.esp); |
| + state->fp = reinterpret_cast<void*>(mcontext.cpu.ebp); |
| #elif V8_HOST_ARCH_ARM |
| - state.pc = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_PC]); |
| - state.sp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_SP]); |
| - state.fp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_FP]); |
| + state->pc = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_PC]); |
| + state->sp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_SP]); |
| + state->fp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_FP]); |
| #endif // V8_HOST_ARCH_* |
| #elif V8_OS_AIX |
| - state.pc = reinterpret_cast<Address>(mcontext.jmp_context.iar); |
| - state.sp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[1]); |
| - state.fp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[31]); |
| + state->pc = reinterpret_cast<void*>(mcontext.jmp_context.iar); |
| + state->sp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[1]); |
| + state->fp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[31]); |
| #endif // V8_OS_AIX |
| -#endif // USE_SIMULATOR |
| - sampler->SampleStack(state); |
| } |
| -#endif // V8_OS_NACL |
| - |
| -#endif // USE_SIGNALS |
| - |
| - |
| -class SamplerThread : public base::Thread { |
| - public: |
| - static const int kSamplerThreadStackSize = 64 * KB; |
| - |
| - explicit SamplerThread(int interval) |
| - : Thread(base::Thread::Options("SamplerThread", kSamplerThreadStackSize)), |
| - interval_(interval) {} |
| - |
| - static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); } |
| - static void TearDown() { delete mutex_; mutex_ = NULL; } |
| - |
| - static void AddActiveSampler(Sampler* sampler) { |
| - bool need_to_start = false; |
| - base::LockGuard<base::Mutex> lock_guard(mutex_); |
| - if (instance_ == NULL) { |
| - // Start a thread that will send SIGPROF signal to VM threads, |
| - // when CPU profiling will be enabled. |
| - instance_ = new SamplerThread(sampler->interval()); |
| - need_to_start = true; |
| - } |
| - |
| - DCHECK(sampler->IsActive()); |
| - DCHECK(instance_->interval_ == sampler->interval()); |
| - |
| -#if defined(USE_SIGNALS) |
| - AddSampler(sampler); |
| -#else |
| - DCHECK(!instance_->active_samplers_.Contains(sampler)); |
| - instance_->active_samplers_.Add(sampler); |
| -#endif // USE_SIGNALS |
| - |
| - if (need_to_start) instance_->StartSynchronously(); |
| - } |
| - |
| - static void RemoveSampler(Sampler* sampler) { |
| - SamplerThread* instance_to_remove = NULL; |
| - { |
| - base::LockGuard<base::Mutex> lock_guard(mutex_); |
| - |
| - DCHECK(sampler->IsActive() || sampler->IsRegistered()); |
| -#if defined(USE_SIGNALS) |
| - { |
| - AtomicGuard atomic_guard(&sampler_list_access_counter_); |
| - // Remove sampler from map. |
| - pthread_t thread_id = sampler->platform_data()->vm_tid(); |
| - void* thread_key = ThreadKey(thread_id); |
| - uint32_t thread_hash = ThreadHash(thread_id); |
| - HashMap::Entry* entry = |
| - thread_id_to_samplers_.Get().Lookup(thread_key, thread_hash); |
| - DCHECK(entry != NULL); |
| - SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value); |
| - samplers->RemoveElement(sampler); |
| - if (samplers->is_empty()) { |
| - thread_id_to_samplers_.Pointer()->Remove(thread_key, thread_hash); |
| - delete samplers; |
| - } |
| - if (thread_id_to_samplers_.Get().occupancy() == 0) { |
| - instance_to_remove = instance_; |
| - instance_ = NULL; |
| - } |
| - } |
| -#else |
| - bool removed = instance_->active_samplers_.RemoveElement(sampler); |
| - DCHECK(removed); |
| - USE(removed); |
| - |
| - // We cannot delete the instance immediately as we need to Join() the |
| - // thread but we are holding mutex_ and the thread may try to acquire it. |
| - if (instance_->active_samplers_.is_empty()) { |
| - instance_to_remove = instance_; |
| - instance_ = NULL; |
| - } |
| -#endif // USE_SIGNALS |
| - } |
| - |
| - if (!instance_to_remove) return; |
| - instance_to_remove->Join(); |
| - delete instance_to_remove; |
| - } |
| - |
| - // Unlike AddActiveSampler, this method only adds a sampler, |
| - // but won't start the sampler thread. |
| - static void RegisterSampler(Sampler* sampler) { |
| - base::LockGuard<base::Mutex> lock_guard(mutex_); |
| -#if defined(USE_SIGNALS) |
| - AddSampler(sampler); |
| -#endif // USE_SIGNALS |
| - } |
| - |
| - // Implement Thread::Run(). |
| - virtual void Run() { |
| - while (true) { |
| - { |
| - base::LockGuard<base::Mutex> lock_guard(mutex_); |
| -#if defined(USE_SIGNALS) |
| - if (thread_id_to_samplers_.Get().occupancy() == 0) break; |
| - if (SignalHandler::Installed()) { |
| - for (HashMap::Entry *p = thread_id_to_samplers_.Get().Start(); |
| - p != NULL; p = thread_id_to_samplers_.Get().Next(p)) { |
| - pthread_t thread_id = reinterpret_cast<pthread_t>(p->key); |
| - pthread_kill(thread_id, SIGPROF); |
| - } |
| - } |
| -#else |
| - if (active_samplers_.is_empty()) break; |
| - // When CPU profiling is enabled both JavaScript and C++ code is |
| - // profiled. We must not suspend. |
| - for (int i = 0; i < active_samplers_.length(); ++i) { |
| - Sampler* sampler = active_samplers_.at(i); |
| - if (!sampler->IsProfiling()) continue; |
| - sampler->DoSample(); |
| - } |
| -#endif // USE_SIGNALS |
| - } |
| - base::OS::Sleep(base::TimeDelta::FromMilliseconds(interval_)); |
| - } |
| - } |
| - |
| - private: |
| - // Protects the process wide state below. |
| - static base::Mutex* mutex_; |
| - static SamplerThread* instance_; |
| - |
| - const int interval_; |
| - |
| -#if defined(USE_SIGNALS) |
| - struct HashMapCreateTrait { |
| - static void Construct(HashMap* allocated_ptr) { |
| - new (allocated_ptr) HashMap(HashMap::PointersMatch); |
| - } |
| - }; |
| - friend class SignalHandler; |
| - static base::LazyInstance<HashMap, HashMapCreateTrait>::type |
| - thread_id_to_samplers_; |
| - static AtomicValue<int> sampler_list_access_counter_; |
| - static void AddSampler(Sampler* sampler) { |
| - AtomicGuard atomic_guard(&sampler_list_access_counter_); |
| - // Add sampler into map if needed. |
| - pthread_t thread_id = sampler->platform_data()->vm_tid(); |
| - HashMap::Entry *entry = |
| - thread_id_to_samplers_.Pointer()->LookupOrInsert(ThreadKey(thread_id), |
| - ThreadHash(thread_id)); |
| - if (entry->value == NULL) { |
| - SamplerList* samplers = new SamplerList(); |
| - samplers->Add(sampler); |
| - entry->value = samplers; |
| - } else { |
| - SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value); |
| - if (!samplers->Contains(sampler)) { |
| - samplers->Add(sampler); |
| - } |
| - } |
| - } |
| -#else |
| - SamplerList active_samplers_; |
| -#endif // USE_SIGNALS |
| - |
| - DISALLOW_COPY_AND_ASSIGN(SamplerThread); |
| -}; |
| - |
| -base::Mutex* SamplerThread::mutex_ = NULL; |
| -SamplerThread* SamplerThread::instance_ = NULL; |
| -#if defined(USE_SIGNALS) |
| -base::LazyInstance<HashMap, SamplerThread::HashMapCreateTrait>::type |
| - SamplerThread::thread_id_to_samplers_ = LAZY_INSTANCE_INITIALIZER; |
| -AtomicValue<int> SamplerThread::sampler_list_access_counter_(0); |
| - |
| -// As Native Client does not support signal handling, profiling is disabled. |
| -#if !V8_OS_NACL |
| -void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info, |
| - void* context) { |
| - USE(info); |
| - if (signal != SIGPROF) return; |
| - AtomicGuard atomic_guard(&SamplerThread::sampler_list_access_counter_, false); |
| - if (!atomic_guard.is_success()) return; |
| - pthread_t thread_id = pthread_self(); |
| - HashMap::Entry* entry = |
| - SamplerThread::thread_id_to_samplers_.Pointer()->Lookup( |
| - ThreadKey(thread_id), ThreadHash(thread_id)); |
| - if (entry == NULL) |
| - return; |
| - SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value); |
| - for (int i = 0; i < samplers->length(); ++i) { |
| - Sampler* sampler = samplers->at(i); |
| - CollectSample(context, sampler); |
| - } |
| -} |
| #endif // !V8_OS_NACL |
| -#endif // USE_SIGNALs |
| - |
| - |
| -// |
| -// StackTracer implementation |
| -// |
| -DISABLE_ASAN void TickSample::Init(Isolate* isolate, |
| - const v8::RegisterState& regs, |
| - RecordCEntryFrame record_c_entry_frame, |
| - bool update_stats) { |
| - timestamp = base::TimeTicks::HighResolutionNow(); |
| - pc = reinterpret_cast<Address>(regs.pc); |
| - state = isolate->current_vm_state(); |
| - this->update_stats = update_stats; |
| - |
| - // Avoid collecting traces while doing GC. |
| - if (state == GC) return; |
| - |
| - Address js_entry_sp = isolate->js_entry_sp(); |
| - if (js_entry_sp == 0) return; // Not executing JS now. |
| - |
| - if (pc && IsNoFrameRegion(pc)) { |
| - // Can't collect stack. Mark the sample as spoiled. |
| - timestamp = base::TimeTicks(); |
| - pc = 0; |
| - return; |
| - } |
| - ExternalCallbackScope* scope = isolate->external_callback_scope(); |
| - Address handler = Isolate::handler(isolate->thread_local_top()); |
| - // If there is a handler on top of the external callback scope then |
| - // we have already entrered JavaScript again and the external callback |
| - // is not the top function. |
| - if (scope && scope->scope_address() < handler) { |
| - external_callback_entry = *scope->callback_entrypoint_address(); |
| - has_external_callback = true; |
| - } else { |
| - // sp register may point at an arbitrary place in memory, make |
| - // sure MSAN doesn't complain about it. |
| - MSAN_MEMORY_IS_INITIALIZED(regs.sp, sizeof(Address)); |
| - // Sample potential return address value for frameless invocation of |
| - // stubs (we'll figure out later, if this value makes sense). |
| - tos = Memory::Address_at(reinterpret_cast<Address>(regs.sp)); |
| - has_external_callback = false; |
| - } |
| - |
| - SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp), |
| - reinterpret_cast<Address>(regs.sp), js_entry_sp); |
| - top_frame_type = it.top_frame_type(); |
| - |
| - SampleInfo info; |
| - GetStackSample(isolate, regs, record_c_entry_frame, |
| - reinterpret_cast<void**>(&stack[0]), kMaxFramesCount, &info); |
| - frames_count = static_cast<unsigned>(info.frames_count); |
| - if (!frames_count) { |
| - // It is executing JS but failed to collect a stack trace. |
| - // Mark the sample as spoiled. |
| - timestamp = base::TimeTicks(); |
| - pc = 0; |
| - } |
| -} |
| - |
| - |
| -void TickSample::GetStackSample(Isolate* isolate, const v8::RegisterState& regs, |
| - RecordCEntryFrame record_c_entry_frame, |
| - void** frames, size_t frames_limit, |
| - v8::SampleInfo* sample_info) { |
| - sample_info->frames_count = 0; |
| - sample_info->vm_state = isolate->current_vm_state(); |
| - if (sample_info->vm_state == GC) return; |
| - |
| - Address js_entry_sp = isolate->js_entry_sp(); |
| - if (js_entry_sp == 0) return; // Not executing JS now. |
| - |
| - SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp), |
| - reinterpret_cast<Address>(regs.sp), js_entry_sp); |
| - size_t i = 0; |
| - if (record_c_entry_frame == kIncludeCEntryFrame && !it.done() && |
| - it.top_frame_type() == StackFrame::EXIT) { |
| - frames[i++] = isolate->c_function(); |
| - } |
| - while (!it.done() && i < frames_limit) { |
| - if (it.frame()->is_interpreted()) { |
| - // For interpreted frames use the bytecode array pointer as the pc. |
| - InterpretedFrame* frame = static_cast<InterpretedFrame*>(it.frame()); |
| - // Since the sampler can interrupt execution at any point the |
| - // bytecode_array might be garbage, so don't dereference it. |
| - Address bytecode_array = |
| - reinterpret_cast<Address>(frame->GetBytecodeArray()) - kHeapObjectTag; |
| - frames[i++] = bytecode_array + BytecodeArray::kHeaderSize + |
| - frame->GetBytecodeOffset(); |
| - } else { |
| - frames[i++] = it.frame()->pc(); |
| - } |
| - it.Advance(); |
| - } |
| - sample_info->frames_count = i; |
| -} |
| +#endif // USE_SIGNALS |
| void Sampler::SetUp() { |
| #if defined(USE_SIGNALS) |
| SignalHandler::SetUp(); |
| #endif |
| - SamplerThread::SetUp(); |
| } |
| void Sampler::TearDown() { |
| - SamplerThread::TearDown(); |
| #if defined(USE_SIGNALS) |
| SignalHandler::TearDown(); |
| #endif |
| } |
| -Sampler::Sampler(Isolate* isolate, int interval) |
| - : isolate_(isolate), |
| - interval_(interval), |
| - profiling_(false), |
| - has_processing_thread_(false), |
| - active_(false), |
| - registered_(false), |
| - is_counting_samples_(false), |
| +Sampler::Sampler(Isolate* isolate) |
| + : is_counting_samples_(false), |
| js_sample_count_(0), |
| - external_sample_count_(0) { |
| + external_sample_count_(0), |
| + isolate_(isolate), |
| + profiling_(false), |
| + active_(false) { |
| data_ = new PlatformData; |
| } |
| Sampler::~Sampler() { |
| DCHECK(!IsActive()); |
| - if (IsRegistered()) { |
| - SamplerThread::RemoveSampler(this); |
| - } |
| delete data_; |
| } |
| void Sampler::Start() { |
| DCHECK(!IsActive()); |
| SetActive(true); |
| - SamplerThread::AddActiveSampler(this); |
| + SamplerManager::AddSampler(this); |
| } |
| void Sampler::Stop() { |
| + SamplerManager::RemoveSampler(this); |
| DCHECK(IsActive()); |
| - SamplerThread::RemoveSampler(this); |
| SetActive(false); |
| - SetRegistered(false); |
| } |
| @@ -969,30 +621,10 @@ void Sampler::DecreaseProfilingDepth() { |
| } |
| -void Sampler::SampleStack(const v8::RegisterState& state) { |
| - TickSample* sample = isolate_->cpu_profiler()->StartTickSample(); |
| - TickSample sample_obj; |
| - if (sample == NULL) sample = &sample_obj; |
| - sample->Init(isolate_, state, TickSample::kIncludeCEntryFrame, true); |
| - if (is_counting_samples_ && !sample->timestamp.IsNull()) { |
| - if (sample->state == JS) ++js_sample_count_; |
| - if (sample->state == EXTERNAL) ++external_sample_count_; |
| - } |
| - Tick(sample); |
| - if (sample != &sample_obj) { |
| - isolate_->cpu_profiler()->FinishTickSample(); |
| - } |
| -} |
| - |
| - |
| #if defined(USE_SIGNALS) |
| void Sampler::DoSample() { |
| if (!SignalHandler::Installed()) return; |
| - if (!IsActive() && !IsRegistered()) { |
| - SamplerThread::RegisterSampler(this); |
| - SetRegistered(true); |
| - } |
| pthread_kill(platform_data()->vm_tid(), SIGPROF); |
| } |
| @@ -1011,22 +643,15 @@ void Sampler::DoSample() { |
| context.ContextFlags = CONTEXT_FULL; |
| if (GetThreadContext(profiled_thread, &context) != 0) { |
| v8::RegisterState state; |
| -#if defined(USE_SIMULATOR) |
| - if (!SimulatorHelper::FillRegisters(isolate(), &state)) { |
| - ResumeThread(profiled_thread); |
| - return; |
| - } |
| -#else |
| #if V8_HOST_ARCH_X64 |
| - state.pc = reinterpret_cast<Address>(context.Rip); |
| - state.sp = reinterpret_cast<Address>(context.Rsp); |
| - state.fp = reinterpret_cast<Address>(context.Rbp); |
| + state.pc = reinterpret_cast<void*>(context.Rip); |
| + state.sp = reinterpret_cast<void*>(context.Rsp); |
| + state.fp = reinterpret_cast<void*>(context.Rbp); |
| #else |
| - state.pc = reinterpret_cast<Address>(context.Eip); |
| - state.sp = reinterpret_cast<Address>(context.Esp); |
| - state.fp = reinterpret_cast<Address>(context.Ebp); |
| + state.pc = reinterpret_cast<void*>(context.Eip); |
| + state.sp = reinterpret_cast<void*>(context.Esp); |
| + state.fp = reinterpret_cast<void*>(context.Ebp); |
| #endif |
| -#endif // USE_SIMULATOR |
| SampleStack(state); |
| } |
| ResumeThread(profiled_thread); |
| @@ -1034,6 +659,5 @@ void Sampler::DoSample() { |
| #endif // USE_SIGNALS |
| - |
| -} // namespace internal |
| +} // namespace sampler |
| } // namespace v8 |
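(For orientation, a minimal embedder-style sketch of the surface this CL leaves in v8::sampler; the subclass is hypothetical and assumes v8-sampler.h declares SampleStack() as the virtual hook invoked from DoSample()/the SIGPROF handler:

  class CountingSampler : public v8::sampler::Sampler {
   public:
    explicit CountingSampler(v8::Isolate* isolate)
        : Sampler(isolate), sample_count_(0), last_pc_(nullptr) {}

    void SampleStack(const v8::RegisterState& state) override {
      last_pc_ = state.pc;  // pc/sp/fp are the raw void* values captured above
      ++sample_count_;
    }

    int sample_count() const { return sample_count_; }

   private:
    int sample_count_;
    void* last_pc_;
  };

  // CountingSampler sampler(isolate);
  // sampler.Start();  // registers with SamplerManager on signal-based builds
  // ...               // the embedder periodically triggers sampler.DoSample()
  // sampler.Stop();   // unregisters and deactivates

Note that with SamplerThread removed from this file, nothing here drives DoSample() periodically any more; that responsibility moves to the caller.)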