| OLD | NEW |
| 1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/libsampler/v8-sampler.h" | 5 #include "src/libsampler/v8-sampler.h" |
| 6 | 6 |
| 7 #if V8_OS_POSIX && !V8_OS_CYGWIN | 7 #if V8_OS_POSIX && !V8_OS_CYGWIN |
| 8 | 8 |
| 9 #define USE_SIGNALS | 9 #define USE_SIGNALS |
| 10 | 10 |
| (...skipping 145 matching lines...) |
| 156 | 156 |
| 157 | 157 |
| 158 namespace v8 { | 158 namespace v8 { |
| 159 namespace sampler { | 159 namespace sampler { |
| 160 | 160 |
| 161 namespace { | 161 namespace { |
| 162 | 162 |
| 163 #if defined(USE_SIGNALS) | 163 #if defined(USE_SIGNALS) |
| 164 typedef std::vector<Sampler*> SamplerList; | 164 typedef std::vector<Sampler*> SamplerList; |
| 165 typedef SamplerList::iterator SamplerListIterator; | 165 typedef SamplerList::iterator SamplerListIterator; |
| | 166 typedef base::AtomicValue<bool> AtomicMutex; |
| 166 | 167 |
| 167 class AtomicGuard { | 168 class AtomicGuard { |
| 168 public: | 169 public: |
| 169 explicit AtomicGuard(base::AtomicValue<int>* atomic, bool is_block = true) | 170 explicit AtomicGuard(AtomicMutex* atomic, bool is_blocking = true) |
| 170 : atomic_(atomic), | 171 : atomic_(atomic), is_success_(false) { |
| 171 is_success_(false) { | |
| 172 do { | 172 do { |
| 173 // Use Acquire_Load to gain mutual exclusion. | 173 // Use Acquire_Load to gain mutual exclusion. |
| 174 USE(atomic_->Value()); | 174 USE(atomic_->Value()); |
| 175 is_success_ = atomic_->TrySetValue(0, 1); | 175 is_success_ = atomic_->TrySetValue(false, true); |
| 176 } while (is_block && !is_success_); | 176 } while (is_blocking && !is_success_); |
| 177 } | 177 } |
| 178 | 178 |
| 179 bool is_success() { return is_success_; } | 179 bool is_success() const { return is_success_; } |
| 180 | 180 |
| 181 ~AtomicGuard() { | 181 ~AtomicGuard() { |
| 182 if (is_success_) { | 182 if (!is_success_) return; |
| 183 atomic_->SetValue(0); | 183 atomic_->SetValue(false); |
| 184 } | |
| 185 atomic_ = NULL; | |
| 186 } | 184 } |
| 187 | 185 |
| 188 private: | 186 private: |
| 189 base::AtomicValue<int>* atomic_; | 187 AtomicMutex* const atomic_; |
| 190 bool is_success_; | 188 bool is_success_; |
| 191 }; | 189 }; |
| 192 | 190 |
| 193 | |
| 194 // Returns key for hash map. | 191 // Returns key for hash map. |
| 195 void* ThreadKey(pthread_t thread_id) { | 192 void* ThreadKey(pthread_t thread_id) { |
| 196 return reinterpret_cast<void*>(thread_id); | 193 return reinterpret_cast<void*>(thread_id); |
| 197 } | 194 } |
| 198 | 195 |
| 199 | |
| 200 // Returns hash value for hash map. | 196 // Returns hash value for hash map. |
| 201 uint32_t ThreadHash(pthread_t thread_id) { | 197 uint32_t ThreadHash(pthread_t thread_id) { |
| 202 #if V8_OS_MACOSX | 198 #if V8_OS_MACOSX |
| 203 return static_cast<uint32_t>(reinterpret_cast<intptr_t>(thread_id)); | 199 return static_cast<uint32_t>(reinterpret_cast<intptr_t>(thread_id)); |
| 204 #else | 200 #else |
| 205 return static_cast<uint32_t>(thread_id); | 201 return static_cast<uint32_t>(thread_id); |
| 206 #endif | 202 #endif |
| 207 } | 203 } |
| 208 | 204 |
| 209 #endif // USE_SIGNALS | 205 #endif // USE_SIGNALS |
| 210 | 206 |
| 211 } // namespace | 207 } // namespace |
| 212 | 208 |
| 213 #if defined(USE_SIGNALS) | 209 #if defined(USE_SIGNALS) |
| 214 | 210 |
| 215 class Sampler::PlatformData { | 211 class Sampler::PlatformData { |
| 216 public: | 212 public: |
| 217 PlatformData() : vm_tid_(pthread_self()) {} | 213 PlatformData() : vm_tid_(pthread_self()) {} |
| 218 pthread_t vm_tid() const { return vm_tid_; } | 214 pthread_t vm_tid() const { return vm_tid_; } |
| 219 | 215 |
| 220 private: | 216 private: |
| 221 pthread_t vm_tid_; | 217 pthread_t vm_tid_; |
| 222 }; | 218 }; |
| 223 | 219 |
| 224 | |
| 225 class SamplerManager { | 220 class SamplerManager { |
| 226 public: | 221 public: |
| 227 static void AddSampler(Sampler* sampler) { | 222 SamplerManager() : sampler_map_(HashMap::PointersMatch) {} |
| | 223 |
| | 224   void AddSampler(Sampler* sampler) { |
| 228 AtomicGuard atomic_guard(&samplers_access_counter_); | 225 AtomicGuard atomic_guard(&samplers_access_counter_); |
| 229 DCHECK(sampler->IsActive() || !sampler->IsRegistered()); | 226 DCHECK(sampler->IsActive() || !sampler->IsRegistered()); |
| 230 // Add sampler into map if needed. | 227 // Add sampler into map if needed. |
| 231 pthread_t thread_id = sampler->platform_data()->vm_tid(); | 228 pthread_t thread_id = sampler->platform_data()->vm_tid(); |
| 232 HashMap::Entry* entry = | 229 HashMap::Entry* entry = sampler_map_.LookupOrInsert(ThreadKey(thread_id), |
| 233 sampler_map_.Pointer()->LookupOrInsert(ThreadKey(thread_id), | 230 ThreadHash(thread_id)); |
| 234 ThreadHash(thread_id)); | 231 DCHECK(entry != nullptr); |
| 235 DCHECK(entry != NULL); | 232 if (entry->value == nullptr) { |
| 236 if (entry->value == NULL) { | |
| 237 SamplerList* samplers = new SamplerList(); | 233 SamplerList* samplers = new SamplerList(); |
| 238 samplers->push_back(sampler); | 234 samplers->push_back(sampler); |
| 239 entry->value = samplers; | 235 entry->value = samplers; |
| 240 } else { | 236 } else { |
| 241 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value); | 237 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value); |
| 242 bool exists = false; | 238 bool exists = false; |
| 243 for (SamplerListIterator iter = samplers->begin(); | 239 for (SamplerListIterator iter = samplers->begin(); |
| 244 iter != samplers->end(); ++iter) { | 240 iter != samplers->end(); ++iter) { |
| 245 if (*iter == sampler) { | 241 if (*iter == sampler) { |
| 246 exists = true; | 242 exists = true; |
| 247 break; | 243 break; |
| 248 } | 244 } |
| 249 } | 245 } |
| 250 if (!exists) { | 246 if (!exists) { |
| 251 samplers->push_back(sampler); | 247 samplers->push_back(sampler); |
| 252 } | 248 } |
| 253 } | 249 } |
| 254 } | 250 } |
| 255 | 251 |
| 256 static void RemoveSampler(Sampler* sampler) { | 252 void RemoveSampler(Sampler* sampler) { |
| 257 AtomicGuard atomic_guard(&samplers_access_counter_); | 253 AtomicGuard atomic_guard(&samplers_access_counter_); |
| 258 DCHECK(sampler->IsActive() || sampler->IsRegistered()); | 254 DCHECK(sampler->IsActive() || sampler->IsRegistered()); |
| 259 // Remove sampler from map. | 255 // Remove sampler from map. |
| 260 pthread_t thread_id = sampler->platform_data()->vm_tid(); | 256 pthread_t thread_id = sampler->platform_data()->vm_tid(); |
| 261 void* thread_key = ThreadKey(thread_id); | 257 void* thread_key = ThreadKey(thread_id); |
| 262 uint32_t thread_hash = ThreadHash(thread_id); | 258 uint32_t thread_hash = ThreadHash(thread_id); |
| 263 HashMap::Entry* entry = sampler_map_.Get().Lookup(thread_key, thread_hash); | 259 HashMap::Entry* entry = sampler_map_.Lookup(thread_key, thread_hash); |
| 264 DCHECK(entry != NULL); | 260 DCHECK(entry != nullptr); |
| 265 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value); | 261 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value); |
| 266 for (SamplerListIterator iter = samplers->begin(); iter != samplers->end(); | 262 for (SamplerListIterator iter = samplers->begin(); iter != samplers->end(); |
| 267 ++iter) { | 263 ++iter) { |
| 268 if (*iter == sampler) { | 264 if (*iter == sampler) { |
| 269 samplers->erase(iter); | 265 samplers->erase(iter); |
| 270 break; | 266 break; |
| 271 } | 267 } |
| 272 } | 268 } |
| 273 if (samplers->empty()) { | 269 if (samplers->empty()) { |
| 274 sampler_map_.Pointer()->Remove(thread_key, thread_hash); | 270 sampler_map_.Remove(thread_key, thread_hash); |
| 275 delete samplers; | 271 delete samplers; |
| 276 } | 272 } |
| 277 } | 273 } |
| 278 | 274 |
| | 275 #if defined(USE_SIGNALS) |
| | 276   void DoSample(const v8::RegisterState& state) { |
| | 277     AtomicGuard atomic_guard(&SamplerManager::samplers_access_counter_, false); |
| | 278     if (!atomic_guard.is_success()) return; |
| | 279     pthread_t thread_id = pthread_self(); |
| | 280     HashMap::Entry* entry = |
| | 281         sampler_map_.Lookup(ThreadKey(thread_id), ThreadHash(thread_id)); |
| | 282     if (!entry) return; |
| | 283     SamplerList& samplers = *static_cast<SamplerList*>(entry->value); |
| | 284 |
| | 285     for (int i = 0; i < samplers.size(); ++i) { |
| | 286       Sampler* sampler = samplers[i]; |
| | 287       Isolate* isolate = sampler->isolate(); |
| | 288       // We require a fully initialized and entered isolate. |
| | 289       if (isolate == nullptr || !isolate->IsInUse()) continue; |
| | 290       if (v8::Locker::IsActive() && !Locker::IsLocked(isolate)) continue; |
| | 291       sampler->SampleStack(state); |
| | 292     } |
| | 293   } |
| | 294 #endif |
| | 295 |
| | 296   static SamplerManager* instance() { return instance_.Pointer(); } |
| | 297 |
| 279 private: | 298 private: |
| 280 struct HashMapCreateTrait { | 299 HashMap sampler_map_; |
| 281 static void Construct(HashMap* allocated_ptr) { | 300 static AtomicMutex samplers_access_counter_; |
| 282 new (allocated_ptr) HashMap(HashMap::PointersMatch); | 301 static base::LazyInstance<SamplerManager>::type instance_; |
| 283 } | |
| 284 }; | |
| 285 friend class SignalHandler; | |
| 286 static base::LazyInstance<HashMap, HashMapCreateTrait>::type | |
| 287 sampler_map_; | |
| 288 static base::AtomicValue<int> samplers_access_counter_; | |
| 289 }; | 302 }; |
| 290 | 303 |
| 291 base::LazyInstance<HashMap, SamplerManager::HashMapCreateTrait>::type | 304 AtomicMutex SamplerManager::samplers_access_counter_; |
| 292 SamplerManager::sampler_map_ = LAZY_INSTANCE_INITIALIZER; | 305 base::LazyInstance<SamplerManager>::type SamplerManager::instance_ = |
| 293 base::AtomicValue<int> SamplerManager::samplers_access_counter_(0); | 306 LAZY_INSTANCE_INITIALIZER; |
| 294 | |
| 295 | 307 |
| 296 #elif V8_OS_WIN || V8_OS_CYGWIN | 308 #elif V8_OS_WIN || V8_OS_CYGWIN |
| 297 | 309 |
| 298 // ---------------------------------------------------------------------------- | 310 // ---------------------------------------------------------------------------- |
| 299 // Win32 profiler support. On Cygwin we use the same sampler implementation as | 311 // Win32 profiler support. On Cygwin we use the same sampler implementation as |
| 300 // on Win32. | 312 // on Win32. |
| 301 | 313 |
| 302 class Sampler::PlatformData { | 314 class Sampler::PlatformData { |
| 303 public: | 315 public: |
| 304 // Get a handle to the calling thread. This is the thread that we are | 316 // Get a handle to the calling thread. This is the thread that we are |
| 305 // going to profile. We need to make a copy of the handle because we are | 317 // going to profile. We need to make a copy of the handle because we are |
| 306 // going to use it in the sampler thread. Using GetThreadHandle() will | 318 // going to use it in the sampler thread. Using GetThreadHandle() will |
| 307 // not work in this case. We're using OpenThread because DuplicateHandle | 319 // not work in this case. We're using OpenThread because DuplicateHandle |
| 308 // for some reason doesn't work in Chrome's sandbox. | 320 // for some reason doesn't work in Chrome's sandbox. |
| 309 PlatformData() | 321 PlatformData() |
| 310 : profiled_thread_(OpenThread(THREAD_GET_CONTEXT | | 322 : profiled_thread_(OpenThread(THREAD_GET_CONTEXT | |
| 311 THREAD_SUSPEND_RESUME | | 323 THREAD_SUSPEND_RESUME | |
| 312 THREAD_QUERY_INFORMATION, | 324 THREAD_QUERY_INFORMATION, |
| 313 false, | 325 false, |
| 314 GetCurrentThreadId())) {} | 326 GetCurrentThreadId())) {} |
| 315 | 327 |
| 316 ~PlatformData() { | 328 ~PlatformData() { |
| 317 if (profiled_thread_ != NULL) { | 329 if (profiled_thread_ != nullptr) { |
| 318 CloseHandle(profiled_thread_); | 330 CloseHandle(profiled_thread_); |
| 319 profiled_thread_ = NULL; | 331 profiled_thread_ = nullptr; |
| 320 } | 332 } |
| 321 } | 333 } |
| 322 | 334 |
| 323 HANDLE profiled_thread() { return profiled_thread_; } | 335 HANDLE profiled_thread() { return profiled_thread_; } |
| 324 | 336 |
| 325 private: | 337 private: |
| 326 HANDLE profiled_thread_; | 338 HANDLE profiled_thread_; |
| 327 }; | 339 }; |
| 328 #endif // USE_SIGNALS | 340 #endif // USE_SIGNALS |
| 329 | 341 |
| 330 | 342 |
| 331 #if defined(USE_SIGNALS) | 343 #if defined(USE_SIGNALS) |
| 332 class SignalHandler { | 344 class SignalHandler { |
| 333 public: | 345 public: |
| 334 static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); } | 346 static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); } |
| 335 static void TearDown() { delete mutex_; mutex_ = NULL; } | 347 static void TearDown() { |
| | 348     delete mutex_; |
| | 349     mutex_ = nullptr; |
| | 350   } |
| 336 | 351 |
| 337 static void IncreaseSamplerCount() { | 352 static void IncreaseSamplerCount() { |
| 338 base::LockGuard<base::Mutex> lock_guard(mutex_); | 353 base::LockGuard<base::Mutex> lock_guard(mutex_); |
| 339 if (++client_count_ == 1) Install(); | 354 if (++client_count_ == 1) Install(); |
| 340 } | 355 } |
| 341 | 356 |
| 342 static void DecreaseSamplerCount() { | 357 static void DecreaseSamplerCount() { |
| 343 base::LockGuard<base::Mutex> lock_guard(mutex_); | 358 base::LockGuard<base::Mutex> lock_guard(mutex_); |
| 344 if (--client_count_ == 0) Restore(); | 359 if (--client_count_ == 0) Restore(); |
| 345 } | 360 } |
| (...skipping 32 matching lines...) |
| 378 static void FillRegisterState(void* context, RegisterState* regs); | 393 static void FillRegisterState(void* context, RegisterState* regs); |
| 379 static void HandleProfilerSignal(int signal, siginfo_t* info, void* context); | 394 static void HandleProfilerSignal(int signal, siginfo_t* info, void* context); |
| 380 #endif | 395 #endif |
| 381 // Protects the process wide state below. | 396 // Protects the process wide state below. |
| 382 static base::Mutex* mutex_; | 397 static base::Mutex* mutex_; |
| 383 static int client_count_; | 398 static int client_count_; |
| 384 static bool signal_handler_installed_; | 399 static bool signal_handler_installed_; |
| 385 static struct sigaction old_signal_handler_; | 400 static struct sigaction old_signal_handler_; |
| 386 }; | 401 }; |
| 387 | 402 |
| 388 | 403 base::Mutex* SignalHandler::mutex_ = nullptr; |
| 389 base::Mutex* SignalHandler::mutex_ = NULL; | |
| 390 int SignalHandler::client_count_ = 0; | 404 int SignalHandler::client_count_ = 0; |
| 391 struct sigaction SignalHandler::old_signal_handler_; | 405 struct sigaction SignalHandler::old_signal_handler_; |
| 392 bool SignalHandler::signal_handler_installed_ = false; | 406 bool SignalHandler::signal_handler_installed_ = false; |
| 393 | 407 |
| 394 | 408 |
| 395 // As Native Client does not support signal handling, profiling is disabled. | 409 // As Native Client does not support signal handling, profiling is disabled. |
| 396 #if !V8_OS_NACL | 410 #if !V8_OS_NACL |
| 397 void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info, | 411 void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info, |
| 398 void* context) { | 412 void* context) { |
| 399 USE(info); | 413 USE(info); |
| 400 if (signal != SIGPROF) return; | 414 if (signal != SIGPROF) return; |
| 401 AtomicGuard atomic_guard(&SamplerManager::samplers_access_counter_, false); | |
| 402 if (!atomic_guard.is_success()) return; | |
| 403 pthread_t thread_id = pthread_self(); | |
| 404 HashMap::Entry* entry = | |
| 405 SamplerManager::sampler_map_.Pointer()->Lookup(ThreadKey(thread_id), | |
| 406 ThreadHash(thread_id)); | |
| 407 if (entry == NULL) return; | |
| 408 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value); | |
| 409 | |
| 410 v8::RegisterState state; | 415 v8::RegisterState state; |
| 411 FillRegisterState(context, &state); | 416 FillRegisterState(context, &state); |
| 412 | 417 SamplerManager::instance()->DoSample(state); |
| 413 for (int i = 0; i < samplers->size(); ++i) { | |
| 414 Sampler* sampler = (*samplers)[i]; | |
| 415 Isolate* isolate = sampler->isolate(); | |
| 416 | |
| 417 // We require a fully initialized and entered isolate. | |
| 418 if (isolate == NULL || !isolate->IsInUse()) return; | |
| 419 | |
| 420 if (v8::Locker::IsActive() && !Locker::IsLocked(isolate)) return; | |
| 421 | |
| 422 sampler->SampleStack(state); | |
| 423 } | |
| 424 } | 418 } |
| 425 | 419 |
| 426 void SignalHandler::FillRegisterState(void* context, RegisterState* state) { | 420 void SignalHandler::FillRegisterState(void* context, RegisterState* state) { |
| 427 // Extracting the sample from the context is extremely machine dependent. | 421 // Extracting the sample from the context is extremely machine dependent. |
| 428 ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context); | 422 ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context); |
| 429 #if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390))) | 423 #if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390))) |
| 430 mcontext_t& mcontext = ucontext->uc_mcontext; | 424 mcontext_t& mcontext = ucontext->uc_mcontext; |
| 431 #endif | 425 #endif |
| 432 #if V8_OS_LINUX | 426 #if V8_OS_LINUX |
| 433 #if V8_HOST_ARCH_IA32 | 427 #if V8_HOST_ARCH_IA32 |
| (...skipping 151 matching lines...) |
| 585 has_processing_thread_(false), | 579 has_processing_thread_(false), |
| 586 active_(false), | 580 active_(false), |
| 587 registered_(false) { | 581 registered_(false) { |
| 588 data_ = new PlatformData; | 582 data_ = new PlatformData; |
| 589 } | 583 } |
| 590 | 584 |
| 591 Sampler::~Sampler() { | 585 Sampler::~Sampler() { |
| 592 DCHECK(!IsActive()); | 586 DCHECK(!IsActive()); |
| 593 #if defined(USE_SIGNALS) | 587 #if defined(USE_SIGNALS) |
| 594 if (IsRegistered()) { | 588 if (IsRegistered()) { |
| 595 SamplerManager::RemoveSampler(this); | 589 SamplerManager::instance()->RemoveSampler(this); |
| 596 } | 590 } |
| 597 #endif | 591 #endif |
| 598 delete data_; | 592 delete data_; |
| 599 } | 593 } |
| 600 | 594 |
| 601 void Sampler::Start() { | 595 void Sampler::Start() { |
| 602 DCHECK(!IsActive()); | 596 DCHECK(!IsActive()); |
| 603 SetActive(true); | 597 SetActive(true); |
| 604 #if defined(USE_SIGNALS) | 598 #if defined(USE_SIGNALS) |
| 605 SamplerManager::AddSampler(this); | 599 SamplerManager::instance()->AddSampler(this); |
| 606 #endif | 600 #endif |
| 607 } | 601 } |
| 608 | 602 |
| 609 | 603 |
| 610 void Sampler::Stop() { | 604 void Sampler::Stop() { |
| 611 #if defined(USE_SIGNALS) | 605 #if defined(USE_SIGNALS) |
| 612 SamplerManager::RemoveSampler(this); | 606 SamplerManager::instance()->RemoveSampler(this); |
| 613 #endif | 607 #endif |
| 614 DCHECK(IsActive()); | 608 DCHECK(IsActive()); |
| 615 SetActive(false); | 609 SetActive(false); |
| 616 SetRegistered(false); | 610 SetRegistered(false); |
| 617 } | 611 } |
| 618 | 612 |
| 619 | 613 |
| 620 void Sampler::IncreaseProfilingDepth() { | 614 void Sampler::IncreaseProfilingDepth() { |
| 621 base::NoBarrier_AtomicIncrement(&profiling_, 1); | 615 base::NoBarrier_AtomicIncrement(&profiling_, 1); |
| 622 #if defined(USE_SIGNALS) | 616 #if defined(USE_SIGNALS) |
| 623 SignalHandler::IncreaseSamplerCount(); | 617 SignalHandler::IncreaseSamplerCount(); |
| 624 #endif | 618 #endif |
| 625 } | 619 } |
| 626 | 620 |
| 627 | 621 |
| 628 void Sampler::DecreaseProfilingDepth() { | 622 void Sampler::DecreaseProfilingDepth() { |
| 629 #if defined(USE_SIGNALS) | 623 #if defined(USE_SIGNALS) |
| 630 SignalHandler::DecreaseSamplerCount(); | 624 SignalHandler::DecreaseSamplerCount(); |
| 631 #endif | 625 #endif |
| 632 base::NoBarrier_AtomicIncrement(&profiling_, -1); | 626 base::NoBarrier_AtomicIncrement(&profiling_, -1); |
| 633 } | 627 } |
| 634 | 628 |
| 635 | 629 |
| 636 #if defined(USE_SIGNALS) | 630 #if defined(USE_SIGNALS) |
| 637 | 631 |
| 638 void Sampler::DoSample() { | 632 void Sampler::DoSample() { |
| 639 if (!SignalHandler::Installed()) return; | 633 if (!SignalHandler::Installed()) return; |
| 640 if (!IsActive() && !IsRegistered()) { | 634 if (!IsActive() && !IsRegistered()) { |
| 641 SamplerManager::AddSampler(this); | 635 SamplerManager::instance()->AddSampler(this); |
| 642 SetRegistered(true); | 636 SetRegistered(true); |
| 643 } | 637 } |
| 644 pthread_kill(platform_data()->vm_tid(), SIGPROF); | 638 pthread_kill(platform_data()->vm_tid(), SIGPROF); |
| 645 } | 639 } |
| 646 | 640 |
| 647 #elif V8_OS_WIN || V8_OS_CYGWIN | 641 #elif V8_OS_WIN || V8_OS_CYGWIN |
| 648 | 642 |
| 649 void Sampler::DoSample() { | 643 void Sampler::DoSample() { |
| 650 HANDLE profiled_thread = platform_data()->profiled_thread(); | 644 HANDLE profiled_thread = platform_data()->profiled_thread(); |
| 651 if (profiled_thread == NULL) return; | 645 if (profiled_thread == nullptr) return; |
| 652 | 646 |
| 653 const DWORD kSuspendFailed = static_cast<DWORD>(-1); | 647 const DWORD kSuspendFailed = static_cast<DWORD>(-1); |
| 654 if (SuspendThread(profiled_thread) == kSuspendFailed) return; | 648 if (SuspendThread(profiled_thread) == kSuspendFailed) return; |
| 655 | 649 |
| 656 // Context used for sampling the register state of the profiled thread. | 650 // Context used for sampling the register state of the profiled thread. |
| 657 CONTEXT context; | 651 CONTEXT context; |
| 658 memset(&context, 0, sizeof(context)); | 652 memset(&context, 0, sizeof(context)); |
| 659 context.ContextFlags = CONTEXT_FULL; | 653 context.ContextFlags = CONTEXT_FULL; |
| 660 if (GetThreadContext(profiled_thread, &context) != 0) { | 654 if (GetThreadContext(profiled_thread, &context) != 0) { |
| 661 v8::RegisterState state; | 655 v8::RegisterState state; |
| 662 #if V8_HOST_ARCH_X64 | 656 #if V8_HOST_ARCH_X64 |
| 663 state.pc = reinterpret_cast<void*>(context.Rip); | 657 state.pc = reinterpret_cast<void*>(context.Rip); |
| 664 state.sp = reinterpret_cast<void*>(context.Rsp); | 658 state.sp = reinterpret_cast<void*>(context.Rsp); |
| 665 state.fp = reinterpret_cast<void*>(context.Rbp); | 659 state.fp = reinterpret_cast<void*>(context.Rbp); |
| 666 #else | 660 #else |
| 667 state.pc = reinterpret_cast<void*>(context.Eip); | 661 state.pc = reinterpret_cast<void*>(context.Eip); |
| 668 state.sp = reinterpret_cast<void*>(context.Esp); | 662 state.sp = reinterpret_cast<void*>(context.Esp); |
| 669 state.fp = reinterpret_cast<void*>(context.Ebp); | 663 state.fp = reinterpret_cast<void*>(context.Ebp); |
| 670 #endif | 664 #endif |
| 671 SampleStack(state); | 665 SampleStack(state); |
| 672 } | 666 } |
| 673 ResumeThread(profiled_thread); | 667 ResumeThread(profiled_thread); |
| 674 } | 668 } |
| 675 | 669 |
| 676 #endif // USE_SIGNALS | 670 #endif // USE_SIGNALS |
| 677 | 671 |
| 678 } // namespace sampler | 672 } // namespace sampler |
| 679 } // namespace v8 | 673 } // namespace v8 |
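
For reviewers who want to see the new locking idiom in isolation: AtomicGuard is a tiny spin lock over an atomic boolean. AddSampler and RemoveSampler acquire it in blocking mode, while the signal handler acquires it in non-blocking mode (is_blocking = false) and simply drops the sample when the sampler map is being mutated, so the handler can never spin forever against a lock held on its own interrupted thread. Below is a minimal self-contained sketch of that idiom; it uses std::atomic<bool> purely as an illustrative stand-in for the CL's base::AtomicValue<bool>, not as the V8 API.

```cpp
#include <atomic>
#include <cstdio>

// Illustrative stand-in for the CL's AtomicMutex (base::AtomicValue<bool>).
using AtomicMutex = std::atomic<bool>;

class AtomicGuard {
 public:
  // is_blocking = true  -> spin until the flag is acquired (mutator path).
  // is_blocking = false -> try once and give up (signal-handler path).
  explicit AtomicGuard(AtomicMutex* atomic, bool is_blocking = true)
      : atomic_(atomic), is_success_(false) {
    do {
      bool expected = false;
      // Plays the role of TrySetValue(false, true) in the CL.
      is_success_ = atomic_->compare_exchange_strong(expected, true);
    } while (is_blocking && !is_success_);
  }

  bool is_success() const { return is_success_; }

  ~AtomicGuard() {
    // Release the flag only if this guard actually acquired it.
    if (is_success_) atomic_->store(false);
  }

 private:
  AtomicMutex* const atomic_;
  bool is_success_;
};

AtomicMutex sampler_lock{false};

int main() {
  {
    AtomicGuard mutator(&sampler_lock);         // blocking: owns the lock
    AtomicGuard handler(&sampler_lock, false);  // non-blocking: fails, no deadlock
    std::printf("handler acquired while locked: %d\n", handler.is_success());
  }
  AtomicGuard retry(&sampler_lock, false);      // lock released, succeeds
  std::printf("handler acquired after release: %d\n", retry.is_success());
}
```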
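The end-to-end sampling path in this CL is: Sampler::DoSample() sends SIGPROF to the profiled thread via pthread_kill; HandleProfilerSignal() fills a v8::RegisterState from the interrupted thread's ucontext and forwards it to SamplerManager::instance()->DoSample(), which walks the per-thread sampler list under the non-blocking guard shown above. The sketch below is a stripped-down version of that signal round-trip; the CL's actual Install() code sits in a collapsed hunk, so the sigaction flags here are ordinary POSIX usage rather than a quote from the patch.

```cpp
#include <csignal>
#include <cstdio>
#include <pthread.h>

// sig_atomic_t keeps the handler's write async-signal-safe.
static volatile sig_atomic_t g_samples = 0;

// Stand-in for SignalHandler::HandleProfilerSignal: the CL fills a
// v8::RegisterState (pc/sp/fp) from the ucontext argument and hands it to
// SamplerManager::instance()->DoSample(); here we only count the interrupt.
static void HandleProfilerSignal(int signal, siginfo_t*, void* /* ucontext */) {
  if (signal != SIGPROF) return;
  g_samples = g_samples + 1;
}

int main() {
  // Hypothetical installation, analogous to SignalHandler::Install().
  struct sigaction sa = {};
  sa.sa_sigaction = &HandleProfilerSignal;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO | SA_RESTART;
  sigaction(SIGPROF, &sa, nullptr);

  // Stand-in for Sampler::DoSample(): interrupt the profiled thread.
  pthread_kill(pthread_self(), SIGPROF);

  std::printf("samples handled: %d\n", static_cast<int>(g_samples));
  return 0;
}
```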