OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 38 matching lines...)
49 #endif // def __GLIBC__ | 49 #endif // def __GLIBC__ |
50 #include <strings.h> // index | 50 #include <strings.h> // index |
51 #include <errno.h> | 51 #include <errno.h> |
52 #include <stdarg.h> | 52 #include <stdarg.h> |
53 | 53 |
54 #undef MAP_TYPE | 54 #undef MAP_TYPE |
55 | 55 |
56 #include "v8.h" | 56 #include "v8.h" |
57 | 57 |
58 #include "platform.h" | 58 #include "platform.h" |
| 59 #include "top.h" |
| 60 #include "v8threads.h" |
59 | 61 |
60 | 62 |
61 namespace v8 { | 63 namespace v8 { |
62 namespace internal { | 64 namespace internal { |
63 | 65 |
64 // 0 is never a valid thread id on Linux since tids and pids share a | 66 // 0 is never a valid thread id on Linux since tids and pids share a |
65 // name space and pid 0 is reserved (see man 2 kill). | 67 // name space and pid 0 is reserved (see man 2 kill). |
66 static const pthread_t kNoThread = (pthread_t) 0; | 68 static const pthread_t kNoThread = (pthread_t) 0; |
67 | 69 |
68 | 70 |
(...skipping 504 matching lines...)
573 | 575 |
574 | 576 |
575 Semaphore* OS::CreateSemaphore(int count) { | 577 Semaphore* OS::CreateSemaphore(int count) { |
576 return new LinuxSemaphore(count); | 578 return new LinuxSemaphore(count); |
577 } | 579 } |
578 | 580 |
579 | 581 |
580 #ifdef ENABLE_LOGGING_AND_PROFILING | 582 #ifdef ENABLE_LOGGING_AND_PROFILING |
581 | 583 |
582 static Sampler* active_sampler_ = NULL; | 584 static Sampler* active_sampler_ = NULL; |
| 585 static pthread_t vm_thread_ = 0; |
583 | 586 |
584 | 587 |
585 #if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__)) | 588 #if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__)) |
586 // Android runs a fairly new Linux kernel, so signal info is there, | 589 // Android runs a fairly new Linux kernel, so signal info is there, |
587 // but the C library doesn't have the structs defined. | 590 // but the C library doesn't have the structs defined. |
588 | 591 |
589 struct sigcontext { | 592 struct sigcontext { |
590 uint32_t trap_no; | 593 uint32_t trap_no; |
591 uint32_t error_code; | 594 uint32_t error_code; |
592 uint32_t oldmask; | 595 uint32_t oldmask; |
593 uint32_t gregs[16]; | 596 uint32_t gregs[16]; |
594 uint32_t arm_cpsr; | 597 uint32_t arm_cpsr; |
595 uint32_t fault_address; | 598 uint32_t fault_address; |
596 }; | 599 }; |
597 typedef uint32_t __sigset_t; | 600 typedef uint32_t __sigset_t; |
598 typedef struct sigcontext mcontext_t; | 601 typedef struct sigcontext mcontext_t; |
599 typedef struct ucontext { | 602 typedef struct ucontext { |
600 uint32_t uc_flags; | 603 uint32_t uc_flags; |
601 struct ucontext *uc_link; | 604 struct ucontext *uc_link; |
602 stack_t uc_stack; | 605 stack_t uc_stack; |
603 mcontext_t uc_mcontext; | 606 mcontext_t uc_mcontext; |
604 __sigset_t uc_sigmask; | 607 __sigset_t uc_sigmask; |
605 } ucontext_t; | 608 } ucontext_t; |
606 enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11}; | 609 enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11}; |
607 | 610 |
608 #endif | 611 #endif |
609 | 612 |
610 | 613 |
| 614 // A function that determines if a signal handler is called in the context |
| 615 // of a VM thread. |
| 616 // |
 | 617 // The problem is that the SIGPROF signal can be delivered to an arbitrary thread |
 | 618 // (see http://code.google.com/p/google-perftools/issues/detail?id=106#c2). |
| 619 // So, if the signal is being handled in the context of a non-VM thread, |
| 620 // it means that the VM thread is running, and trying to sample its stack can |
| 621 // cause a crash. |
| 622 static inline bool IsVmThread() { |
| 623 // In the case of a single VM thread, this check is enough. |
| 624 if (pthread_equal(pthread_self(), vm_thread_)) return true; |
 | 625 // If there are multiple threads that use the VM, they must have a thread id |
 | 626 // stored in TLS. To verify that the thread is really executing the VM, |
 | 627 // we check Top's data. Given that ThreadManager::RestoreThread first |
 | 628 // restores ThreadLocalTop from TLS, and only then erases the TLS value, |
 | 629 // reading Top::thread_id() should not be affected by races. |
| 630 if (ThreadManager::HasId() && !ThreadManager::IsArchived() && |
| 631 ThreadManager::CurrentId() == Top::thread_id()) { |
| 632 return true; |
| 633 } |
| 634 return false; |
| 635 } |
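
For readers unfamiliar with the underlying issue, here is a minimal standalone sketch of the same guard (not part of this patch; all names are illustrative). The process-wide ITIMER_PROF timer raises SIGPROF on whichever thread the kernel happens to pick, so a handler that wants to sample one particular thread has to record that thread up front and compare with pthread_equal, which is exactly how vm_thread_ is used above:

#include <pthread.h>
#include <signal.h>
#include <sys/time.h>
#include <cstdio>

static pthread_t profiled_thread;                // thread we want to sample
static volatile sig_atomic_t ticks_taken = 0;    // ticks on that thread
static volatile sig_atomic_t ticks_dropped = 0;  // ticks delivered elsewhere

static void ProfHandler(int signal, siginfo_t*, void*) {
  if (signal != SIGPROF) return;
  // SIGPROF from ITIMER_PROF is process-directed: the kernel may run this
  // handler on any thread. Only count the tick when we are on the one we
  // registered at startup.
  if (pthread_equal(pthread_self(), profiled_thread)) {
    ++ticks_taken;
  } else {
    ++ticks_dropped;
  }
}

int main() {
  profiled_thread = pthread_self();  // plays the role of vm_thread_

  struct sigaction sa;
  sa.sa_sigaction = ProfHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;
  sigaction(SIGPROF, &sa, NULL);

  itimerval timer;
  timer.it_interval.tv_sec = 0;
  timer.it_interval.tv_usec = 10000;  // e.g. a 10ms profiling interval
  timer.it_value = timer.it_interval;
  setitimer(ITIMER_PROF, &timer, NULL);

  for (volatile long i = 0; i < 500000000; ++i) {}  // burn CPU so ticks arrive
  printf("ticks taken: %d, dropped: %d\n", (int)ticks_taken, (int)ticks_dropped);
  return 0;
}

With a single thread every tick passes the check; the interesting case is the one the patch addresses, where other threads exist and a tick can land on one of them while the VM thread's registers are what the sampler actually wants.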
| 636 |
| 637 |
611 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { | 638 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { |
612 USE(info); | 639 USE(info); |
613 if (signal != SIGPROF) return; | 640 if (signal != SIGPROF) return; |
614 if (active_sampler_ == NULL) return; | 641 if (active_sampler_ == NULL) return; |
615 | 642 |
616 TickSample sample; | 643 TickSample sample; |
617 | 644 |
618 // If profiling, we extract the current pc and sp. | 645 // If profiling, we extract the current pc and sp. |
619 if (active_sampler_->IsProfiling()) { | 646 if (active_sampler_->IsProfiling()) { |
620 // Extracting the sample from the context is extremely machine dependent. | 647 // Extracting the sample from the context is extremely machine dependent. |
(...skipping 12 matching lines...)
633 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3)) | 660 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3)) |
634 sample.pc = mcontext.gregs[R15]; | 661 sample.pc = mcontext.gregs[R15]; |
635 sample.sp = mcontext.gregs[R13]; | 662 sample.sp = mcontext.gregs[R13]; |
636 sample.fp = mcontext.gregs[R11]; | 663 sample.fp = mcontext.gregs[R11]; |
637 #else | 664 #else |
638 sample.pc = mcontext.arm_pc; | 665 sample.pc = mcontext.arm_pc; |
639 sample.sp = mcontext.arm_sp; | 666 sample.sp = mcontext.arm_sp; |
640 sample.fp = mcontext.arm_fp; | 667 sample.fp = mcontext.arm_fp; |
641 #endif | 668 #endif |
642 #endif | 669 #endif |
643 active_sampler_->SampleStack(&sample); | 670 if (IsVmThread()) |
| 671 active_sampler_->SampleStack(&sample); |
644 } | 672 } |
645 | 673 |
646 // We always sample the VM state. | 674 // We always sample the VM state. |
647 sample.state = Logger::state(); | 675 sample.state = Logger::state(); |
648 | 676 |
649 active_sampler_->Tick(&sample); | 677 active_sampler_->Tick(&sample); |
650 } | 678 } |
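
The skipped lines presumably hold the non-ARM register extraction; purely for comparison, here is a hedged sketch of what the same extraction could look like on x86-64 glibc (not taken from this patch; the REG_* indices come from <sys/ucontext.h> and need _GNU_SOURCE):

// Sketch only: reading pc/sp/fp out of the signal context on x86-64 glibc.
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <signal.h>
#include <ucontext.h>
#include <cstdint>
#include <cstdio>

static volatile uintptr_t pc, sp, fp;

static void Handler(int, siginfo_t*, void* context) {
  ucontext_t* ucontext = static_cast<ucontext_t*>(context);
  mcontext_t& mcontext = ucontext->uc_mcontext;
  pc = static_cast<uintptr_t>(mcontext.gregs[REG_RIP]);  // program counter
  sp = static_cast<uintptr_t>(mcontext.gregs[REG_RSP]);  // stack pointer
  fp = static_cast<uintptr_t>(mcontext.gregs[REG_RBP]);  // frame pointer
}

int main() {
  struct sigaction sa;
  sa.sa_sigaction = Handler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;
  sigaction(SIGPROF, &sa, NULL);
  raise(SIGPROF);  // deliver the signal synchronously to this thread
  printf("pc=%p sp=%p fp=%p\n", (void*)pc, (void*)sp, (void*)fp);
  return 0;
}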
651 | 679 |
652 | 680 |
653 class Sampler::PlatformData : public Malloced { | 681 class Sampler::PlatformData : public Malloced { |
(...skipping 17 matching lines...)
671 Sampler::~Sampler() { | 699 Sampler::~Sampler() { |
672 delete data_; | 700 delete data_; |
673 } | 701 } |
674 | 702 |
675 | 703 |
676 void Sampler::Start() { | 704 void Sampler::Start() { |
677 // There can only be one active sampler at a time on POSIX | 705 // There can only be one active sampler at a time on POSIX |
678 // platforms. | 706 // platforms. |
679 if (active_sampler_ != NULL) return; | 707 if (active_sampler_ != NULL) return; |
680 | 708 |
| 709 vm_thread_ = pthread_self(); |
| 710 |
681 // Request profiling signals. | 711 // Request profiling signals. |
682 struct sigaction sa; | 712 struct sigaction sa; |
683 sa.sa_sigaction = ProfilerSignalHandler; | 713 sa.sa_sigaction = ProfilerSignalHandler; |
684 sigemptyset(&sa.sa_mask); | 714 sigemptyset(&sa.sa_mask); |
685 sa.sa_flags = SA_SIGINFO; | 715 sa.sa_flags = SA_SIGINFO; |
686 if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return; | 716 if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return; |
687 data_->signal_handler_installed_ = true; | 717 data_->signal_handler_installed_ = true; |
688 | 718 |
689 // Set the itimer to generate a tick for each interval. | 719 // Set the itimer to generate a tick for each interval. |
690 itimerval itimer; | 720 itimerval itimer; |
(...skipping 15 matching lines...)
706 setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL); | 736 setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL); |
707 sigaction(SIGPROF, &data_->old_signal_handler_, 0); | 737 sigaction(SIGPROF, &data_->old_signal_handler_, 0); |
708 data_->signal_handler_installed_ = false; | 738 data_->signal_handler_installed_ = false; |
709 } | 739 } |
710 | 740 |
711 // This sampler is no longer the active sampler. | 741 // This sampler is no longer the active sampler. |
712 active_sampler_ = NULL; | 742 active_sampler_ = NULL; |
713 active_ = false; | 743 active_ = false; |
714 } | 744 } |
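
As background on the surrounding code: Start() and Stop() save and then reinstate the previous SIGPROF disposition and ITIMER_PROF value, since sigaction() and setitimer() both hand back whatever was installed before. A minimal sketch of that save-and-restore pattern, with illustrative names that are not this file's:

#include <signal.h>
#include <sys/time.h>

static struct sigaction old_signal_handler;
static itimerval old_timer_value;
static bool signal_handler_installed = false;

static void Tick(int, siginfo_t*, void*) { /* take a sample */ }

static void StartProfiling(int interval_ms) {
  struct sigaction sa;
  sa.sa_sigaction = Tick;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;
  // Remember whatever handler was installed before us so StopProfiling()
  // can put it back.
  if (sigaction(SIGPROF, &sa, &old_signal_handler) != 0) return;
  signal_handler_installed = true;

  itimerval timer;
  timer.it_interval.tv_sec = 0;
  timer.it_interval.tv_usec = interval_ms * 1000;
  timer.it_value = timer.it_interval;
  // Likewise remember the previous ITIMER_PROF setting.
  setitimer(ITIMER_PROF, &timer, &old_timer_value);
}

static void StopProfiling() {
  if (!signal_handler_installed) return;
  // Restore the previous timer and signal disposition, mirroring Stop() above.
  setitimer(ITIMER_PROF, &old_timer_value, NULL);
  sigaction(SIGPROF, &old_signal_handler, NULL);
  signal_handler_installed = false;
}

int main() {
  StartProfiling(10);
  for (volatile long i = 0; i < 100000000; ++i) {}  // profiled work
  StopProfiling();
  return 0;
}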
715 | 745 |
| 746 |
716 #endif // ENABLE_LOGGING_AND_PROFILING | 747 #endif // ENABLE_LOGGING_AND_PROFILING |
717 | 748 |
718 } } // namespace v8::internal | 749 } } // namespace v8::internal |