| OLD | NEW |
| 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. | 11 // with the distribution. |
| (...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 43 #include <unistd.h> // getpagesize | 43 #include <unistd.h> // getpagesize |
| 44 #include <execinfo.h> // backtrace, backtrace_symbols | 44 #include <execinfo.h> // backtrace, backtrace_symbols |
| 45 #include <strings.h> // index | 45 #include <strings.h> // index |
| 46 #include <errno.h> | 46 #include <errno.h> |
| 47 #include <stdarg.h> | 47 #include <stdarg.h> |
| 48 #include <limits.h> | 48 #include <limits.h> |
| 49 | 49 |
| 50 #undef MAP_TYPE | 50 #undef MAP_TYPE |
| 51 | 51 |
| 52 #include "v8.h" | 52 #include "v8.h" |
| 53 #include "v8threads.h" |
| 53 | 54 |
| 54 #include "platform.h" | 55 #include "platform.h" |
| 55 #include "vm-state-inl.h" | 56 #include "vm-state-inl.h" |
| 56 | 57 |
| 57 | 58 |
| 58 namespace v8 { | 59 namespace v8 { |
| 59 namespace internal { | 60 namespace internal { |
| 60 | 61 |
| 61 // 0 is never a valid thread id on OpenBSD since tids and pids share a | 62 // 0 is never a valid thread id on OpenBSD since tids and pids share a |
| 62 // name space and pid 0 is used to kill the group (see man 2 kill). | 63 // name space and pid 0 is used to kill the group (see man 2 kill). |
| 63 static const pthread_t kNoThread = (pthread_t) 0; | 64 static const pthread_t kNoThread = (pthread_t) 0; |
| 64 | 65 |
| 65 | 66 |
| 66 double ceiling(double x) { | 67 double ceiling(double x) { |
| 67 // Correct as on OS X | 68 // Correct as on OS X |
| 68 if (-1.0 < x && x < 0.0) { | 69 if (-1.0 < x && x < 0.0) { |
| 69 return -0.0; | 70 return -0.0; |
| 70 } else { | 71 } else { |
| 71 return ceil(x); | 72 return ceil(x); |
| 72 } | 73 } |
| 73 } | 74 } |
| 74 | 75 |
| 75 | 76 |
| 77 static Mutex* limit_mutex = NULL; |
| 78 |
| 79 |
| 76 void OS::Setup() { | 80 void OS::Setup() { |
| 77 // Seed the random number generator. | 81 // Seed the random number generator. |
| 78 // Convert the current time to a 64-bit integer first, before converting it | 82 // Convert the current time to a 64-bit integer first, before converting it |
| 79 // to an unsigned. Going directly can cause an overflow and the seed to be | 83 // to an unsigned. Going directly can cause an overflow and the seed to be |
| 80 // set to all ones. The seed will be identical for different instances that | 84 // set to all ones. The seed will be identical for different instances that |
| 81 // call this setup code within the same millisecond. | 85 // call this setup code within the same millisecond. |
| 82 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()); | 86 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()); |
| 83 srandom(static_cast<unsigned int>(seed)); | 87 srandom(static_cast<unsigned int>(seed)); |
| 88 limit_mutex = CreateMutex(); |
| 84 } | 89 } |
| 85 | 90 |
| 86 | 91 |
| 87 void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { | 92 void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { |
| 88 __asm__ __volatile__("" : : : "memory"); | 93 __asm__ __volatile__("" : : : "memory"); |
| 89 *ptr = value; | 94 *ptr = value; |
| 90 } | 95 } |
| 91 | 96 |
| 92 | 97 |
| 93 uint64_t OS::CpuFeaturesImpliedByPlatform() { | 98 uint64_t OS::CpuFeaturesImpliedByPlatform() { |
| (...skipping 28 matching lines...) Expand all Loading... |
| 122 // We keep the lowest and highest addresses mapped as a quick way of | 127 // We keep the lowest and highest addresses mapped as a quick way of |
| 123 // determining that pointers are outside the heap (used mostly in assertions | 128 // determining that pointers are outside the heap (used mostly in assertions |
| 124 // and verification). The estimate is conservative, ie, not all addresses in | 129 // and verification). The estimate is conservative, ie, not all addresses in |
| 125 // 'allocated' space are actually allocated to our heap. The range is | 130 // 'allocated' space are actually allocated to our heap. The range is |
| 126 // [lowest, highest), inclusive on the low end and exclusive on the high end. | 131 // [lowest, highest), inclusive on the low end and exclusive on the high end. |
| 127 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1); | 132 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1); |
| 128 static void* highest_ever_allocated = reinterpret_cast<void*>(0); | 133 static void* highest_ever_allocated = reinterpret_cast<void*>(0); |
| 129 | 134 |
| 130 | 135 |
| 131 static void UpdateAllocatedSpaceLimits(void* address, int size) { | 136 static void UpdateAllocatedSpaceLimits(void* address, int size) { |
| 137 ASSERT(limit_mutex != NULL); |
| 138 ScopedLock lock(limit_mutex); |
| 139 |
| 132 lowest_ever_allocated = Min(lowest_ever_allocated, address); | 140 lowest_ever_allocated = Min(lowest_ever_allocated, address); |
| 133 highest_ever_allocated = | 141 highest_ever_allocated = |
| 134 Max(highest_ever_allocated, | 142 Max(highest_ever_allocated, |
| 135 reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); | 143 reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); |
| 136 } | 144 } |
| 137 | 145 |
| 138 | 146 |
| 139 bool OS::IsOutsideAllocatedSpace(void* address) { | 147 bool OS::IsOutsideAllocatedSpace(void* address) { |
| 140 return address < lowest_ever_allocated || address >= highest_ever_allocated; | 148 return address < lowest_ever_allocated || address >= highest_ever_allocated; |
| 141 } | 149 } |
| (...skipping 15 matching lines...) Expand all Loading... |
| 157 LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed")); | 165 LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed")); |
| 158 return NULL; | 166 return NULL; |
| 159 } | 167 } |
| 160 *allocated = msize; | 168 *allocated = msize; |
| 161 UpdateAllocatedSpaceLimits(mbase, msize); | 169 UpdateAllocatedSpaceLimits(mbase, msize); |
| 162 return mbase; | 170 return mbase; |
| 163 } | 171 } |
| 164 | 172 |
| 165 | 173 |
| 166 void OS::Free(void* buf, const size_t length) { | 174 void OS::Free(void* buf, const size_t length) { |
| 175 // TODO(1240712): munmap has a return value which is ignored here. |
| 167 int result = munmap(buf, length); | 176 int result = munmap(buf, length); |
| 168 USE(result); | 177 USE(result); |
| 169 ASSERT(result == 0); | 178 ASSERT(result == 0); |
| 170 } | 179 } |
| 171 | 180 |
| 172 | 181 |
| 173 #ifdef ENABLE_HEAP_PROTECTION | 182 #ifdef ENABLE_HEAP_PROTECTION |
| 174 | 183 |
| 175 void OS::Protect(void* address, size_t size) { | 184 void OS::Protect(void* address, size_t size) { |
| 176 UNIMPLEMENTED(); | 185 UNIMPLEMENTED(); |
| (...skipping 113 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 290 result = read(fd, buffer + bytes_read, 1); | 299 result = read(fd, buffer + bytes_read, 1); |
| 291 if (result < 1) break; | 300 if (result < 1) break; |
| 292 } while (buffer[bytes_read] != '\n'); | 301 } while (buffer[bytes_read] != '\n'); |
| 293 buffer[bytes_read] = 0; | 302 buffer[bytes_read] = 0; |
| 294 // Ignore mappings that are not executable. | 303 // Ignore mappings that are not executable. |
| 295 if (buffer[3] != 'x') continue; | 304 if (buffer[3] != 'x') continue; |
| 296 char* start_of_path = index(buffer, '/'); | 305 char* start_of_path = index(buffer, '/'); |
| 297 // There may be no filename in this line. Skip to next. | 306 // There may be no filename in this line. Skip to next. |
| 298 if (start_of_path == NULL) continue; | 307 if (start_of_path == NULL) continue; |
| 299 buffer[bytes_read] = 0; | 308 buffer[bytes_read] = 0; |
| 300 LOG(SharedLibraryEvent(start_of_path, start, end)); | 309 LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end)); |
| 301 } | 310 } |
| 302 close(fd); | 311 close(fd); |
| 303 #endif | 312 #endif |
| 304 } | 313 } |
| 305 | 314 |
| 306 | 315 |
| 307 void OS::SignalCodeMovingGC() { | 316 void OS::SignalCodeMovingGC() { |
| 308 } | 317 } |
| 309 | 318 |
| 310 | 319 |
| 311 int OS::StackWalk(Vector<OS::StackFrame> frames) { | 320 int OS::StackWalk(Vector<OS::StackFrame> frames) { |
| 312 UNIMPLEMENTED(); | 321 int frames_size = frames.length(); |
| 313 return 1; | 322 ScopedVector<void*> addresses(frames_size); |
| 323 |
| 324 int frames_count = backtrace(addresses.start(), frames_size); |
| 325 |
| 326 char** symbols = backtrace_symbols(addresses.start(), frames_count); |
| 327 if (symbols == NULL) { |
| 328 return kStackWalkError; |
| 329 } |
| 330 |
| 331 for (int i = 0; i < frames_count; i++) { |
| 332 frames[i].address = addresses[i]; |
| 333 // Format a text representation of the frame based on the information |
| 334 // available. |
| 335 SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen), |
| 336 "%s", |
| 337 symbols[i]); |
| 338 // Make sure line termination is in place. |
| 339 frames[i].text[kStackWalkMaxTextLen - 1] = '\0'; |
| 340 } |
| 341 |
| 342 free(symbols); |
| 343 |
| 344 return frames_count; |
| 314 } | 345 } |
| 315 | 346 |
| 316 | 347 |
| 317 // Constants used for mmap. | 348 // Constants used for mmap. |
| 318 static const int kMmapFd = -1; | 349 static const int kMmapFd = -1; |
| 319 static const int kMmapFdOffset = 0; | 350 static const int kMmapFdOffset = 0; |
| 320 | 351 |
| 321 | 352 |
| 322 VirtualMemory::VirtualMemory(size_t size) { | 353 VirtualMemory::VirtualMemory(size_t size) { |
| 323 address_ = mmap(NULL, size, PROT_NONE, | 354 address_ = mmap(NULL, size, PROT_NONE, |
| (...skipping 23 matching lines...) Expand all Loading... |
| 347 return false; | 378 return false; |
| 348 } | 379 } |
| 349 | 380 |
| 350 UpdateAllocatedSpaceLimits(address, size); | 381 UpdateAllocatedSpaceLimits(address, size); |
| 351 return true; | 382 return true; |
| 352 } | 383 } |
| 353 | 384 |
| 354 | 385 |
| 355 bool VirtualMemory::Uncommit(void* address, size_t size) { | 386 bool VirtualMemory::Uncommit(void* address, size_t size) { |
| 356 return mmap(address, size, PROT_NONE, | 387 return mmap(address, size, PROT_NONE, |
| 357 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, | 388 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, |
| 358 kMmapFd, kMmapFdOffset) != MAP_FAILED; | 389 kMmapFd, kMmapFdOffset) != MAP_FAILED; |
| 359 } | 390 } |
| 360 | 391 |
| 361 | 392 |
| 362 class Thread::PlatformData : public Malloced { | 393 class Thread::PlatformData : public Malloced { |
| 363 public: | 394 public: |
| 364 PlatformData() : thread_(kNoThread) {} | |
| 365 | |
| 366 pthread_t thread_; // Thread handle for pthread. | 395 pthread_t thread_; // Thread handle for pthread. |
| 367 }; | 396 }; |
| 368 | 397 |
| 369 | 398 |
| 370 Thread::Thread(Isolate* isolate, const Options& options) | 399 Thread::Thread(Isolate* isolate, const Options& options) |
| 371 : data_(new PlatformData()), | 400 : data_(new PlatformData), |
| 372 isolate_(isolate), | 401 isolate_(isolate), |
| 373 stack_size_(options.stack_size) { | 402 stack_size_(options.stack_size) { |
| 374 set_name(options.name); | 403 set_name(options.name); |
| 375 } | 404 } |
| 376 | 405 |
| 377 | 406 |
| 378 Thread::Thread(Isolate* isolate, const char* name) | 407 Thread::Thread(Isolate* isolate, const char* name) |
| 379 : data_(new PlatfromData()), | 408 : data_(new PlatformData), |
| 380 isolate_(isolate), | 409 isolate_(isolate), |
| 381 stack_size_(0) { | 410 stack_size_(0) { |
| 382 set_name(name); | 411 set_name(name); |
| 383 } | 412 } |
| 384 | 413 |
| 385 | 414 |
| 386 Thread::~Thread() { | 415 Thread::~Thread() { |
| 387 delete data_; | 416 delete data_; |
| 388 } | 417 } |
| 389 | 418 |
| (...skipping 19 matching lines...) Expand all Loading... |
| 409 | 438 |
| 410 void Thread::Start() { | 439 void Thread::Start() { |
| 411 pthread_attr_t* attr_ptr = NULL; | 440 pthread_attr_t* attr_ptr = NULL; |
| 412 pthread_attr_t attr; | 441 pthread_attr_t attr; |
| 413 if (stack_size_ > 0) { | 442 if (stack_size_ > 0) { |
| 414 pthread_attr_init(&attr); | 443 pthread_attr_init(&attr); |
| 415 pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_)); | 444 pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_)); |
| 416 attr_ptr = &attr; | 445 attr_ptr = &attr; |
| 417 } | 446 } |
| 418 pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this); | 447 pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this); |
| 419 ASSERT(IsValid()); | 448 ASSERT(data_->thread_ != kNoThread); |
| 420 } | 449 } |
| 421 | 450 |
| 422 | 451 |
| 423 void Thread::Join() { | 452 void Thread::Join() { |
| 424 pthread_join(data_->thread_, NULL); | 453 pthread_join(data_->thread_, NULL); |
| 425 } | 454 } |
| 426 | 455 |
| 427 | 456 |
| 428 Thread::LocalStorageKey Thread::CreateThreadLocalKey() { | 457 Thread::LocalStorageKey Thread::CreateThreadLocalKey() { |
| 429 pthread_key_t key; | 458 pthread_key_t key; |
| (...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 477 virtual int Lock() { | 506 virtual int Lock() { |
| 478 int result = pthread_mutex_lock(&mutex_); | 507 int result = pthread_mutex_lock(&mutex_); |
| 479 return result; | 508 return result; |
| 480 } | 509 } |
| 481 | 510 |
| 482 virtual int Unlock() { | 511 virtual int Unlock() { |
| 483 int result = pthread_mutex_unlock(&mutex_); | 512 int result = pthread_mutex_unlock(&mutex_); |
| 484 return result; | 513 return result; |
| 485 } | 514 } |
| 486 | 515 |
| 516 virtual bool TryLock() { |
| 517 int result = pthread_mutex_trylock(&mutex_); |
| 518 // Return false if the lock is busy and locking failed. |
| 519 if (result == EBUSY) { |
| 520 return false; |
| 521 } |
| 522 ASSERT(result == 0); // Verify no other errors. |
| 523 return true; |
| 524 } |
| 525 |
| 487 private: | 526 private: |
| 488 pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms. | 527 pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms. |
| 489 }; | 528 }; |
| 490 | 529 |
| 491 | 530 |
| 492 Mutex* OS::CreateMutex() { | 531 Mutex* OS::CreateMutex() { |
| 493 return new OpenBSDMutex(); | 532 return new OpenBSDMutex(); |
| 494 } | 533 } |
| 495 | 534 |
| 496 | 535 |
| (...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 529 if (gettimeofday(¤t_time, NULL) == -1) { | 568 if (gettimeofday(¤t_time, NULL) == -1) { |
| 530 return false; | 569 return false; |
| 531 } | 570 } |
| 532 | 571 |
| 533 // Calculate time for end of timeout. | 572 // Calculate time for end of timeout. |
| 534 struct timeval end_time; | 573 struct timeval end_time; |
| 535 timeradd(¤t_time, &delta, &end_time); | 574 timeradd(¤t_time, &delta, &end_time); |
| 536 | 575 |
| 537 struct timespec ts; | 576 struct timespec ts; |
| 538 TIMEVAL_TO_TIMESPEC(&end_time, &ts); | 577 TIMEVAL_TO_TIMESPEC(&end_time, &ts); |
| 578 |
| 579 int to = ts.tv_sec; |
| 580 |
| 539 while (true) { | 581 while (true) { |
| 540 int result = sem_trywait(&sem_); | 582 int result = sem_trywait(&sem_); |
| 541 if (result == 0) return true; // Successfully got semaphore. | 583 if (result == 0) return true; // Successfully got semaphore. |
| 542 if (result == -1 && errno == ETIMEDOUT) return false; // Timeout. | 584 if (!to) return false; // Timeout. |
| 543 CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup. | 585 CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup. |
| 586 usleep(ts.tv_nsec / 1000); |
| 587 to--; |
| 544 } | 588 } |
| 545 } | 589 } |
| 546 | 590 |
| 547 | 591 |
| 548 Semaphore* OS::CreateSemaphore(int count) { | 592 Semaphore* OS::CreateSemaphore(int count) { |
| 549 return new OpenBSDSemaphore(count); | 593 return new OpenBSDSemaphore(count); |
| 550 } | 594 } |
| 551 | 595 |
| 552 | 596 |
| 553 #ifdef ENABLE_LOGGING_AND_PROFILING | 597 #ifdef ENABLE_LOGGING_AND_PROFILING |
| 554 | 598 |
| 555 static Sampler* active_sampler_ = NULL; | 599 static pthread_t GetThreadID() { |
| 600 pthread_t thread_id = pthread_self(); |
| 601 return thread_id; |
| 602 } |
| 603 |
| 604 |
| 605 class Sampler::PlatformData : public Malloced { |
| 606 public: |
| 607 PlatformData() : vm_tid_(GetThreadID()) {} |
| 608 |
| 609 pthread_t vm_tid() const { return vm_tid_; } |
| 610 |
| 611 private: |
| 612 pthread_t vm_tid_; |
| 613 }; |
| 614 |
| 556 | 615 |
| 557 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { | 616 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { |
| 558 USE(info); | 617 USE(info); |
| 559 if (signal != SIGPROF) return; | 618 if (signal != SIGPROF) return; |
| 560 if (active_sampler_ == NULL) return; | 619 Isolate* isolate = Isolate::UncheckedCurrent(); |
| 561 | 620 if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) { |
| 562 TickSample sample; | 621 // We require a fully initialized and entered isolate. |
| 563 | 622 return; |
| 564 // We always sample the VM state. | 623 } |
| 565 sample.state = VMState::current_state(); | 624 if (v8::Locker::IsActive() && |
| 566 | 625 !isolate->thread_manager()->IsLockedByCurrentThread()) { |
| 567 active_sampler_->Tick(&sample); | 626 return; |
| 568 } | 627 } |
| 569 | 628 |
| 570 | 629 Sampler* sampler = isolate->logger()->sampler(); |
| 571 class Sampler::PlatformData : public Malloced { | 630 if (sampler == NULL || !sampler->IsActive()) return; |
| 631 |
| 632 TickSample sample_obj; |
| 633 TickSample* sample = CpuProfiler::TickSampleEvent(isolate); |
| 634 if (sample == NULL) sample = &sample_obj; |
| 635 |
| 636 // Extracting the sample from the context is extremely machine dependent. |
| 637 ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context); |
| 638 sample->state = isolate->current_vm_state(); |
| 639 #if V8_HOST_ARCH_IA32 |
| 640 sample->pc = reinterpret_cast<Address>(ucontext->sc_eip); |
| 641 sample->sp = reinterpret_cast<Address>(ucontext->sc_esp); |
| 642 sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp); |
| 643 #elif V8_HOST_ARCH_X64 |
| 644 sample->pc = reinterpret_cast<Address>(ucontext->sc_rip); |
| 645 sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp); |
| 646 sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp); |
| 647 #elif V8_HOST_ARCH_ARM |
| 648 sample->pc = reinterpret_cast<Address>(ucontext->sc_r15); |
| 649 sample->sp = reinterpret_cast<Address>(ucontext->sc_r13); |
| 650 sample->fp = reinterpret_cast<Address>(ucontext->sc_r11); |
| 651 #endif |
| 652 sampler->SampleStack(sample); |
| 653 sampler->Tick(sample); |
| 654 } |
| 655 |
| 656 |
| 657 class SignalSender : public Thread { |
| 572 public: | 658 public: |
| 573 PlatformData() { | 659 enum SleepInterval { |
| 574 signal_handler_installed_ = false; | 660 HALF_INTERVAL, |
| 575 } | 661 FULL_INTERVAL |
| 576 | 662 }; |
| 577 bool signal_handler_installed_; | 663 |
| 578 struct sigaction old_signal_handler_; | 664 explicit SignalSender(int interval) |
| 579 struct itimerval old_timer_value_; | 665 : Thread(NULL, "SignalSender"), |
| 666 interval_(interval) {} |
| 667 |
| 668 static void AddActiveSampler(Sampler* sampler) { |
| 669 ScopedLock lock(mutex_); |
| 670 SamplerRegistry::AddActiveSampler(sampler); |
| 671 if (instance_ == NULL) { |
| 672 // Install a signal handler. |
| 673 struct sigaction sa; |
| 674 sa.sa_sigaction = ProfilerSignalHandler; |
| 675 sigemptyset(&sa.sa_mask); |
| 676 sa.sa_flags = SA_RESTART | SA_SIGINFO; |
| 677 signal_handler_installed_ = |
| 678 (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0); |
| 679 |
| 680 // Start a thread that sends SIGPROF signal to VM threads. |
| 681 instance_ = new SignalSender(sampler->interval()); |
| 682 instance_->Start(); |
| 683 } else { |
| 684 ASSERT(instance_->interval_ == sampler->interval()); |
| 685 } |
| 686 } |
| 687 |
| 688 static void RemoveActiveSampler(Sampler* sampler) { |
| 689 ScopedLock lock(mutex_); |
| 690 SamplerRegistry::RemoveActiveSampler(sampler); |
| 691 if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) { |
| 692 RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown(); |
| 693 instance_->Join(); |
| 694 delete instance_; |
| 695 instance_ = NULL; |
| 696 |
| 697 // Restore the old signal handler. |
| 698 if (signal_handler_installed_) { |
| 699 sigaction(SIGPROF, &old_signal_handler_, 0); |
| 700 signal_handler_installed_ = false; |
| 701 } |
| 702 } |
| 703 } |
| 704 |
| 705 // Implement Thread::Run(). |
| 706 virtual void Run() { |
| 707 SamplerRegistry::State state; |
| 708 while ((state = SamplerRegistry::GetState()) != |
| 709 SamplerRegistry::HAS_NO_SAMPLERS) { |
| 710 bool cpu_profiling_enabled = |
| 711 (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS); |
| 712 bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled(); |
| 713 // When CPU profiling is enabled both JavaScript and C++ code is |
| 714 // profiled. We must not suspend. |
| 715 if (!cpu_profiling_enabled) { |
| 716 if (rate_limiter_.SuspendIfNecessary()) continue; |
| 717 } |
| 718 if (cpu_profiling_enabled && runtime_profiler_enabled) { |
| 719 if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) { |
| 720 return; |
| 721 } |
| 722 Sleep(HALF_INTERVAL); |
| 723 if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) { |
| 724 return; |
| 725 } |
| 726 Sleep(HALF_INTERVAL); |
| 727 } else { |
| 728 if (cpu_profiling_enabled) { |
| 729 if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, |
| 730 this)) { |
| 731 return; |
| 732 } |
| 733 } |
| 734 if (runtime_profiler_enabled) { |
| 735 if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, |
| 736 NULL)) { |
| 737 return; |
| 738 } |
| 739 } |
| 740 Sleep(FULL_INTERVAL); |
| 741 } |
| 742 } |
| 743 } |
| 744 |
| 745 static void DoCpuProfile(Sampler* sampler, void* raw_sender) { |
| 746 if (!sampler->IsProfiling()) return; |
| 747 SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender); |
| 748 sender->SendProfilingSignal(sampler->platform_data()->vm_tid()); |
| 749 } |
| 750 |
| 751 static void DoRuntimeProfile(Sampler* sampler, void* ignored) { |
| 752 if (!sampler->isolate()->IsInitialized()) return; |
| 753 sampler->isolate()->runtime_profiler()->NotifyTick(); |
| 754 } |
| 755 |
| 756 void SendProfilingSignal(pthread_t tid) { |
| 757 if (!signal_handler_installed_) return; |
| 758 pthread_kill(tid, SIGPROF); |
| 759 } |
| 760 |
| 761 void Sleep(SleepInterval full_or_half) { |
| 762 // Convert ms to us and subtract 100 us to compensate delays |
| 763 // occurring during signal delivery. |
| 764 useconds_t interval = interval_ * 1000 - 100; |
| 765 if (full_or_half == HALF_INTERVAL) interval /= 2; |
| 766 int result = usleep(interval); |
| 767 #ifdef DEBUG |
| 768 if (result != 0 && errno != EINTR) { |
| 769 fprintf(stderr, |
| 770 "SignalSender usleep error; interval = %u, errno = %d\n", |
| 771 interval, |
| 772 errno); |
| 773 ASSERT(result == 0 || errno == EINTR); |
| 774 } |
| 775 #endif |
| 776 USE(result); |
| 777 } |
| 778 |
| 779 const int interval_; |
| 780 RuntimeProfilerRateLimiter rate_limiter_; |
| 781 |
| 782 // Protects the process wide state below. |
| 783 static Mutex* mutex_; |
| 784 static SignalSender* instance_; |
| 785 static bool signal_handler_installed_; |
| 786 static struct sigaction old_signal_handler_; |
| 787 |
| 788 DISALLOW_COPY_AND_ASSIGN(SignalSender); |
| 580 }; | 789 }; |
| 581 | 790 |
| 791 Mutex* SignalSender::mutex_ = OS::CreateMutex(); |
| 792 SignalSender* SignalSender::instance_ = NULL; |
| 793 struct sigaction SignalSender::old_signal_handler_; |
| 794 bool SignalSender::signal_handler_installed_ = false; |
| 795 |
| 582 | 796 |
| 583 Sampler::Sampler(Isolate* isolate, int interval) | 797 Sampler::Sampler(Isolate* isolate, int interval) |
| 584 : isolate_(isolate), | 798 : isolate_(isolate), |
| 585 interval_(interval), | 799 interval_(interval), |
| 586 profiling_(false), | 800 profiling_(false), |
| 587 active_(false), | 801 active_(false), |
| 588 samples_taken_(0) { | 802 samples_taken_(0) { |
| 589 data_ = new PlatformData(); | 803 data_ = new PlatformData; |
| 590 } | 804 } |
| 591 | 805 |
| 592 | 806 |
| 593 Sampler::~Sampler() { | 807 Sampler::~Sampler() { |
| 808 ASSERT(!IsActive()); |
| 594 delete data_; | 809 delete data_; |
| 595 } | 810 } |
| 596 | 811 |
| 597 | 812 |
| 598 void Sampler::Start() { | 813 void Sampler::Start() { |
| 599 // There can only be one active sampler at the time on POSIX | 814 ASSERT(!IsActive()); |
| 600 // platforms. | 815 SetActive(true); |
| 601 if (active_sampler_ != NULL) return; | 816 SignalSender::AddActiveSampler(this); |
| 602 | |
| 603 // Request profiling signals. | |
| 604 struct sigaction sa; | |
| 605 sa.sa_sigaction = ProfilerSignalHandler; | |
| 606 sigemptyset(&sa.sa_mask); | |
| 607 sa.sa_flags = SA_SIGINFO; | |
| 608 if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return; | |
| 609 data_->signal_handler_installed_ = true; | |
| 610 | |
| 611 // Set the itimer to generate a tick for each interval. | |
| 612 itimerval itimer; | |
| 613 itimer.it_interval.tv_sec = interval_ / 1000; | |
| 614 itimer.it_interval.tv_usec = (interval_ % 1000) * 1000; | |
| 615 itimer.it_value.tv_sec = itimer.it_interval.tv_sec; | |
| 616 itimer.it_value.tv_usec = itimer.it_interval.tv_usec; | |
| 617 setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_); | |
| 618 | |
| 619 // Set this sampler as the active sampler. | |
| 620 active_sampler_ = this; | |
| 621 active_ = true; | |
| 622 } | 817 } |
| 623 | 818 |
| 624 | 819 |
| 625 void Sampler::Stop() { | 820 void Sampler::Stop() { |
| 626 // Restore old signal handler | 821 ASSERT(IsActive()); |
| 627 if (data_->signal_handler_installed_) { | 822 SignalSender::RemoveActiveSampler(this); |
| 628 setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL); | 823 SetActive(false); |
| 629 sigaction(SIGPROF, &data_->old_signal_handler_, 0); | |
| 630 data_->signal_handler_installed_ = false; | |
| 631 } | |
| 632 | |
| 633 // This sampler is no longer the active sampler. | |
| 634 active_sampler_ = NULL; | |
| 635 active_ = false; | |
| 636 } | 824 } |
| 637 | 825 |
| 638 #endif // ENABLE_LOGGING_AND_PROFILING | 826 #endif // ENABLE_LOGGING_AND_PROFILING |
| 639 | 827 |
| 640 } } // namespace v8::internal | 828 } } // namespace v8::internal |
| OLD | NEW |