| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 70 matching lines...) |
| 81 static void* GetRandomMmapAddr() { | 81 static void* GetRandomMmapAddr() { |
| 82 Isolate* isolate = Isolate::UncheckedCurrent(); | 82 Isolate* isolate = Isolate::UncheckedCurrent(); |
| 83 // Note that the current isolate isn't set up in a call path via | 83 // Note that the current isolate isn't set up in a call path via |
| 84 // CpuFeatures::Probe. We don't care about randomization in this case because | 84 // CpuFeatures::Probe. We don't care about randomization in this case because |
| 85 // the code page is immediately freed. | 85 // the code page is immediately freed. |
| 86 if (isolate != NULL) { | 86 if (isolate != NULL) { |
| 87 #ifdef V8_TARGET_ARCH_X64 | 87 #ifdef V8_TARGET_ARCH_X64 |
| 88 uint64_t rnd1 = V8::RandomPrivate(isolate); | 88 uint64_t rnd1 = V8::RandomPrivate(isolate); |
| 89 uint64_t rnd2 = V8::RandomPrivate(isolate); | 89 uint64_t rnd2 = V8::RandomPrivate(isolate); |
| 90 uint64_t raw_addr = (rnd1 << 32) ^ rnd2; | 90 uint64_t raw_addr = (rnd1 << 32) ^ rnd2; |
| 91 // Currently available CPUs have 48 bits of virtual addressing. Truncate |
| 92 // the hint address to 46 bits to give the kernel a fighting chance of |
| 93 // fulfilling our placement request. |
| 91 raw_addr &= V8_UINT64_C(0x3ffffffff000); | 94 raw_addr &= V8_UINT64_C(0x3ffffffff000); |
| 92 #else | 95 #else |
| 93 uint32_t raw_addr = V8::RandomPrivate(isolate); | 96 uint32_t raw_addr = V8::RandomPrivate(isolate); |
| 94 // The range 0x20000000 - 0x60000000 is relatively unpopulated across a | 97 // The range 0x20000000 - 0x60000000 is relatively unpopulated across a |
| 95 // variety of ASLR modes (PAE kernel, NX compat mode, etc). | 98 // variety of ASLR modes (PAE kernel, NX compat mode, etc). |
| 96 raw_addr &= 0x3ffff000; | 99 raw_addr &= 0x3ffff000; |
| 97 raw_addr += 0x20000000; | 100 raw_addr += 0x20000000; |
| 98 #endif | 101 #endif |
| 99 return reinterpret_cast<void*>(raw_addr); | 102 return reinterpret_cast<void*>(raw_addr); |
| 100 } | 103 } |
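Note on the new x64 hint above: the patch combines two 32-bit random values into a page-aligned address below 2^46, leaving the kernel headroom inside the 48-bit virtual address space. A minimal standalone sketch of that arithmetic follows; it substitutes a hypothetical `pseudo_random()` helper for `V8::RandomPrivate(isolate)`, which needs an Isolate.

```cpp
// Sketch of the x64 hint computation; pseudo_random() is a stand-in
// 32-bit source, not part of the patch.
#include <stdint.h>
#include <stdlib.h>

static uint32_t pseudo_random() {
  return static_cast<uint32_t>(random());  // hypothetical PRNG stand-in
}

static void* RandomMmapHint() {
  uint64_t rnd1 = pseudo_random();
  uint64_t rnd2 = pseudo_random();
  uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
  // Keep bits 12..45: the hint is page-aligned (low 12 bits clear) and
  // below 2^46, well inside the 48-bit virtual address space.
  raw_addr &= UINT64_C(0x3ffffffff000);
  return reinterpret_cast<void*>(raw_addr);
}
```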
| (...skipping 273 matching lines...) |
| 374 | 377 |
| 375 | 378 |
| 376 size_t OS::AllocateAlignment() { | 379 size_t OS::AllocateAlignment() { |
| 377 return sysconf(_SC_PAGESIZE); | 380 return sysconf(_SC_PAGESIZE); |
| 378 } | 381 } |
| 379 | 382 |
| 380 | 383 |
| 381 void* OS::Allocate(const size_t requested, | 384 void* OS::Allocate(const size_t requested, |
| 382 size_t* allocated, | 385 size_t* allocated, |
| 383 bool is_executable) { | 386 bool is_executable) { |
| 384 const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE)); | 387 const size_t msize = RoundUp(requested, AllocateAlignment()); |
| 385 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); | 388 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); |
| 386 void* addr = GetRandomMmapAddr(); | 389 void* addr = GetRandomMmapAddr(); |
| 387 void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | 390 void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
| 388 if (mbase == MAP_FAILED) { | 391 if (mbase == MAP_FAILED) { |
| 389 LOG(i::Isolate::Current(), | 392 LOG(i::Isolate::Current(), |
| 390 StringEvent("OS::Allocate", "mmap failed")); | 393 StringEvent("OS::Allocate", "mmap failed")); |
| 391 return NULL; | 394 return NULL; |
| 392 } | 395 } |
| 393 *allocated = msize; | 396 *allocated = msize; |
| 394 UpdateAllocatedSpaceLimits(mbase, msize); | 397 UpdateAllocatedSpaceLimits(mbase, msize); |
| (...skipping 72 matching lines...) |
| 467 fclose(file); | 470 fclose(file); |
| 468 return NULL; | 471 return NULL; |
| 469 } | 472 } |
| 470 void* memory = | 473 void* memory = |
| 471 mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); | 474 mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); |
| 472 return new PosixMemoryMappedFile(file, memory, size); | 475 return new PosixMemoryMappedFile(file, memory, size); |
| 473 } | 476 } |
| 474 | 477 |
| 475 | 478 |
| 476 PosixMemoryMappedFile::~PosixMemoryMappedFile() { | 479 PosixMemoryMappedFile::~PosixMemoryMappedFile() { |
| 477 if (memory_) munmap(memory_, size_); | 480 if (memory_) OS::Free(memory_, size_); |
| 478 fclose(file_); | 481 fclose(file_); |
| 479 } | 482 } |
| 480 | 483 |
| 481 | 484 |
| 482 void OS::LogSharedLibraryAddresses() { | 485 void OS::LogSharedLibraryAddresses() { |
| 483 // This function assumes that the layout of the file is as follows: | 486 // This function assumes that the layout of the file is as follows: |
| 484 // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] | 487 // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] |
| 485 // If we encounter an unexpected situation we abort scanning further entries. | 488 // If we encounter an unexpected situation we abort scanning further entries. |
| 486 FILE* fp = fopen("/proc/self/maps", "r"); | 489 FILE* fp = fopen("/proc/self/maps", "r"); |
| 487 if (fp == NULL) return; | 490 if (fp == NULL) return; |
| (...skipping 61 matching lines...) |
| 549 // PROT_EXEC so that analysis tools can properly attribute ticks. We | 552 // PROT_EXEC so that analysis tools can properly attribute ticks. We |
| 550 // do a mmap with a name known by ll_prof.py and immediately munmap | 553 // do a mmap with a name known by ll_prof.py and immediately munmap |
| 551 // it. This injects a GC marker into the stream of events generated | 554 // it. This injects a GC marker into the stream of events generated |
| 552 // by the kernel and allows us to synchronize V8 code log and the | 555 // by the kernel and allows us to synchronize V8 code log and the |
| 553 // kernel log. | 556 // kernel log. |
| 554 int size = sysconf(_SC_PAGESIZE); | 557 int size = sysconf(_SC_PAGESIZE); |
| 555 FILE* f = fopen(kGCFakeMmap, "w+"); | 558 FILE* f = fopen(kGCFakeMmap, "w+"); |
| 556 void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, | 559 void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, |
| 557 fileno(f), 0); | 560 fileno(f), 0); |
| 558 ASSERT(addr != MAP_FAILED); | 561 ASSERT(addr != MAP_FAILED); |
| 559 munmap(addr, size); | 562 OS::Free(addr, size); |
| 560 fclose(f); | 563 fclose(f); |
| 561 } | 564 } |
| 562 | 565 |
| 563 | 566 |
| 564 int OS::StackWalk(Vector<OS::StackFrame> frames) { | 567 int OS::StackWalk(Vector<OS::StackFrame> frames) { |
| 565 // backtrace is a glibc extension. | 568 // backtrace is a glibc extension. |
| 566 #ifdef __GLIBC__ | 569 #ifdef __GLIBC__ |
| 567 int frames_size = frames.length(); | 570 int frames_size = frames.length(); |
| 568 ScopedVector<void*> addresses(frames_size); | 571 ScopedVector<void*> addresses(frames_size); |
| 569 | 572 |
| (...skipping 21 matching lines...) |
| 591 #else // ndef __GLIBC__ | 594 #else // ndef __GLIBC__ |
| 592 return 0; | 595 return 0; |
| 593 #endif // ndef __GLIBC__ | 596 #endif // ndef __GLIBC__ |
| 594 } | 597 } |
| 595 | 598 |
| 596 | 599 |
| 597 // Constants used for mmap. | 600 // Constants used for mmap. |
| 598 static const int kMmapFd = -1; | 601 static const int kMmapFd = -1; |
| 599 static const int kMmapFdOffset = 0; | 602 static const int kMmapFdOffset = 0; |
| 600 | 603 |
| 604 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } |
| 601 | 605 |
| 602 VirtualMemory::VirtualMemory(size_t size) { | 606 VirtualMemory::VirtualMemory(size_t size) { |
| 603 address_ = mmap(GetRandomMmapAddr(), size, PROT_NONE, | 607 address_ = ReserveRegion(size); |
| 604 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, | |
| 605 kMmapFd, kMmapFdOffset); | |
| 606 size_ = size; | 608 size_ = size; |
| 607 } | 609 } |
| 608 | 610 |
| 609 | 611 |
| 612 VirtualMemory::VirtualMemory(size_t size, size_t alignment) |
| 613 : address_(NULL), size_(0) { |
| 614 ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); |
| 615 size_t request_size = RoundUp(size + alignment, |
| 616 static_cast<intptr_t>(OS::AllocateAlignment())); |
| 617 void* reservation = mmap(GetRandomMmapAddr(), |
| 618 request_size, |
| 619 PROT_NONE, |
| 620 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, |
| 621 kMmapFd, |
| 622 kMmapFdOffset); |
| 623 if (reservation == MAP_FAILED) return; |
| 624 |
| 625 Address base = static_cast<Address>(reservation); |
| 626 Address aligned_base = RoundUp(base, alignment); |
| 627 ASSERT_LE(base, aligned_base); |
| 628 |
| 629 // Unmap extra memory reserved before and after the desired block. |
| 630 if (aligned_base != base) { |
| 631 size_t prefix_size = static_cast<size_t>(aligned_base - base); |
| 632 OS::Free(base, prefix_size); |
| 633 request_size -= prefix_size; |
| 634 } |
| 635 |
| 636 size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); |
| 637 ASSERT_LE(aligned_size, request_size); |
| 638 |
| 639 if (aligned_size != request_size) { |
| 640 size_t suffix_size = request_size - aligned_size; |
| 641 OS::Free(aligned_base + aligned_size, suffix_size); |
| 642 request_size -= suffix_size; |
| 643 } |
| 644 |
| 645 ASSERT(aligned_size == request_size); |
| 646 |
| 647 address_ = static_cast<void*>(aligned_base); |
| 648 size_ = aligned_size; |
| 649 } |
| 650 |
| 651 |
| 610 VirtualMemory::~VirtualMemory() { | 652 VirtualMemory::~VirtualMemory() { |
| 611 if (IsReserved()) { | 653 if (IsReserved()) { |
| 612 if (0 == munmap(address(), size())) address_ = MAP_FAILED; | 654 bool result = ReleaseRegion(address(), size()); |
| 655 ASSERT(result); |
| 656 USE(result); |
| 613 } | 657 } |
| 614 } | 658 } |
| 615 | 659 |
| 616 | 660 |
| 617 bool VirtualMemory::IsReserved() { | 661 bool VirtualMemory::IsReserved() { |
| 618 return address_ != MAP_FAILED; | 662 return address_ != NULL; |
| 663 } |
| 664 |
| 665 |
| 666 void VirtualMemory::Reset() { |
| 667 address_ = NULL; |
| 668 size_ = 0; |
| 619 } | 669 } |
| 620 | 670 |
| 621 | 671 |
| 622 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { | 672 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { |
| 673 return CommitRegion(address, size, is_executable); |
| 674 } |
| 675 |
| 676 |
| 677 bool VirtualMemory::Uncommit(void* address, size_t size) { |
| 678 return UncommitRegion(address, size); |
| 679 } |
| 680 |
| 681 |
| 682 void* VirtualMemory::ReserveRegion(size_t size) { |
| 683 void* result = mmap(GetRandomMmapAddr(), |
| 684 size, |
| 685 PROT_NONE, |
| 686 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, |
| 687 kMmapFd, |
| 688 kMmapFdOffset); |
| 689 |
| 690 if (result == MAP_FAILED) return NULL; |
| 691 |
| 692 return result; |
| 693 } |
| 694 |
| 695 |
| 696 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { |
| 623 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); | 697 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); |
| 624 if (MAP_FAILED == mmap(address, size, prot, | 698 if (MAP_FAILED == mmap(base, |
| 699 size, |
| 700 prot, |
| 625 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, | 701 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, |
| 626 kMmapFd, kMmapFdOffset)) { | 702 kMmapFd, |
| 703 kMmapFdOffset)) { |
| 627 return false; | 704 return false; |
| 628 } | 705 } |
| 629 | 706 |
| 630 UpdateAllocatedSpaceLimits(address, size); | 707 UpdateAllocatedSpaceLimits(base, size); |
| 631 return true; | 708 return true; |
| 632 } | 709 } |
| 633 | 710 |
| 634 | 711 |
| 635 bool VirtualMemory::Uncommit(void* address, size_t size) { | 712 bool VirtualMemory::UncommitRegion(void* base, size_t size) { |
| 636 return mmap(address, size, PROT_NONE, | 713 return mmap(base, |
| 714 size, |
| 715 PROT_NONE, |
| 637 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, | 716 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, |
| 638 kMmapFd, kMmapFdOffset) != MAP_FAILED; | 717 kMmapFd, |
| 718 kMmapFdOffset) != MAP_FAILED; |
| 719 } |
| 720 |
| 721 |
| 722 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { |
| 723 return munmap(base, size) == 0; |
| 639 } | 724 } |
| 640 | 725 |
| 641 | 726 |
| 642 class Thread::PlatformData : public Malloced { | 727 class Thread::PlatformData : public Malloced { |
| 643 public: | 728 public: |
| 644 PlatformData() : thread_(kNoThread) {} | 729 PlatformData() : thread_(kNoThread) {} |
| 645 | 730 |
| 646 pthread_t thread_; // Thread handle for pthread. | 731 pthread_t thread_; // Thread handle for pthread. |
| 647 }; | 732 }; |
| 648 | 733 |
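The new aligned constructor above works by over-reserving and trimming: it reserves `size + alignment` bytes of inaccessible memory, then unmaps the misaligned prefix and the unused suffix so an aligned block of exactly `RoundUp(size, page)` bytes remains. A minimal sketch of that scheme with plain `mmap`/`munmap` (the `ReserveAligned` helper and its parameters are illustrative only, and it assumes `alignment` is a power of two that is a multiple of the page size):

```cpp
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

// Reserve `size` bytes of PROT_NONE address space at an `alignment`-aligned
// base by over-reserving and trimming the edges. The caller later releases
// it with munmap(result, RoundUp(size, page)).
static void* ReserveAligned(size_t size, size_t alignment, size_t page) {
  size_t request = ((size + alignment + page - 1) / page) * page;
  void* reservation = mmap(NULL, request, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (reservation == MAP_FAILED) return NULL;

  uintptr_t base = reinterpret_cast<uintptr_t>(reservation);
  uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);
  size_t aligned_size = ((size + page - 1) / page) * page;

  if (aligned != base)  // unmap the misaligned prefix
    munmap(reservation, aligned - base);
  if (aligned + aligned_size < base + request)  // unmap the unused suffix
    munmap(reinterpret_cast<void*>(aligned + aligned_size),
           (base + request) - (aligned + aligned_size));

  return reinterpret_cast<void*>(aligned);
}
```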
| (...skipping 488 matching lines...) |
| 1137 | 1222 |
| 1138 | 1223 |
| 1139 void Sampler::Stop() { | 1224 void Sampler::Stop() { |
| 1140 ASSERT(IsActive()); | 1225 ASSERT(IsActive()); |
| 1141 SignalSender::RemoveActiveSampler(this); | 1226 SignalSender::RemoveActiveSampler(this); |
| 1142 SetActive(false); | 1227 SetActive(false); |
| 1143 } | 1228 } |
| 1144 | 1229 |
| 1145 | 1230 |
| 1146 } } // namespace v8::internal | 1231 } } // namespace v8::internal |
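For context on how the reserve/commit split introduced by this patch is meant to be used: callers reserve address space up front as inaccessible (PROT_NONE, MAP_NORESERVE) and only commit pages as they are needed. A hypothetical caller-side sketch, assuming the `VirtualMemory` interface declared in platform.h matches the definitions in this patch and the v8::internal namespace is in scope:

```cpp
// Reserve an aligned region, commit one page on demand, uncommit it again,
// and let the destructor release the reservation via ReleaseRegion().
static void ExampleUsage() {
  const size_t page = OS::AllocateAlignment();
  VirtualMemory reservation(16 * page, page);          // new aligned reserve
  if (!reservation.IsReserved()) return;

  void* first_page = reservation.address();
  if (reservation.Commit(first_page, page, false)) {   // now readable/writable
    // ... use the committed page ...
    reservation.Uncommit(first_page, page);            // back to PROT_NONE
  }
}  // ~VirtualMemory() calls ReleaseRegion(address(), size()).
```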