OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 632 matching lines...)
643 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, | 643 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, |
644 kMmapFd, | 644 kMmapFd, |
645 kMmapFdOffset); | 645 kMmapFdOffset); |
646 | 646 |
647 if (result == MAP_FAILED) return NULL; | 647 if (result == MAP_FAILED) return NULL; |
648 | 648 |
649 return result; | 649 return result; |
650 } | 650 } |
651 | 651 |
652 | 652 |
| 653 void* VirtualMemory::ReserveAlignedRegion(size_t size, size_t alignment) { |
| 654 ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); |
| 655 size_t request_size = RoundUp(size + alignment, |
| 656 static_cast<intptr_t>(OS::AllocateAlignment())); |
| 657 void* reservation = mmap(GetRandomMmapAddr(), |
| 658 request_size, |
| 659 PROT_NONE, |
| 660 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, |
| 661 kMmapFd, |
| 662 kMmapFdOffset); |
| 663 if (reservation == MAP_FAILED) return NULL; |
| 664 Address base = static_cast<Address>(reservation); |
| 665 Address aligned_base = RoundUp(base, alignment); |
| 666 ASSERT(base <= aligned_base); |
| 667 |
| 668 // Unmap extra memory reserved before and after the desired block. Note that |size| must be a multiple of the page size for the trailing munmap address below to be page-aligned.
| 669 size_t bytes_prior = static_cast<size_t>(aligned_base - base); |
| 670 if (bytes_prior > 0) { |
| 671 munmap(base, bytes_prior); |
| 672 } |
| 673 if (bytes_prior < request_size - size) {
| 674 munmap(aligned_base + size, request_size - size - bytes_prior); |
| 675 } |
| 676 |
| 677 return static_cast<void*>(aligned_base); |
| 678 } |
| 679 |
| 680 |
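For context, the new function uses the standard over-reserve-then-trim trick: reserve size + alignment bytes, round the base up to the requested alignment, then hand back the unused slack on both sides. A minimal standalone sketch of the same technique in plain POSIX terms (ReserveAligned is an illustrative stand-in, not V8 code; it assumes alignment is a power of two and that both arguments are multiples of the page size):

#include <stdint.h>
#include <sys/mman.h>

// Assumes |size| and |alignment| are page-size multiples and |alignment|
// is a power of two, so every munmap below lands on a page boundary.
static void* ReserveAligned(size_t size, size_t alignment) {
  // Over-reserve so that an aligned block of |size| bytes must fit inside.
  size_t request = size + alignment;
  void* raw = mmap(NULL, request, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (raw == MAP_FAILED) return NULL;

  uintptr_t base = reinterpret_cast<uintptr_t>(raw);
  uintptr_t mask = static_cast<uintptr_t>(alignment) - 1;
  uintptr_t aligned = (base + mask) & ~mask;       // round base up
  size_t prefix = static_cast<size_t>(aligned - base);  // slack in front
  size_t suffix = request - prefix - size;              // slack behind

  if (prefix > 0) munmap(raw, prefix);
  if (suffix > 0) munmap(reinterpret_cast<void*>(aligned + size), suffix);
  return reinterpret_cast<void*>(aligned);
}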
653 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { | 681 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { |
654 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); | 682 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); |
655 if (MAP_FAILED == mmap(base, | 683 if (MAP_FAILED == mmap(base, |
656 size, | 684 size, |
657 prot, | 685 prot, |
658 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, | 686 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, |
659 kMmapFd, | 687 kMmapFd, |
660 kMmapFdOffset)) { | 688 kMmapFdOffset)) { |
661 return false; | 689 return false; |
662 } | 690 } |
(...skipping 516 matching lines...)
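For reference, committing re-maps a sub-range of the PROT_NONE reservation with real protections via MAP_FIXED, and uncommitting maps PROT_NONE back over it. A minimal sketch of that pairing in the spirit of CommitRegion above (Commit and Uncommit are illustrative names, not V8's API; the -1/0 literals stand in for kMmapFd/kMmapFdOffset):

#include <sys/mman.h>

// Commit: map accessible pages over part of an existing reservation.
// MAP_FIXED is safe here because the target range is already owned by
// the reservation, so no unrelated mapping can be clobbered.
static bool Commit(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  return mmap(base, size, prot,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
              -1, 0) != MAP_FAILED;
}

// Uncommit: replace the pages with a fresh PROT_NONE, MAP_NORESERVE
// mapping, releasing the backing memory while keeping the address
// range reserved.
static bool Uncommit(void* base, size_t size) {
  return mmap(base, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
              -1, 0) != MAP_FAILED;
}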
1179 | 1207 |
1180 | 1208 |
1181 void Sampler::Stop() { | 1209 void Sampler::Stop() { |
1182 ASSERT(IsActive()); | 1210 ASSERT(IsActive()); |
1183 SignalSender::RemoveActiveSampler(this); | 1211 SignalSender::RemoveActiveSampler(this); |
1184 SetActive(false); | 1212 SetActive(false); |
1185 } | 1213 } |
1186 | 1214 |
1187 | 1215 |
1188 } } // namespace v8::internal | 1216 } } // namespace v8::internal |