| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 51 matching lines...) | |
| 62 // Extra functions for MinGW. Most of these are the _s functions which are in | 62 // Extra functions for MinGW. Most of these are the _s functions which are in |
| 63 // the Microsoft Visual Studio C++ CRT. | 63 // the Microsoft Visual Studio C++ CRT. |
| 64 #ifdef __MINGW32__ | 64 #ifdef __MINGW32__ |
| 65 | 65 |
| 66 | 66 |
| 67 #ifndef __MINGW64_VERSION_MAJOR | 67 #ifndef __MINGW64_VERSION_MAJOR |
| 68 | 68 |
| 69 #define _TRUNCATE 0 | 69 #define _TRUNCATE 0 |
| 70 #define STRUNCATE 80 | 70 #define STRUNCATE 80 |
| 71 | 71 |
| 72 inline void MemoryBarrier() { | |
| 73 int barrier = 0; | |
| 74 __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier)); | |
| 75 } | |
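The deleted shim supplied MemoryBarrier() for the original mingw.org toolchain; mingw-w64 (detected via __MINGW64_VERSION_MAJOR) already declares it in its own headers, so the guard makes the shim unnecessary there. For reference, a minimal sketch of an equivalent full barrier using a GCC builtin (illustrative only, not part of this patch):

    inline void FullMemoryBarrier() {
      // Full compiler and hardware fence; on x86 GCC emits mfence or an
      // equivalent locked instruction.
      __sync_synchronize();
    }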
| 76 | |
| 77 #endif // __MINGW64_VERSION_MAJOR | 72 #endif // __MINGW64_VERSION_MAJOR |
| 78 | 73 |
| 79 | 74 |
| 80 int localtime_s(tm* out_tm, const time_t* time) { | 75 int localtime_s(tm* out_tm, const time_t* time) { |
| 81 tm* posix_local_time_struct = localtime(time); | 76 tm* posix_local_time_struct = localtime(time); |
| 82 if (posix_local_time_struct == NULL) return 1; | 77 if (posix_local_time_struct == NULL) return 1; |
| 83 *out_tm = *posix_local_time_struct; | 78 *out_tm = *posix_local_time_struct; |
| 84 return 0; | 79 return 0; |
| 85 } | 80 } |
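This shim adapts POSIX localtime() to the MSVCRT localtime_s() signature; note the argument order is (tm*, time_t*), the reverse of C11 Annex K's localtime_s. A usage sketch under that assumption:

    time_t now = time(NULL);
    tm local_time;
    if (localtime_s(&local_time, &now) == 0) {
      // local_time now holds the broken-down local time.
    }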
| 86 | 81 |
| (...skipping 34 matching lines...) | |
| 121 CHECK_GT(dest_size, 0); | 116 CHECK_GT(dest_size, 0); |
| 122 *dest = 0; | 117 *dest = 0; |
| 123 return 0; | 118 return 0; |
| 124 } | 119 } |
| 125 | 120 |
| 126 #endif // __MINGW32__ | 121 #endif // __MINGW32__ |
| 127 | 122 |
| 128 namespace v8 { | 123 namespace v8 { |
| 129 namespace internal { | 124 namespace internal { |
| 130 | 125 |
| 131 intptr_t OS::MaxVirtualMemory() { | |
| 132 return 0; | |
| 133 } | |
| 134 | |
| 135 | |
| 136 double ceiling(double x) { | 126 double ceiling(double x) { |
| 137 return ceil(x); | 127 return ceil(x); |
| 138 } | 128 } |
| 139 | 129 |
| 140 | 130 |
| 141 #if V8_TARGET_ARCH_IA32 | 131 #if V8_TARGET_ARCH_IA32 |
| 142 static void MemMoveWrapper(void* dest, const void* src, size_t size) { | 132 static void MemMoveWrapper(void* dest, const void* src, size_t size) { |
| 143 memmove(dest, src, size); | 133 memmove(dest, src, size); |
| 144 } | 134 } |
| 145 | 135 |
| (...skipping 590 matching lines...) | |
| 736 int result = strncpy_s(dest.start(), dest.length(), src, n); | 726 int result = strncpy_s(dest.start(), dest.length(), src, n); |
| 737 USE(result); | 727 USE(result); |
| 738 ASSERT(result == 0 || (n == _TRUNCATE && result == STRUNCATE)); | 728 ASSERT(result == 0 || (n == _TRUNCATE && result == STRUNCATE)); |
| 739 } | 729 } |
| 740 | 730 |
| 741 | 731 |
| 742 #undef _TRUNCATE | 732 #undef _TRUNCATE |
| 743 #undef STRUNCATE | 733 #undef STRUNCATE |
| 744 | 734 |
| 745 | 735 |
| 746 // Get the system's page size used by VirtualAlloc(), rounded up to the | |
| 747 // next power of two if needed. A power of two is always returned because | |
| 748 // the rounding up in OS::Allocate expects one. | |
| 749 static size_t GetPageSize() { | |
| 750 static size_t page_size = 0; | |
| 751 if (page_size == 0) { | |
| 752 SYSTEM_INFO info; | |
| 753 GetSystemInfo(&info); | |
| 754 page_size = RoundUpToPowerOf2(info.dwPageSize); | |
| 755 } | |
| 756 return page_size; | |
| 757 } | |
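RoundUpToPowerOf2 is defined elsewhere in V8; to show why the cached result satisfies the power-of-two assumption in OS::Allocate, here is an illustrative 32-bit equivalent (a sketch, not the library routine; assumes x >= 1):

    static inline uint32_t RoundUpToPowerOf2Sketch(uint32_t x) {
      // Smear the highest set bit into every lower position, then add one.
      x--;
      x |= x >> 1;
      x |= x >> 2;
      x |= x >> 4;
      x |= x >> 8;
      x |= x >> 16;
      return x + 1;
    }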
| 758 | |
| 759 | |
| 760 // The allocation alignment is the guaranteed alignment for | |
| 761 // VirtualAlloc'ed blocks of memory. | |
| 762 size_t OS::AllocateAlignment() { | |
| 763 static size_t allocate_alignment = 0; | |
| 764 if (allocate_alignment == 0) { | |
| 765 SYSTEM_INFO info; | |
| 766 GetSystemInfo(&info); | |
| 767 allocate_alignment = info.dwAllocationGranularity; | |
| 768 } | |
| 769 return allocate_alignment; | |
| 770 } | |
| 771 | |
| 772 | |
| 773 void* OS::GetRandomMmapAddr() { | |
| 774 Isolate* isolate = Isolate::UncheckedCurrent(); | |
| 775 // Note that the current isolate isn't set up in a call path via | |
| 776 // CpuFeatures::Probe. We don't care about randomization in this case because | |
| 777 // the code page is immediately freed. | |
| 778 if (isolate != NULL) { | |
| 779 // The address range used to randomize RWX allocations in OS::Allocate. | |
| 780 // Try not to map pages into the default range where Windows loads DLLs. | |
| 781 // Use a multiple of 64k to prevent committing unused memory. | |
| 782 // Note: This does not guarantee RWX regions will be within the | |
| 783 // range kAllocationRandomAddressMin to kAllocationRandomAddressMax. | |
| 784 #ifdef V8_HOST_ARCH_64_BIT | |
| 785 static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000; | |
| 786 static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000; | |
| 787 #else | |
| 788 static const intptr_t kAllocationRandomAddressMin = 0x04000000; | |
| 789 static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000; | |
| 790 #endif | |
| 791 uintptr_t address = | |
| 792 (isolate->random_number_generator()->NextInt() << kPageSizeBits) | | |
| 793 kAllocationRandomAddressMin; | |
| 794 address &= kAllocationRandomAddressMax; | |
| 795 return reinterpret_cast<void *>(address); | |
| 796 } | |
| 797 return NULL; | |
| 798 } | |
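The bit operations guarantee the documented properties: the OR sets the 0x04000000 bit so the address stays above the low range where Windows prefers to map DLLs, and the AND with 0x3FFF0000 both caps the address and clears the low 16 bits, yielding a 64 KB multiple. A worked 32-bit example with an assumed post-shift random value:

    // assumed (NextInt() << kPageSizeBits) = 0x12345000
    // 0x12345000 | 0x04000000 = 0x16345000   (biased above the DLL range)
    // 0x16345000 & 0x3FFF0000 = 0x16340000   (capped; low 16 bits cleared)
    // => 0x16340000, a 64 KB multiple in [0x04000000, 0x3FFF0000]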
| 799 | |
| 800 | |
| 801 static void* RandomizedVirtualAlloc(size_t size, int action, int protection) { | |
| 802 LPVOID base = NULL; | |
| 803 | |
| 804 if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) { | |
| 805 // For executable pages, try to randomize the allocation address. | |
| 806 for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) { | |
| 807 base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection); | |
| 808 } | |
| 809 } | |
| 810 | |
| 811 // After three attempts, give up and let the OS find an address to use. | |
| 812 if (base == NULL) base = VirtualAlloc(NULL, size, action, protection); | |
| 813 | |
| 814 return base; | |
| 815 } | |
| 816 | |
| 817 | |
| 818 void* OS::Allocate(const size_t requested, | |
| 819 size_t* allocated, | |
| 820 bool is_executable) { | |
| 821 // VirtualAlloc rounds the allocated size up to the page size automatically. | |
| 822 size_t msize = RoundUp(requested, static_cast<int>(GetPageSize())); | |
| 823 | |
| 824 // Windows XP SP2 supports Data Execution Prevention (DEP). | |
| 825 int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; | |
| 826 | |
| 827 LPVOID mbase = RandomizedVirtualAlloc(msize, | |
| 828 MEM_COMMIT | MEM_RESERVE, | |
| 829 prot); | |
| 830 | |
| 831 if (mbase == NULL) { | |
| 832 LOG(Isolate::Current(), StringEvent("OS::Allocate", "VirtualAlloc failed")); | |
| 833 return NULL; | |
| 834 } | |
| 835 | |
| 836 ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment())); | |
| 837 | |
| 838 *allocated = msize; | |
| 839 return mbase; | |
| 840 } | |
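A caller-side sketch (the calling code is hypothetical) of the contract: the request is rounded up, the actual size comes back through *allocated, and that pair is later handed to OS::Free:

    size_t actual = 0;
    void* p = OS::Allocate(100 * 1024, &actual, false);  // not executable
    if (p != NULL) {
      // ... use the range [p, p + actual) ...
      OS::Free(p, actual);
    }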
| 841 | |
| 842 | |
| 843 void OS::Free(void* address, const size_t size) { | |
| 844 // TODO(1240712): VirtualFree has a return value which is ignored here. | |
| 845 VirtualFree(address, 0, MEM_RELEASE); | |
| 846 USE(size); | |
| 847 } | |
| 848 | |
| 849 | |
| 850 intptr_t OS::CommitPageSize() { | |
| 851 return 4096; | |
| 852 } | |
| 853 | |
| 854 | |
| 855 void OS::ProtectCode(void* address, const size_t size) { | |
| 856 DWORD old_protect; | |
| 857 VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect); | |
| 858 } | |
| 859 | |
| 860 | |
| 861 void OS::Guard(void* address, const size_t size) { | |
| 862 DWORD oldprotect; | |
| 863 VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect); | |
| 864 } | |
| 865 | |
| 866 | |
| 867 void OS::Sleep(int milliseconds) { | 736 void OS::Sleep(int milliseconds) { |
| 868 ::Sleep(milliseconds); | 737 ::Sleep(milliseconds); |
| 869 } | 738 } |
| 870 | 739 |
| 871 | 740 |
| 872 void OS::Abort() { | 741 void OS::Abort() { |
| 873 if (IsDebuggerPresent() || FLAG_break_on_abort) { | 742 if (IsDebuggerPresent() || FLAG_break_on_abort) { |
| 874 DebugBreak(); | 743 DebugBreak(); |
| 875 } else { | 744 } else { |
| 876 // Make the MSVCRT do a silent abort. | 745 // Make the MSVCRT do a silent abort. |
| (...skipping 484 matching lines...) | |
| 1361 #elif defined(__MINGW32__) | 1230 #elif defined(__MINGW32__) |
| 1362 // With gcc 4.4 the tree vectorization optimizer can generate code | 1231 // With gcc 4.4 the tree vectorization optimizer can generate code |
| 1363 // that requires 16-byte alignment, such as movdqa on x86. | 1232 // that requires 16-byte alignment, such as movdqa on x86. |
| 1364 return 16; | 1233 return 16; |
| 1365 #else | 1234 #else |
| 1366 return 8; // Floating-point math runs faster with 8-byte alignment. | 1235 return 8; // Floating-point math runs faster with 8-byte alignment. |
| 1367 #endif | 1236 #endif |
| 1368 } | 1237 } |
| 1369 | 1238 |
| 1370 | 1239 |
| 1371 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } | |
| 1372 | |
| 1373 | |
| 1374 VirtualMemory::VirtualMemory(size_t size) | |
| 1375 : address_(ReserveRegion(size)), size_(size) { } | |
| 1376 | |
| 1377 | |
| 1378 VirtualMemory::VirtualMemory(size_t size, size_t alignment) | |
| 1379 : address_(NULL), size_(0) { | |
| 1380 ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); | |
| 1381 size_t request_size = RoundUp(size + alignment, | |
| 1382 static_cast<intptr_t>(OS::AllocateAlignment())); | |
| 1383 void* address = ReserveRegion(request_size); | |
| 1384 if (address == NULL) return; | |
| 1385 Address base = RoundUp(static_cast<Address>(address), alignment); | |
| 1386 // Try reducing the size by freeing and then reallocating a specific area. | |
| 1387 bool result = ReleaseRegion(address, request_size); | |
| 1388 USE(result); | |
| 1389 ASSERT(result); | |
| 1390 address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS); | |
| 1391 if (address != NULL) { | |
| 1392 request_size = size; | |
| 1393 ASSERT(base == static_cast<Address>(address)); | |
| 1394 } else { | |
| 1395 // Resizing failed, just go with a bigger area. | |
| 1396 address = ReserveRegion(request_size); | |
| 1397 if (address == NULL) return; | |
| 1398 } | |
| 1399 address_ = address; | |
| 1400 size_ = request_size; | |
| 1401 } | |
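The constructor over-reserves by the alignment, computes an aligned base inside the oversized reservation, releases it, and immediately tries to re-reserve exactly the aligned range; a race with another thread is handled by falling back to a fresh oversized reservation. A worked example with assumed numbers (64 KB allocation granularity, 1 MB size and alignment):

    // request_size = RoundUp(1 MB + 1 MB, 64 KB) = 2 MB
    // suppose ReserveRegion returns 0x00A50000
    // base = RoundUp(0x00A50000, 0x00100000) = 0x00B00000
    // release the 2 MB region, then re-reserve exactly [0x00B00000, +1 MB)
    // if that VirtualAlloc fails (another thread took the range), keep a
    // fresh 2 MB reservation instead (the "bigger area" branch above)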
| 1402 | |
| 1403 | |
| 1404 VirtualMemory::~VirtualMemory() { | |
| 1405 if (IsReserved()) { | |
| 1406 bool result = ReleaseRegion(address(), size()); | |
| 1407 ASSERT(result); | |
| 1408 USE(result); | |
| 1409 } | |
| 1410 } | |
| 1411 | |
| 1412 | |
| 1413 bool VirtualMemory::IsReserved() { | |
| 1414 return address_ != NULL; | |
| 1415 } | |
| 1416 | |
| 1417 | |
| 1418 void VirtualMemory::Reset() { | |
| 1419 address_ = NULL; | |
| 1420 size_ = 0; | |
| 1421 } | |
| 1422 | |
| 1423 | |
| 1424 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { | |
| 1425 return CommitRegion(address, size, is_executable); | |
| 1426 } | |
| 1427 | |
| 1428 | |
| 1429 bool VirtualMemory::Uncommit(void* address, size_t size) { | |
| 1430 ASSERT(IsReserved()); | |
| 1431 return UncommitRegion(address, size); | |
| 1432 } | |
| 1433 | |
| 1434 | |
| 1435 bool VirtualMemory::Guard(void* address) { | |
| 1436 if (NULL == VirtualAlloc(address, | |
| 1437 OS::CommitPageSize(), | |
| 1438 MEM_COMMIT, | |
| 1439 PAGE_NOACCESS)) { | |
| 1440 return false; | |
| 1441 } | |
| 1442 return true; | |
| 1443 } | |
| 1444 | |
| 1445 | |
| 1446 void* VirtualMemory::ReserveRegion(size_t size) { | |
| 1447 return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS); | |
| 1448 } | |
| 1449 | |
| 1450 | |
| 1451 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { | |
| 1452 int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; | |
| 1453 if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) { | |
| 1454 return false; | |
| 1455 } | |
| 1456 return true; | |
| 1457 } | |
| 1458 | |
| 1459 | |
| 1460 bool VirtualMemory::UncommitRegion(void* base, size_t size) { | |
| 1461 return VirtualFree(base, size, MEM_DECOMMIT) != 0; | |
| 1462 } | |
| 1463 | |
| 1464 | |
| 1465 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { | |
| 1466 return VirtualFree(base, 0, MEM_RELEASE) != 0; | |
| 1467 } | |
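The asymmetry between these two calls is a Windows API requirement worth spelling out: MEM_DECOMMIT takes the exact size and keeps the address range reserved, while MEM_RELEASE requires a size of 0 and frees the entire reservation starting at the base address VirtualAlloc originally returned:

    // Decommit pages, keeping the address range reserved:
    VirtualFree(base, size, MEM_DECOMMIT);
    // Release the whole reservation; size must be 0:
    VirtualFree(base, 0, MEM_RELEASE);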
| 1468 | |
| 1469 | |
| 1470 bool VirtualMemory::HasLazyCommits() { | |
| 1471 // TODO(alph): implement for the platform. | |
| 1472 return false; | |
| 1473 } | |
| 1474 | |
| 1475 | |
| 1476 // ---------------------------------------------------------------------------- | 1240 // ---------------------------------------------------------------------------- |
| 1477 // Win32 thread support. | 1241 // Win32 thread support. |
| 1478 | 1242 |
| 1479 // Definition of invalid thread handle and id. | 1243 // Definition of invalid thread handle and id. |
| 1480 static const HANDLE kNoThread = INVALID_HANDLE_VALUE; | 1244 static const HANDLE kNoThread = INVALID_HANDLE_VALUE; |
| 1481 | 1245 |
| 1482 // Entry point for threads. The supplied argument is a pointer to the thread | 1246 // Entry point for threads. The supplied argument is a pointer to the thread |
| 1483 // object. The entry function dispatches to the run method in the thread | 1247 // object. The entry function dispatches to the run method in the thread |
| 1484 // object. It is important that this function has __stdcall calling | 1248 // object. It is important that this function has __stdcall calling |
| 1485 // convention. | 1249 // convention. |
| (...skipping 83 matching lines...) | |
| 1569 ASSERT(result); | 1333 ASSERT(result); |
| 1570 } | 1334 } |
| 1571 | 1335 |
| 1572 | 1336 |
| 1573 | 1337 |
| 1574 void Thread::YieldCPU() { | 1338 void Thread::YieldCPU() { |
| 1575 Sleep(0); | 1339 Sleep(0); |
| 1576 } | 1340 } |
| 1577 | 1341 |
| 1578 } } // namespace v8::internal | 1342 } } // namespace v8::internal |