OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 743 matching lines...)
754 // determining that pointers are outside the heap (used mostly in assertions | 754 // determining that pointers are outside the heap (used mostly in assertions |
755 // and verification). The estimate is conservative, i.e., not all addresses in | 755 // and verification). The estimate is conservative, i.e., not all addresses in |
756 // 'allocated' space are actually allocated to our heap. The range is | 756 // 'allocated' space are actually allocated to our heap. The range is |
757 // [lowest, highest), inclusive on the low and exclusive on the high end. | 757 // [lowest, highest), inclusive on the low and exclusive on the high end. |
758 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1); | 758 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1); |
759 static void* highest_ever_allocated = reinterpret_cast<void*>(0); | 759 static void* highest_ever_allocated = reinterpret_cast<void*>(0); |
760 | 760 |
761 | 761 |
762 static void UpdateAllocatedSpaceLimits(void* address, int size) { | 762 static void UpdateAllocatedSpaceLimits(void* address, int size) { |
763 ASSERT(limit_mutex != NULL); | 763 ASSERT(limit_mutex != NULL); |
764 ScopedLock lock(limit_mutex); | 764 LockGuard<Mutex> lock_guard(limit_mutex); |
765 | 765 |
766 lowest_ever_allocated = Min(lowest_ever_allocated, address); | 766 lowest_ever_allocated = Min(lowest_ever_allocated, address); |
767 highest_ever_allocated = | 767 highest_ever_allocated = |
768 Max(highest_ever_allocated, | 768 Max(highest_ever_allocated, |
769 reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); | 769 reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); |
770 } | 770 } |
771 | 771 |
772 | 772 |
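The change at line 764 swaps the removed ScopedLock helper for LockGuard<Mutex>, matching the Mutex that OS::SetUp() now constructs directly further down (line 1861). Both follow the RAII pattern: the guard's constructor acquires the mutex and its destructor releases it when lock_guard goes out of scope, so UpdateAllocatedSpaceLimits cannot return with limit_mutex still held. A minimal sketch of such a guard, assuming only a mutex type with Lock()/Unlock() methods; the real LockGuard lives in V8's platform headers and may differ in detail:

// Hypothetical RAII lock guard in the spirit of LockGuard<Mutex>;
// not V8's actual implementation.
template <typename MutexType>
class LockGuardSketch {
 public:
  explicit LockGuardSketch(MutexType* mutex) : mutex_(mutex) { mutex_->Lock(); }
  ~LockGuardSketch() { mutex_->Unlock(); }  // released on every exit path

 private:
  MutexType* mutex_;

  // Copying would unlock the mutex twice, so it is disallowed (C++03 style).
  LockGuardSketch(const LockGuardSketch&);
  void operator=(const LockGuardSketch&);
};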
773 bool OS::IsOutsideAllocatedSpace(void* pointer) { | 773 bool OS::IsOutsideAllocatedSpace(void* pointer) { |
774 if (pointer < lowest_ever_allocated || pointer >= highest_ever_allocated) | 774 if (pointer < lowest_ever_allocated || pointer >= highest_ever_allocated) |
(...skipping 834 matching lines...)
1609 } | 1609 } |
1610 | 1610 |
1611 | 1611 |
1612 | 1612 |
1613 void Thread::YieldCPU() { | 1613 void Thread::YieldCPU() { |
1614 Sleep(0); | 1614 Sleep(0); |
1615 } | 1615 } |
1616 | 1616 |
1617 | 1617 |
1618 // ---------------------------------------------------------------------------- | 1618 // ---------------------------------------------------------------------------- |
1619 // Win32 mutex support. | |
1620 // | |
1621 // On Win32 mutexes are implemented using CRITICAL_SECTION objects. These are | |
1622 // faster than Win32 Mutex objects because they are implemented using user mode | |
1623 // atomic instructions. Therefore we only do ring transitions if there is lock | |
1624 // contention. | |
1625 | |
1626 class Win32Mutex : public Mutex { | |
1627 public: | |
1628 Win32Mutex() { InitializeCriticalSection(&cs_); } | |
1629 | |
1630 virtual ~Win32Mutex() { DeleteCriticalSection(&cs_); } | |
1631 | |
1632 virtual int Lock() { | |
1633 EnterCriticalSection(&cs_); | |
1634 return 0; | |
1635 } | |
1636 | |
1637 virtual int Unlock() { | |
1638 LeaveCriticalSection(&cs_); | |
1639 return 0; | |
1640 } | |
1641 | |
1642 | |
1643 virtual bool TryLock() { | |
1644 // Returns non-zero if the critical section was entered successfully. |
1645 return TryEnterCriticalSection(&cs_); | |
1646 } | |
1647 | |
1648 private: | |
1649 CRITICAL_SECTION cs_; // Critical section used for mutex | |
1650 }; | |
1651 | |
1652 | |
1653 Mutex* OS::CreateMutex() { | |
1654 return new Win32Mutex(); | |
1655 } | |
1656 | |
1657 | |
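The removed comment above explains why the deleted Win32Mutex wrapped a CRITICAL_SECTION rather than a kernel Mutex object: entering and leaving an uncontended critical section stays in user mode, and a ring transition to the kernel happens only under contention. With OS::CreateMutex() gone, callers such as OS::SetUp() below construct the Mutex class directly. A standalone Win32 sketch (not V8 code) of the underlying CRITICAL_SECTION pattern:

// Two threads incrementing a shared counter under a critical section.
#include <windows.h>

static CRITICAL_SECTION cs;
static int shared_counter = 0;

static DWORD WINAPI Worker(LPVOID) {
  for (int i = 0; i < 100000; ++i) {
    EnterCriticalSection(&cs);   // acquired in user mode when uncontended
    ++shared_counter;
    LeaveCriticalSection(&cs);
  }
  return 0;
}

int main() {
  InitializeCriticalSection(&cs);
  HANDLE threads[2];
  for (int i = 0; i < 2; ++i) {
    threads[i] = CreateThread(NULL, 0, Worker, NULL, 0, NULL);
  }
  WaitForMultipleObjects(2, threads, TRUE, INFINITE);
  for (int i = 0; i < 2; ++i) CloseHandle(threads[i]);
  DeleteCriticalSection(&cs);
  return shared_counter == 200000 ? 0 : 1;
}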
1658 // ---------------------------------------------------------------------------- | |
1659 // Win32 semaphore support. | 1619 // Win32 semaphore support. |
1660 // | 1620 // |
1661 // On Win32 semaphores are implemented using Win32 Semaphore objects. The | 1621 // On Win32 semaphores are implemented using Win32 Semaphore objects. The |
1662 // semaphores are anonymous. Also, the semaphores are initialized to have | 1622 // semaphores are anonymous. Also, the semaphores are initialized to have |
1663 // no upper limit on count. | 1623 // no upper limit on count. |
1664 | 1624 |
1665 | 1625 |
1666 class Win32Semaphore : public Semaphore { | 1626 class Win32Semaphore : public Semaphore { |
1667 public: | 1627 public: |
1668 explicit Win32Semaphore(int count) { | 1628 explicit Win32Semaphore(int count) { |
(...skipping 222 matching lines...)
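As the retained comment above notes, Win32Semaphore builds on anonymous Win32 Semaphore objects with effectively no upper bound on the count. A standalone sketch of that pattern (not V8 code; the LONG_MAX maximum and the Signal()/Wait() mapping are assumptions for illustration):

// Anonymous Win32 semaphore: signal once, then wait once.
#include <windows.h>
#include <limits.h>
#include <stdio.h>

int main() {
  // NULL name makes the semaphore anonymous; initial count 0, and a maximum
  // count of LONG_MAX approximates "no upper limit".
  HANDLE sem = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
  if (sem == NULL) return 1;

  ReleaseSemaphore(sem, 1, NULL);                      // Signal() equivalent
  DWORD result = WaitForSingleObject(sem, INFINITE);   // Wait() equivalent
  printf("wait result: %lu\n", result);                // 0 == WAIT_OBJECT_0

  CloseHandle(sem);
  return 0;
}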
1891 | 1851 |
1892 | 1852 |
1893 void OS::SetUp() { | 1853 void OS::SetUp() { |
1894 // Seed the random number generator. | 1854 // Seed the random number generator. |
1895 // Convert the current time to a 64-bit integer first, before converting it | 1855 // Convert the current time to a 64-bit integer first, before converting it |
1896 // to an unsigned. Going directly can cause an overflow and the seed to be | 1856 // to an unsigned. Going directly can cause an overflow and the seed to be |
1897 // set to all ones. The seed will be identical for different instances that | 1857 // set to all ones. The seed will be identical for different instances that |
1898 // call this setup code within the same millisecond. | 1858 // call this setup code within the same millisecond. |
1899 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()); | 1859 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()); |
1900 srand(static_cast<unsigned int>(seed)); | 1860 srand(static_cast<unsigned int>(seed)); |
1901 limit_mutex = CreateMutex(); | 1861 limit_mutex = new Mutex(); |
1902 } | 1862 } |
1903 | 1863 |
1904 | 1864 |
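The seeding comment is worth unpacking: TimeCurrentMillis() returns the time as a double whose value (milliseconds since the epoch) no longer fits in a 32-bit unsigned int, and converting a double directly to an integer type that cannot represent the truncated value is undefined behavior, which in practice tends to produce the all-ones seed the comment warns about. The intermediate uint64_t keeps the first conversion in range; the second step is then an ordinary truncation modulo 2^32. A standalone sketch (not V8 code; the value is hypothetical):

// Two-step cast from a large double timestamp to an unsigned seed.
#include <stdint.h>
#include <stdio.h>

int main() {
  double millis = 1372636800000.0;  // hypothetical TimeCurrentMillis() result

  // Well defined: double -> uint64_t (value in range), then uint64_t ->
  // unsigned int (truncates modulo 2^32 on 32-bit unsigned).
  unsigned int good_seed =
      static_cast<unsigned int>(static_cast<uint64_t>(millis));

  // Direct double -> unsigned int is undefined behavior here because the
  // truncated value does not fit:
  // unsigned int bad_seed = static_cast<unsigned int>(millis);  // don't

  printf("seed: %u\n", good_seed);
  return 0;
}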
1905 void OS::TearDown() { | 1865 void OS::TearDown() { |
1906 delete limit_mutex; | 1866 delete limit_mutex; |
1907 } | 1867 } |
1908 | 1868 |
1909 | 1869 |
1910 } } // namespace v8::internal | 1870 } } // namespace v8::internal |