OLD | NEW |
---|---|
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
11 // with the distribution. | 11 // with the distribution. |
(...skipping 1484 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1496 ASSERT(IsReserved()); | 1496 ASSERT(IsReserved()); |
1497 return UncommitRegion(address, size); | 1497 return UncommitRegion(address, size); |
1498 } | 1498 } |
1499 | 1499 |
1500 | 1500 |
1501 void* VirtualMemory::ReserveRegion(size_t size) { | 1501 void* VirtualMemory::ReserveRegion(size_t size) { |
1502 return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS); | 1502 return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS); |
1503 } | 1503 } |
1504 | 1504 |
1505 | 1505 |
1506 void* VirtualMemory::ReserveAlignedRegion(size_t size, size_t alignment) { | |
1507 ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); | |
1508 size_t request_size = RoundUp(size + alignment, | |
1509 static_cast<intptr_t>(OS::AllocateAlignment())); | |
1510 Address result; | |
1511 do { | |
1512 void* reservation = | |
1513 VirtualAlloc(NULL, request_size, MEM_RESERVE, PAGE_NOACCESS); | |
1514 // If we can't allocate at all, give up. | |
1515 if (reservation == NULL) return NULL; | |
1516 Address base = static_cast<Address>(reservation); | |
1517 Address aligned_base = RoundUp(base, alignment); | |
1518 ASSERT(base <= aligned_base); | |
1519 // Try to reallocate part of the original allocation by first freeing it, | |
1520 // and then allocating with a specific target address in the middle of | |
1521 // the freed area. | |
1522 // This can fail in the rare case that someone else in the same process | |
1523 // allocates the same memory between the VirtualFree and VirtualAlloc | |
1524 // calls. In that case, just try again. | |
1525 VirtualFree(reservation, 0, MEM_RELEASE); | |
1526 reservation = VirtualAlloc(aligned_base, size, MEM_RESERVE, PAGE_NOACCESS); | |
1527 result = static_cast<Address>(reservation); | |
1528 ASSERT(result == NULL || result == aligned_base); | |
1529 } while (result == NULL); | |
Vyacheslav Egorov (Chromium)
2011/09/12 13:42:48
this looks too fragile and might go into an infinite loop
Lasse Reichstein
2011/09/12 19:18:02
This was an attempt to avoid finding a place to st
| |
1530 | |
1531 return static_cast<void*>(result); | |
1532 } | |
1533 | |
1534 | |
1506 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { | 1535 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { |
1507 int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; | 1536 int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; |
1508 if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) { | 1537 if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) { |
1509 return false; | 1538 return false; |
1510 } | 1539 } |
1511 | 1540 |
1512 UpdateAllocatedSpaceLimits(base, static_cast<int>(size)); | 1541 UpdateAllocatedSpaceLimits(base, static_cast<int>(size)); |
1513 return true; | 1542 return true; |
1514 } | 1543 } |
1515 | 1544 |
1516 | 1545 |
1517 bool VirtualMemory::UncommitRegion(void* base, size_t size) { | 1546 bool VirtualMemory::UncommitRegion(void* base, size_t size) { |
1518 return VirtualFree(base, size, MEM_DECOMMIT) != false; | 1547 return VirtualFree(base, size, MEM_DECOMMIT) != 0; |
1519 } | 1548 } |
1520 | 1549 |
1521 | 1550 |
1522 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { | 1551 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { |
1523 return VirtualFree(base, size, MEM_DECOMMIT) != false; | 1552 return VirtualFree(base, 0, MEM_DECOMMIT) != 0; |
Vyacheslav Egorov (Chromium)
2011/09/12 13:42:48
Should not it become MEM_RELEASE?
Lasse Reichstein
2011/09/12 19:18:02
Absolutely! It should have been that all along! Th
| |
1524 } | 1553 } |
1525 | 1554 |
1526 | 1555 |
1527 | 1556 |
1528 // ---------------------------------------------------------------------------- | 1557 // ---------------------------------------------------------------------------- |
1529 // Win32 thread support. | 1558 // Win32 thread support. |
1530 | 1559 |
1531 // Definition of invalid thread handle and id. | 1560 // Definition of invalid thread handle and id. |
1532 static const HANDLE kNoThread = INVALID_HANDLE_VALUE; | 1561 static const HANDLE kNoThread = INVALID_HANDLE_VALUE; |
1533 | 1562 |
(...skipping 529 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2063 | 2092 |
2064 | 2093 |
2065 void Sampler::Stop() { | 2094 void Sampler::Stop() { |
2066 ASSERT(IsActive()); | 2095 ASSERT(IsActive()); |
2067 SamplerThread::RemoveActiveSampler(this); | 2096 SamplerThread::RemoveActiveSampler(this); |
2068 SetActive(false); | 2097 SetActive(false); |
2069 } | 2098 } |
2070 | 2099 |
2071 | 2100 |
2072 } } // namespace v8::internal | 2101 } } // namespace v8::internal |
OLD | NEW |