| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 55 matching lines...) |
| 66 time_t utc = time(NULL); | 66 time_t utc = time(NULL); |
| 67 ASSERT(utc != -1); | 67 ASSERT(utc != -1); |
| 68 struct tm* loc = localtime(&utc); | 68 struct tm* loc = localtime(&utc); |
| 69 ASSERT(loc != NULL); | 69 ASSERT(loc != NULL); |
| 70 // time - localtime includes any daylight savings offset, so subtract it. | 70 // time - localtime includes any daylight savings offset, so subtract it. |
| 71 return static_cast<double>((mktime(loc) - utc) * msPerSecond - | 71 return static_cast<double>((mktime(loc) - utc) * msPerSecond - |
| 72 (loc->tm_isdst > 0 ? 3600 * msPerSecond : 0)); | 72 (loc->tm_isdst > 0 ? 3600 * msPerSecond : 0)); |
| 73 } | 73 } |
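A worked instance of the subtraction described in the comment above, with hypothetical numbers (not taken from the code): for a US Eastern instant during daylight saving, the raw local-minus-UTC difference already contains the one-hour DST shift, so removing it recovers the standard-time offset.

    #include <cstdio>

    int main() {
      const int msPerSecond = 1000;
      // Hypothetical inputs: total local offset of -4 h while DST is in effect.
      long raw_offset_seconds = -4 * 3600;
      bool is_dst = true;
      // Subtract the DST hour, as the function above does via tm_isdst.
      double standard_offset_ms =
          raw_offset_seconds * msPerSecond - (is_dst ? 3600 * msPerSecond : 0);
      printf("%.0f ms (UTC-5)\n", standard_offset_ms);  // -18000000 ms
      return 0;
    }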
| 74 | 74 |
| 75 | 75 |
| 76 void* OS::Allocate(const size_t requested, | |
| 77 size_t* allocated, | |
| 78 bool is_executable) { | |
| 79 const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE)); | |
| 80 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); | |
| 81 void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | |
| 82 if (mbase == MAP_FAILED) { | |
| 83 LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed")); | |
| 84 return NULL; | |
| 85 } | |
| 86 *allocated = msize; | |
| 87 return mbase; | |
| 88 } | |
| 89 | |
| 90 | |
| 91 void OS::DumpBacktrace() { | 76 void OS::DumpBacktrace() { |
| 92 // Currently unsupported. | 77 // Currently unsupported. |
| 93 } | 78 } |
| 94 | 79 |
| 95 | 80 |
| 96 class PosixMemoryMappedFile : public OS::MemoryMappedFile { | 81 class PosixMemoryMappedFile : public OS::MemoryMappedFile { |
| 97 public: | 82 public: |
| 98 PosixMemoryMappedFile(FILE* file, void* memory, int size) | 83 PosixMemoryMappedFile(FILE* file, void* memory, int size) |
| 99 : file_(file), memory_(memory), size_(size) { } | 84 : file_(file), memory_(memory), size_(size) { } |
| 100 virtual ~PosixMemoryMappedFile(); | 85 virtual ~PosixMemoryMappedFile(); |
| (...skipping 116 matching lines...) |
| 217 // platforms does not work well because Cygwin does not support MAP_FIXED. | 202 // platforms does not work well because Cygwin does not support MAP_FIXED. |
| 218 // This causes VirtualMemory::Commit to not always commit the memory region | 203 // This causes VirtualMemory::Commit to not always commit the memory region |
| 219 // specified. | 204 // specified. |
| 220 | 205 |
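For context on the comment above: the commit path used by the other POSIX ports maps anonymous memory over the already-reserved range with MAP_FIXED, which pins the new mapping to the requested base. A minimal sketch of that pattern (an illustration, not the Cygwin code, since Cygwin lacks MAP_FIXED):

    #include <sys/mman.h>
    #include <cstddef>

    // Commit `size` bytes at exactly `base`, which must lie inside an
    // existing reservation. Without MAP_FIXED the kernel may place the
    // mapping elsewhere, which is why this scheme is unreliable on Cygwin.
    static bool CommitRegionFixed(void* base, size_t size, bool is_executable) {
      int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
      return mmap(base, size, prot,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != MAP_FAILED;
    }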
| 221 static void* GetRandomAddr() { | 206 static void* GetRandomAddr() { |
| 222 Isolate* isolate = Isolate::UncheckedCurrent(); | 207 Isolate* isolate = Isolate::UncheckedCurrent(); |
| 223 // Note that the current isolate isn't set up in a call path via | 208 // Note that the current isolate isn't set up in a call path via |
| 224 // CpuFeatures::Probe. We don't care about randomization in this case because | 209 // CpuFeatures::Probe. We don't care about randomization in this case because |
| 225 // the code page is immediately freed. | 210 // the code page is immediately freed. |
| 226 if (isolate != NULL) { | 211 if (isolate != NULL) { |
| 227 // The address range used to randomize RWX allocations in OS::Allocate | 212 // The address range used to randomize RWX allocations in |
| 213 // VirtualMemory::AllocateRegion(). |
| 228 // Try not to map pages into the default range where Windows loads DLLs | 214 // Try not to map pages into the default range where Windows loads DLLs |
| 229 // Use a multiple of 64k to prevent committing unused memory. | 215 // Use a multiple of 64k to prevent committing unused memory. |
| 230 // Note: This does not guarantee RWX regions will be within the | 216 // Note: This does not guarantee RWX regions will be within the |
| 231 // range kAllocationRandomAddressMin to kAllocationRandomAddressMax | 217 // range kAllocationRandomAddressMin to kAllocationRandomAddressMax |
| 232 #ifdef V8_HOST_ARCH_64_BIT | 218 #ifdef V8_HOST_ARCH_64_BIT |
| 233 static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000; | 219 static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000; |
| 234 static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000; | 220 static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000; |
| 235 #else | 221 #else |
| 236 static const intptr_t kAllocationRandomAddressMin = 0x04000000; | 222 static const intptr_t kAllocationRandomAddressMin = 0x04000000; |
| 237 static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000; | 223 static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000; |
| 238 #endif | 224 #endif |
| 239 uintptr_t address = | 225 uintptr_t address = |
| 240 (isolate->random_number_generator()->NextInt() << kPageSizeBits) | | 226 (isolate->random_number_generator()->NextInt() << kPageSizeBits) | |
| 241 kAllocationRandomAddressMin; | 227 kAllocationRandomAddressMin; |
| 242 address &= kAllocationRandomAddressMax; | 228 address &= kAllocationRandomAddressMax; |
| 243 return reinterpret_cast<void *>(address); | 229 return reinterpret_cast<void *>(address); |
| 244 } | 230 } |
| 245 return NULL; | 231 return NULL; |
| 246 } | 232 } |
| 247 | 233 |
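The bit manipulation above is easier to follow with concrete values. In the 32-bit case, kAllocationRandomAddressMin is a single bit (0x04000000), so OR-ing it in keeps the candidate at or above 64 MB, and kAllocationRandomAddressMax (0x3FFF0000) is a mask that both caps the address below 1 GB and clears the low 16 bits, giving the 64 KB granularity mentioned in the comment. A small sketch with an assumed kPageSizeBits and a fixed stand-in for NextInt():

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Assumptions for illustration only: kPageSizeBits and the random value
      // are stand-ins; the two constants are the 32-bit ones from the code above.
      const int kPageSizeBits = 20;
      const uintptr_t kAllocationRandomAddressMin = 0x04000000;
      const uintptr_t kAllocationRandomAddressMax = 0x3FFF0000;
      uint32_t random = 0x1234ABCDu;  // pretend NextInt() returned this

      uintptr_t address =
          (static_cast<uintptr_t>(random) << kPageSizeBits) |
          kAllocationRandomAddressMin;
      address &= kAllocationRandomAddressMax;  // < 1 GB, 64 KB aligned, >= 64 MB
      printf("candidate RWX base: %#llx\n",
             static_cast<unsigned long long>(address));
      return 0;
    }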
| 248 | |
| 249 static void* RandomizedVirtualAlloc(size_t size, int action, int protection) { | |
| 250 LPVOID base = NULL; | |
| 251 | |
| 252 if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) { | |
| 253 // For executable pages, try to randomize the allocation address | |
| 254 for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) { | |
| 255 base = VirtualAlloc(GetRandomAddr(), size, action, protection); | |
| 256 } | |
| 257 } | |
| 258 | |
| 259 // After three attempts give up and let the OS find an address to use. | |
| 260 if (base == NULL) base = VirtualAlloc(NULL, size, action, protection); | |
| 261 | |
| 262 return base; | |
| 263 } | |
| 264 | |
| 265 | |
| 266 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } | |
| 267 | |
| 268 | |
| 269 VirtualMemory::VirtualMemory(size_t size) | |
| 270 : address_(ReserveRegion(size)), size_(size) { } | |
| 271 | |
| 272 | |
| 273 VirtualMemory::VirtualMemory(size_t size, size_t alignment) | |
| 274 : address_(NULL), size_(0) { | |
| 275 ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); | |
| 276 size_t request_size = RoundUp(size + alignment, | |
| 277 static_cast<intptr_t>(OS::AllocateAlignment())); | |
| 278 void* address = ReserveRegion(request_size); | |
| 279 if (address == NULL) return; | |
| 280 Address base = RoundUp(static_cast<Address>(address), alignment); | |
| 281 // Try reducing the size by freeing and then reallocating a specific area. | |
| 282 bool result = ReleaseRegion(address, request_size); | |
| 283 USE(result); | |
| 284 ASSERT(result); | |
| 285 address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS); | |
| 286 if (address != NULL) { | |
| 287 request_size = size; | |
| 288 ASSERT(base == static_cast<Address>(address)); | |
| 289 } else { | |
| 290 // Resizing failed, just go with a bigger area. | |
| 291 address = ReserveRegion(request_size); | |
| 292 if (address == NULL) return; | |
| 293 } | |
| 294 address_ = address; | |
| 295 size_ = request_size; | |
| 296 } | |
| 297 | |
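The constructor above implements aligned reservation by over-reserving, releasing, and immediately re-reserving `size` bytes at the rounded-up base, falling back to the larger block if that exact re-reservation races with another allocation. The arithmetic that makes this safe: because the first reservation is `alignment` bytes larger than needed, the rounded-up base always leaves room for `size` bytes inside it. A small sketch with hypothetical numbers:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical values: 64 KB alignment, 192 KB payload, and an
      // unaligned address handed back by the initial reservation.
      const uintptr_t alignment = 0x10000;
      const uintptr_t size = 0x30000;
      const uintptr_t reserved = 0x00543210;
      const uintptr_t request_size = size + alignment;

      // RoundUp(reserved, alignment): first aligned address in the reservation.
      uintptr_t base = (reserved + alignment - 1) & ~(alignment - 1);

      // base - reserved < alignment, so base + size never runs past
      // reserved + request_size.
      printf("reserved [%#lx, %#lx), aligned base %#lx, end %#lx\n",
             static_cast<unsigned long>(reserved),
             static_cast<unsigned long>(reserved + request_size),
             static_cast<unsigned long>(base),
             static_cast<unsigned long>(base + size));
      return 0;
    }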
| 298 | |
| 299 VirtualMemory::~VirtualMemory() { | |
| 300 if (IsReserved()) { | |
| 301 bool result = ReleaseRegion(address_, size_); | |
| 302 ASSERT(result); | |
| 303 USE(result); | |
| 304 } | |
| 305 } | |
| 306 | |
| 307 | |
| 308 bool VirtualMemory::IsReserved() { | |
| 309 return address_ != NULL; | |
| 310 } | |
| 311 | |
| 312 | |
| 313 void VirtualMemory::Reset() { | |
| 314 address_ = NULL; | |
| 315 size_ = 0; | |
| 316 } | |
| 317 | |
| 318 | |
| 319 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { | |
| 320 return CommitRegion(address, size, is_executable); | |
| 321 } | |
| 322 | |
| 323 | |
| 324 bool VirtualMemory::Uncommit(void* address, size_t size) { | |
| 325 ASSERT(IsReserved()); | |
| 326 return UncommitRegion(address, size); | |
| 327 } | |
| 328 | |
| 329 | |
| 330 void* VirtualMemory::ReserveRegion(size_t size) { | |
| 331 return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS); | |
| 332 } | |
| 333 | |
| 334 | |
| 335 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { | |
| 336 int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; | |
| 337 if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) { | |
| 338 return false; | |
| 339 } | |
| 340 return true; | |
| 341 } | |
| 342 | |
| 343 | |
| 344 bool VirtualMemory::Guard(void* address) { | |
| 345 if (NULL == VirtualAlloc(address, | |
| 346 OS::CommitPageSize(), | |
| 347 MEM_COMMIT, | |
| 348 PAGE_NOACCESS)) { | |
| 349 return false; | |
| 350 } | |
| 351 return true; | |
| 352 } | |
| 353 | |
| 354 | |
| 355 bool VirtualMemory::UncommitRegion(void* base, size_t size) { | |
| 356 return VirtualFree(base, size, MEM_DECOMMIT) != 0; | |
| 357 } | |
| 358 | |
| 359 | |
| 360 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { | |
| 361 return VirtualFree(base, 0, MEM_RELEASE) != 0; | |
| 362 } | |
| 363 | |
| 364 | |
| 365 bool VirtualMemory::HasLazyCommits() { | |
| 366 // TODO(alph): implement for the platform. | |
| 367 return false; | |
| 368 } | |
| 369 | |
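Taken together, the class above is used as a reserve-then-commit arena: address space is reserved up front with PAGE_NOACCESS, individual pages are committed (and optionally uncommitted or guarded) as needed, and the destructor releases the whole reservation. A hedged usage sketch against this interface; it assumes the usual `address()` accessor from the VirtualMemory declaration in platform.h and elides error handling:

    // Sketch only: assumes the v8::internal::VirtualMemory declaration above.
    static void ReserveCommitSketch() {
      v8::internal::VirtualMemory vm(1024 * 1024);     // MEM_RESERVE, PAGE_NOACCESS
      if (!vm.IsReserved()) return;

      void* first_page = vm.address();
      size_t page = v8::internal::OS::CommitPageSize();

      if (vm.Commit(first_page, page, false)) {        // MEM_COMMIT, PAGE_READWRITE
        // ... use [first_page, first_page + page) ...
        vm.Uncommit(first_page, page);                 // MEM_DECOMMIT
      }
    }  // ~VirtualMemory releases the whole reservation via MEM_RELEASE.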
| 370 } } // namespace v8::internal | 234 } } // namespace v8::internal |