| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 176 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 187 // Redirect to std abort to signal abnormal program termination. | 187 // Redirect to std abort to signal abnormal program termination. |
| 188 abort(); | 188 abort(); |
| 189 } | 189 } |
| 190 | 190 |
| 191 | 191 |
| 192 void OS::DebugBreak() { | 192 void OS::DebugBreak() { |
| 193 asm("int $3"); | 193 asm("int $3"); |
| 194 } | 194 } |
| 195 | 195 |
| 196 | 196 |
| 197 void OS::DumpBacktrace() { |
| 198 // Currently unsupported. |
| 199 } |
| 200 |
| 201 |
| 197 class PosixMemoryMappedFile : public OS::MemoryMappedFile { | 202 class PosixMemoryMappedFile : public OS::MemoryMappedFile { |
| 198 public: | 203 public: |
| 199 PosixMemoryMappedFile(FILE* file, void* memory, int size) | 204 PosixMemoryMappedFile(FILE* file, void* memory, int size) |
| 200 : file_(file), memory_(memory), size_(size) { } | 205 : file_(file), memory_(memory), size_(size) { } |
| 201 virtual ~PosixMemoryMappedFile(); | 206 virtual ~PosixMemoryMappedFile(); |
| 202 virtual void* memory() { return memory_; } | 207 virtual void* memory() { return memory_; } |
| 203 virtual int size() { return size_; } | 208 virtual int size() { return size_; } |
| 204 private: | 209 private: |
| 205 FILE* file_; | 210 FILE* file_; |
| 206 void* memory_; | 211 void* memory_; |
| (...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 312 return 0; | 317 return 0; |
| 313 } | 318 } |
| 314 | 319 |
| 315 | 320 |
| 316 // The VirtualMemory implementation is taken from platform-win32.cc. | 321 // The VirtualMemory implementation is taken from platform-win32.cc. |
| 317 // The mmap-based virtual memory implementation as it is used on most posix | 322 // The mmap-based virtual memory implementation as it is used on most posix |
| 318 // platforms does not work well because Cygwin does not support MAP_FIXED. | 323 // platforms does not work well because Cygwin does not support MAP_FIXED. |
| 319 // This causes VirtualMemory::Commit to not always commit the memory region | 324 // This causes VirtualMemory::Commit to not always commit the memory region |
| 320 // specified. | 325 // specified. |
| 321 | 326 |
| 327 static void* GetRandomAddr() { |
| 328 Isolate* isolate = Isolate::UncheckedCurrent(); |
| 329 // Note that the current isolate isn't set up in a call path via |
| 330 // CpuFeatures::Probe. We don't care about randomization in this case because |
| 331 // the code page is immediately freed. |
| 332 if (isolate != NULL) { |
| 333 // The address range used to randomize RWX allocations in OS::Allocate |
| 334 // Try not to map pages into the default range where Windows loads DLLs. |
| 335 // Use a multiple of 64k to prevent committing unused memory. |
| 336 // Note: This does not guarantee RWX regions will be within the |
| 337 // range kAllocationRandomAddressMin to kAllocationRandomAddressMax |
| 338 #ifdef V8_HOST_ARCH_64_BIT |
| 339 static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000; |
| 340 static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000; |
| 341 #else |
| 342 static const intptr_t kAllocationRandomAddressMin = 0x04000000; |
| 343 static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000; |
| 344 #endif |
| 345 uintptr_t address = (V8::RandomPrivate(isolate) << kPageSizeBits) |
| 346 | kAllocationRandomAddressMin; |
| 347 address &= kAllocationRandomAddressMax; |
| 348 return reinterpret_cast<void *>(address); |
| 349 } |
| 350 return NULL; |
| 351 } |
| 352 |
| 353 |
| 354 static void* RandomizedVirtualAlloc(size_t size, int action, int protection) { |
| 355 LPVOID base = NULL; |
| 356 |
| 357 if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) { |
| 358 // For executable pages try to randomize the allocation address |
| 359 for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) { |
| 360 base = VirtualAlloc(GetRandomAddr(), size, action, protection); |
| 361 } |
| 362 } |
| 363 |
| 364 // After three attempts give up and let the OS find an address to use. |
| 365 if (base == NULL) base = VirtualAlloc(NULL, size, action, protection); |
| 366 |
| 367 return base; |
| 368 } |
| 369 |
| 370 |
| 322 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } | 371 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } |
| 323 | 372 |
| 324 | 373 |
| 325 VirtualMemory::VirtualMemory(size_t size) | 374 VirtualMemory::VirtualMemory(size_t size) |
| 326 : address_(ReserveRegion(size)), size_(size) { } | 375 : address_(ReserveRegion(size)), size_(size) { } |
| 327 | 376 |
| 328 | 377 |
| 378 VirtualMemory::VirtualMemory(size_t size, size_t alignment) |
| 379 : address_(NULL), size_(0) { |
| 380 ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); |
| 381 size_t request_size = RoundUp(size + alignment, |
| 382 static_cast<intptr_t>(OS::AllocateAlignment())); |
| 383 void* address = ReserveRegion(request_size); |
| 384 if (address == NULL) return; |
| 385 Address base = RoundUp(static_cast<Address>(address), alignment); |
| 386 // Try reducing the size by freeing and then reallocating a specific area. |
| 387 bool result = ReleaseRegion(address, request_size); |
| 388 USE(result); |
| 389 ASSERT(result); |
| 390 address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS); |
| 391 if (address != NULL) { |
| 392 request_size = size; |
| 393 ASSERT(base == static_cast<Address>(address)); |
| 394 } else { |
| 395 // Resizing failed, just go with a bigger area. |
| 396 address = ReserveRegion(request_size); |
| 397 if (address == NULL) return; |
| 398 } |
| 399 address_ = address; |
| 400 size_ = request_size; |
| 401 } |
| 402 |
| 403 |
| 329 VirtualMemory::~VirtualMemory() { | 404 VirtualMemory::~VirtualMemory() { |
| 330 if (IsReserved()) { | 405 if (IsReserved()) { |
| 331 if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL; | 406 bool result = ReleaseRegion(address_, size_); |
| 407 ASSERT(result); |
| 408 USE(result); |
| 332 } | 409 } |
| 333 } | 410 } |
| 334 | 411 |
| 335 | 412 |
| 336 bool VirtualMemory::IsReserved() { | 413 bool VirtualMemory::IsReserved() { |
| 337 return address_ != NULL; | 414 return address_ != NULL; |
| 338 } | 415 } |
| 339 | 416 |
| 340 | 417 |
| 341 void VirtualMemory::Reset() { | 418 void VirtualMemory::Reset() { |
| 342 address_ = NULL; | 419 address_ = NULL; |
| 343 size_ = 0; | 420 size_ = 0; |
| 344 } | 421 } |
| 345 | 422 |
| 346 | 423 |
| 347 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { | 424 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { |
| 348 int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; | 425 return CommitRegion(address, size, is_executable); |
| 349 if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) { | |
| 350 return false; | |
| 351 } | |
| 352 | |
| 353 UpdateAllocatedSpaceLimits(address, static_cast<int>(size)); | |
| 354 return true; | |
| 355 } | 426 } |
| 356 | 427 |
| 357 | 428 |
| 358 bool VirtualMemory::Uncommit(void* address, size_t size) { | 429 bool VirtualMemory::Uncommit(void* address, size_t size) { |
| 359 ASSERT(IsReserved()); | 430 ASSERT(IsReserved()); |
| 360 return VirtualFree(address, size, MEM_DECOMMIT) != false; | 431 return UncommitRegion(address, size); |
| 432 } |
| 433 |
| 434 |
| 435 void* VirtualMemory::ReserveRegion(size_t size) { |
| 436 return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS); |
| 437 } |
| 438 |
| 439 |
| 440 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { |
| 441 int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; |
| 442 if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) { |
| 443 return false; |
| 444 } |
| 445 |
| 446 UpdateAllocatedSpaceLimits(base, static_cast<int>(size)); |
| 447 return true; |
| 361 } | 448 } |
| 362 | 449 |
| 363 | 450 |
| 364 bool VirtualMemory::Guard(void* address) { | 451 bool VirtualMemory::Guard(void* address) { |
| 365 if (NULL == VirtualAlloc(address, | 452 if (NULL == VirtualAlloc(address, |
| 366 OS::CommitPageSize(), | 453 OS::CommitPageSize(), |
| 367 MEM_COMMIT, | 454 MEM_COMMIT, |
| 368 PAGE_READONLY | PAGE_GUARD)) { | 455 PAGE_READONLY | PAGE_GUARD)) { |
| 369 return false; | 456 return false; |
| 370 } | 457 } |
| 371 return true; | 458 return true; |
| 372 } | 459 } |
| 373 | 460 |
| 374 | 461 |
| 375 void* VirtualMemory::ReserveRegion(size_t size) { | |
| 376 return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS); | |
| 377 } | |
| 378 | |
| 379 | |
| 380 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { | |
| 381 UNIMPLEMENTED(); | |
| 382 return false; | |
| 383 } | |
| 384 | |
| 385 | |
| 386 bool VirtualMemory::UncommitRegion(void* base, size_t size) { | 462 bool VirtualMemory::UncommitRegion(void* base, size_t size) { |
| 387 UNIMPLEMENTED(); | 463 return VirtualFree(base, size, MEM_DECOMMIT) != 0; |
| 388 return false; | |
| 389 } | 464 } |
| 390 | 465 |
| 391 | 466 |
| 392 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { | 467 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { |
| 393 UNIMPLEMENTED(); | 468 return VirtualFree(base, 0, MEM_RELEASE) != 0; |
| 394 return false; | |
| 395 } | 469 } |
| 396 | 470 |
| 397 | 471 |
| 398 bool VirtualMemory::HasLazyCommits() { | 472 bool VirtualMemory::HasLazyCommits() { |
| 399 // TODO(alph): implement for the platform. | 473 // TODO(alph): implement for the platform. |
| 400 return false; | 474 return false; |
| 401 } | 475 } |
| 402 | 476 |
| 403 | 477 |
| 404 class Thread::PlatformData : public Malloced { | 478 class Thread::PlatformData : public Malloced { |
| 405 public: | 479 public: |
| 406 PlatformData() : thread_(kNoThread) {} | 480 PlatformData() : thread_(kNoThread) {} |
| 407 pthread_t thread_; // Thread handle for pthread. | 481 pthread_t thread_; // Thread handle for pthread. |
| 408 }; | 482 }; |
| 409 | 483 |
| 410 | 484 |
| 411 | |
| 412 | |
| 413 Thread::Thread(const Options& options) | 485 Thread::Thread(const Options& options) |
| 414 : data_(new PlatformData()), | 486 : data_(new PlatformData()), |
| 415 stack_size_(options.stack_size()), | 487 stack_size_(options.stack_size()), |
| 416 start_semaphore_(NULL) { | 488 start_semaphore_(NULL) { |
| 417 set_name(options.name()); | 489 set_name(options.name()); |
| 418 } | 490 } |
| 419 | 491 |
| 420 | 492 |
| 421 Thread::~Thread() { | 493 Thread::~Thread() { |
| 422 delete data_; | 494 delete data_; |
| (...skipping 405 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 828 | 900 |
| 829 | 901 |
| 830 void Sampler::Stop() { | 902 void Sampler::Stop() { |
| 831 ASSERT(IsActive()); | 903 ASSERT(IsActive()); |
| 832 SamplerThread::RemoveActiveSampler(this); | 904 SamplerThread::RemoveActiveSampler(this); |
| 833 SetActive(false); | 905 SetActive(false); |
| 834 } | 906 } |
| 835 | 907 |
| 836 | 908 |
| 837 } } // namespace v8::internal | 909 } } // namespace v8::internal |
| OLD | NEW |