OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 59 matching lines...) | |
70 static const pthread_t kNoThread = (pthread_t) 0; | 70 static const pthread_t kNoThread = (pthread_t) 0; |
71 | 71 |
72 | 72 |
73 double ceiling(double x) { | 73 double ceiling(double x) { |
74 return ceil(x); | 74 return ceil(x); |
75 } | 75 } |
76 | 76 |
77 | 77 |
78 static Mutex* limit_mutex = NULL; | 78 static Mutex* limit_mutex = NULL; |
79 | 79 |
80 | |
81 static void* GetRandomMmapAddr() { | |
82 Isolate* isolate = Isolate::UncheckedCurrent(); | |
83 // Note that the current isolate isn't set up in a call path via | |
84 // CpuFeatures::Probe. We don't care about randomization in this case because | |
85 // the code page is immediately freed. | |
86 if (isolate != NULL) { | |
87 #ifdef V8_TARGET_ARCH_X64 | |
88 uint64_t rnd1 = V8::RandomPrivate(isolate); | |
89 uint64_t rnd2 = V8::RandomPrivate(isolate); | |
90 uint64_t raw_addr = (rnd1 << 32) ^ rnd2; | |
91 // Currently available CPUs have 48 bits of virtual addressing. Truncate | |
92 // the hint address to 46 bits to give the kernel a fighting chance of | |
93 // fulfilling our placement request. | |
94 raw_addr &= V8_UINT64_C(0x3ffffffff000); | |
95 #else | |
96 uint32_t raw_addr = V8::RandomPrivate(isolate); | |
97 // The range 0x20000000 - 0x60000000 is relatively unpopulated across a | |
98 // variety of ASLR modes (PAE kernel, NX compat mode, etc). | |
99 raw_addr &= 0x3ffff000; | |
100 raw_addr += 0x20000000; | |
101 #endif | |
102 return reinterpret_cast<void*>(raw_addr); | |
103 } | |
104 return NULL; | |
105 } | |
106 | |
Vyacheslav Egorov (Chromium), 2011/10/10 14:13:14: accidentally deleted new line
| |
107 | |
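Aside for reviewers (not part of the patch): the block deleted above becomes a call to OS::GetRandomMmapAddr() below, presumably a shared helper defined outside this file. A minimal sketch of the hint-address computation it performed, where rnd1/rnd2 stand in for values from V8::RandomPrivate(isolate):

```cpp
// Illustrative sketch only, not the patch's code.
#include <cstdint>

void* HintAddressSketch(uint64_t rnd1, uint64_t rnd2) {
#if defined(__x86_64__)
  // Combine two random values into a 64-bit candidate, then truncate it to
  // 46 bits and page-align it so a kernel limited to 48-bit virtual
  // addresses has a realistic chance of honoring the hint.
  uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
  raw_addr &= UINT64_C(0x3ffffffff000);
  return reinterpret_cast<void*>(raw_addr);
#else
  (void)rnd2;  // unused on 32-bit targets
  // 32-bit: pick a page-aligned address in 0x20000000 - 0x60000000, a range
  // that is relatively unpopulated under common ASLR layouts.
  uint32_t raw_addr = static_cast<uint32_t>(rnd1);
  raw_addr &= 0x3ffff000;
  raw_addr += 0x20000000;
  return reinterpret_cast<void*>(static_cast<uintptr_t>(raw_addr));
#endif
}
```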
108 void OS::Setup() { | 80 void OS::Setup() { |
109 // Seed the random number generator. We preserve microsecond resolution. | 81 // Seed the random number generator. We preserve microsecond resolution. |
110 uint64_t seed = Ticks() ^ (getpid() << 16); | 82 uint64_t seed = Ticks() ^ (getpid() << 16); |
111 srandom(static_cast<unsigned int>(seed)); | 83 srandom(static_cast<unsigned int>(seed)); |
112 limit_mutex = CreateMutex(); | 84 limit_mutex = CreateMutex(); |
113 | 85 |
114 #ifdef __arm__ | 86 #ifdef __arm__ |
115 // When running on ARM hardware check that the EABI used by V8 and | 87 // When running on ARM hardware check that the EABI used by V8 and |
116 // by the C code is the same. | 88 // by the C code is the same. |
117 bool hard_float = OS::ArmUsingHardFloat(); | 89 bool hard_float = OS::ArmUsingHardFloat(); |
(...skipping 261 matching lines...) | |
379 size_t OS::AllocateAlignment() { | 351 size_t OS::AllocateAlignment() { |
380 return sysconf(_SC_PAGESIZE); | 352 return sysconf(_SC_PAGESIZE); |
381 } | 353 } |
382 | 354 |
383 | 355 |
384 void* OS::Allocate(const size_t requested, | 356 void* OS::Allocate(const size_t requested, |
385 size_t* allocated, | 357 size_t* allocated, |
386 bool is_executable) { | 358 bool is_executable) { |
387 const size_t msize = RoundUp(requested, AllocateAlignment()); | 359 const size_t msize = RoundUp(requested, AllocateAlignment()); |
388 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); | 360 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); |
389 void* addr = GetRandomMmapAddr(); | 361 void* addr = OS::GetRandomMmapAddr(); |
390 void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | 362 void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
391 if (mbase == MAP_FAILED) { | 363 if (mbase == MAP_FAILED) { |
392 LOG(i::Isolate::Current(), | 364 LOG(i::Isolate::Current(), |
393 StringEvent("OS::Allocate", "mmap failed")); | 365 StringEvent("OS::Allocate", "mmap failed")); |
394 return NULL; | 366 return NULL; |
395 } | 367 } |
396 *allocated = msize; | 368 *allocated = msize; |
397 UpdateAllocatedSpaceLimits(mbase, msize); | 369 UpdateAllocatedSpaceLimits(mbase, msize); |
398 return mbase; | 370 return mbase; |
399 } | 371 } |
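Aside (not the patch's code): a self-contained sketch of the allocation pattern above, using a NULL hint instead of the randomized address that OS::GetRandomMmapAddr() now supplies:

```cpp
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <cstdio>

void* AllocateSketch(size_t requested, size_t* allocated, bool is_executable) {
  size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  size_t msize = (requested + page - 1) & ~(page - 1);  // RoundUp to page size
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mbase == MAP_FAILED) {
    perror("mmap");              // the patch logs via LOG/StringEvent instead
    return NULL;
  }
  *allocated = msize;
  return mbase;
}
```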
(...skipping 49 matching lines...) | |
449 | 421 |
450 | 422 |
451 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { | 423 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { |
452 FILE* file = fopen(name, "r+"); | 424 FILE* file = fopen(name, "r+"); |
453 if (file == NULL) return NULL; | 425 if (file == NULL) return NULL; |
454 | 426 |
455 fseek(file, 0, SEEK_END); | 427 fseek(file, 0, SEEK_END); |
456 int size = ftell(file); | 428 int size = ftell(file); |
457 | 429 |
458 void* memory = | 430 void* memory = |
459 mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); | 431 mmap(OS::GetRandomMmapAddr(), |
432 size, | |
433 PROT_READ | PROT_WRITE, | |
434 MAP_SHARED, | |
435 fileno(file), | |
436 0); | |
460 return new PosixMemoryMappedFile(file, memory, size); | 437 return new PosixMemoryMappedFile(file, memory, size); |
461 } | 438 } |
462 | 439 |
463 | 440 |
464 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, | 441 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, |
465 void* initial) { | 442 void* initial) { |
466 FILE* file = fopen(name, "w+"); | 443 FILE* file = fopen(name, "w+"); |
467 if (file == NULL) return NULL; | 444 if (file == NULL) return NULL; |
468 int result = fwrite(initial, size, 1, file); | 445 int result = fwrite(initial, size, 1, file); |
469 if (result < 1) { | 446 if (result < 1) { |
470 fclose(file); | 447 fclose(file); |
471 return NULL; | 448 return NULL; |
472 } | 449 } |
473 void* memory = | 450 void* memory = |
474 mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); | 451 mmap(OS::GetRandomMmapAddr(), |
452 size, | |
453 PROT_READ | PROT_WRITE, | |
454 MAP_SHARED, | |
455 fileno(file), | |
456 0); | |
475 return new PosixMemoryMappedFile(file, memory, size); | 457 return new PosixMemoryMappedFile(file, memory, size); |
476 } | 458 } |
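Aside (illustrative only): the create() path above writes the initial contents, then maps the file MAP_SHARED so later stores propagate back to it. A minimal sketch, with the caller owning the FILE* (the patch stores it in PosixMemoryMappedFile):

```cpp
#include <sys/mman.h>
#include <cstdio>
#include <cstddef>

void* MapFileSketch(FILE* file, const void* initial, int size) {
  if (fwrite(initial, size, 1, file) < 1) return NULL;
  fflush(file);  // not in the original; ensures the bytes reach the file first
  void* memory = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                      fileno(file), 0);
  return (memory == MAP_FAILED) ? NULL : memory;
}
```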
477 | 459 |
478 | 460 |
479 PosixMemoryMappedFile::~PosixMemoryMappedFile() { | 461 PosixMemoryMappedFile::~PosixMemoryMappedFile() { |
480 if (memory_) OS::Free(memory_, size_); | 462 if (memory_) OS::Free(memory_, size_); |
481 fclose(file_); | 463 fclose(file_); |
482 } | 464 } |
483 | 465 |
484 | 466 |
(...skipping 64 matching lines...) | |
549 // Support for ll_prof.py. | 531 // Support for ll_prof.py. |
550 // | 532 // |
551 // The Linux profiler built into the kernel logs all mmap's with | 533 // The Linux profiler built into the kernel logs all mmap's with |
552 // PROT_EXEC so that analysis tools can properly attribute ticks. We | 534 // PROT_EXEC so that analysis tools can properly attribute ticks. We |
553 // do a mmap with a name known by ll_prof.py and immediately munmap | 535 // do a mmap with a name known by ll_prof.py and immediately munmap |
554 // it. This injects a GC marker into the stream of events generated | 536 // it. This injects a GC marker into the stream of events generated |
555 // by the kernel and allows us to synchronize V8 code log and the | 537 // by the kernel and allows us to synchronize V8 code log and the |
556 // kernel log. | 538 // kernel log. |
557 int size = sysconf(_SC_PAGESIZE); | 539 int size = sysconf(_SC_PAGESIZE); |
558 FILE* f = fopen(kGCFakeMmap, "w+"); | 540 FILE* f = fopen(kGCFakeMmap, "w+"); |
559 void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, | 541 void* addr = mmap(OS::GetRandomMmapAddr(), |
560 fileno(f), 0); | 542 size, |
543 PROT_READ | PROT_EXEC, | |
544 MAP_PRIVATE, | |
545 fileno(f), | |
546 0); | |
561 ASSERT(addr != MAP_FAILED); | 547 ASSERT(addr != MAP_FAILED); |
562 OS::Free(addr, size); | 548 OS::Free(addr, size); |
563 fclose(f); | 549 fclose(f); |
564 } | 550 } |
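Aside (sketch, not the patch's code): the marker trick described in the comment above, i.e. a PROT_EXEC mmap of a file with a well-known name that is immediately unmapped. Only the kernel's mmap event matters; "/tmp/__v8_gc__" is an assumed stand-in for kGCFakeMmap:

```cpp
#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>

void InjectGCMarkerSketch() {
  const char* fake_name = "/tmp/__v8_gc__";  // hypothetical path, not V8's
  long size = sysconf(_SC_PAGESIZE);
  FILE* f = fopen(fake_name, "w+");
  if (f == NULL) return;
  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
                    fileno(f), 0);
  if (addr != MAP_FAILED) munmap(addr, size);  // the mmap event is the marker
  fclose(f);
}
```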
565 | 551 |
566 | 552 |
567 int OS::StackWalk(Vector<OS::StackFrame> frames) { | 553 int OS::StackWalk(Vector<OS::StackFrame> frames) { |
568 // backtrace is a glibc extension. | 554 // backtrace is a glibc extension. |
569 #ifdef __GLIBC__ | 555 #ifdef __GLIBC__ |
570 int frames_size = frames.length(); | 556 int frames_size = frames.length(); |
(...skipping 36 matching lines...) | |
607 address_ = ReserveRegion(size); | 593 address_ = ReserveRegion(size); |
608 size_ = size; | 594 size_ = size; |
609 } | 595 } |
610 | 596 |
611 | 597 |
612 VirtualMemory::VirtualMemory(size_t size, size_t alignment) | 598 VirtualMemory::VirtualMemory(size_t size, size_t alignment) |
613 : address_(NULL), size_(0) { | 599 : address_(NULL), size_(0) { |
614 ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); | 600 ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); |
615 size_t request_size = RoundUp(size + alignment, | 601 size_t request_size = RoundUp(size + alignment, |
616 static_cast<intptr_t>(OS::AllocateAlignment())); | 602 static_cast<intptr_t>(OS::AllocateAlignment())); |
617 void* reservation = mmap(GetRandomMmapAddr(), | 603 void* reservation = mmap(OS::GetRandomMmapAddr(), |
618 request_size, | 604 request_size, |
619 PROT_NONE, | 605 PROT_NONE, |
620 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, | 606 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, |
621 kMmapFd, | 607 kMmapFd, |
622 kMmapFdOffset); | 608 kMmapFdOffset); |
623 if (reservation == MAP_FAILED) return; | 609 if (reservation == MAP_FAILED) return; |
624 | 610 |
625 Address base = static_cast<Address>(reservation); | 611 Address base = static_cast<Address>(reservation); |
626 Address aligned_base = RoundUp(base, alignment); | 612 Address aligned_base = RoundUp(base, alignment); |
627 ASSERT_LE(base, aligned_base); | 613 ASSERT_LE(base, aligned_base); |
(...skipping 45 matching lines...) | |
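Aside (sketch under stated assumptions): the aligned VirtualMemory constructor above over-reserves size + alignment bytes and rounds the base up; the trimming of the unused head and tail falls outside this hunk. Assuming `alignment` is a power of two:

```cpp
#include <sys/mman.h>
#include <cstdint>
#include <cstddef>

void* ReserveAlignedSketch(size_t size, size_t alignment) {
  size_t request_size = size + alignment;  // over-reserve so we can align
  void* reservation = mmap(NULL, request_size, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                           -1, 0);
  if (reservation == MAP_FAILED) return NULL;
  uintptr_t base = reinterpret_cast<uintptr_t>(reservation);
  uintptr_t aligned_base = (base + alignment - 1) & ~(alignment - 1);  // RoundUp
  // A full implementation would munmap [base, aligned_base) and the tail
  // beyond aligned_base + size, leaving exactly `size` aligned bytes reserved.
  return reinterpret_cast<void*>(aligned_base);
}
```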
673 return CommitRegion(address, size, is_executable); | 659 return CommitRegion(address, size, is_executable); |
674 } | 660 } |
675 | 661 |
676 | 662 |
677 bool VirtualMemory::Uncommit(void* address, size_t size) { | 663 bool VirtualMemory::Uncommit(void* address, size_t size) { |
678 return UncommitRegion(address, size); | 664 return UncommitRegion(address, size); |
679 } | 665 } |
680 | 666 |
681 | 667 |
682 void* VirtualMemory::ReserveRegion(size_t size) { | 668 void* VirtualMemory::ReserveRegion(size_t size) { |
683 void* result = mmap(GetRandomMmapAddr(), | 669 void* result = mmap(OS::GetRandomMmapAddr(), |
684 size, | 670 size, |
685 PROT_NONE, | 671 PROT_NONE, |
686 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, | 672 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, |
687 kMmapFd, | 673 kMmapFd, |
688 kMmapFdOffset); | 674 kMmapFdOffset); |
689 | 675 |
690 if (result == MAP_FAILED) return NULL; | 676 if (result == MAP_FAILED) return NULL; |
691 | 677 |
692 return result; | 678 return result; |
693 } | 679 } |
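Aside (illustrative only): the reserve pattern above maps PROT_NONE pages with MAP_NORESERVE so nothing is committed yet; -1 and 0 below stand in for the patch's kMmapFd / kMmapFdOffset constants, and how CommitRegion later makes pages usable is outside this hunk:

```cpp
#include <sys/mman.h>
#include <cstddef>

void* ReserveRegionSketch(size_t size) {
  void* result = mmap(NULL, size, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                      -1, 0);
  return (result == MAP_FAILED) ? NULL : result;
}

bool ReleaseRegionSketch(void* base, size_t size) {
  return munmap(base, size) == 0;  // drop the whole reservation
}
```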
(...skipping 528 matching lines...) | |
1222 | 1208 |
1223 | 1209 |
1224 void Sampler::Stop() { | 1210 void Sampler::Stop() { |
1225 ASSERT(IsActive()); | 1211 ASSERT(IsActive()); |
1226 SignalSender::RemoveActiveSampler(this); | 1212 SignalSender::RemoveActiveSampler(this); |
1227 SetActive(false); | 1213 SetActive(false); |
1228 } | 1214 } |
1229 | 1215 |
1230 | 1216 |
1231 } } // namespace v8::internal | 1217 } } // namespace v8::internal |