OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 43 matching lines...)
54 #include "platform-posix.h" | 54 #include "platform-posix.h" |
55 #include "platform.h" | 55 #include "platform.h" |
56 #include "v8threads.h" | 56 #include "v8threads.h" |
57 #include "vm-state-inl.h" | 57 #include "vm-state-inl.h" |
58 | 58 |
59 | 59 |
60 namespace v8 { | 60 namespace v8 { |
61 namespace internal { | 61 namespace internal { |
62 | 62 |
63 | 63 |
64 static Mutex* limit_mutex = NULL; | |
65 | |
66 | |
67 const char* OS::LocalTimezone(double time) { | 64 const char* OS::LocalTimezone(double time) { |
68 if (std::isnan(time)) return ""; | 65 if (std::isnan(time)) return ""; |
69 time_t tv = static_cast<time_t>(floor(time/msPerSecond)); | 66 time_t tv = static_cast<time_t>(floor(time/msPerSecond)); |
70 struct tm* t = localtime(&tv); | 67 struct tm* t = localtime(&tv); |
71 if (NULL == t) return ""; | 68 if (NULL == t) return ""; |
72 return t->tm_zone; | 69 return t->tm_zone; |
73 } | 70 } |
74 | 71 |
75 | 72 |
76 double OS::LocalTimeOffset() { | 73 double OS::LocalTimeOffset() { |
77 time_t tv = time(NULL); | 74 time_t tv = time(NULL); |
78 struct tm* t = localtime(&tv); | 75 struct tm* t = localtime(&tv); |
79 // tm_gmtoff includes any daylight savings offset, so subtract it. | 76 // tm_gmtoff includes any daylight savings offset, so subtract it. |
80 return static_cast<double>(t->tm_gmtoff * msPerSecond - | 77 return static_cast<double>(t->tm_gmtoff * msPerSecond - |
81 (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); | 78 (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); |
82 } | 79 } |
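The subtraction above can be exercised in isolation. Below is a minimal standalone sketch, not part of this patch, assuming the BSD/glibc tm_gmtoff extension and a local kMsPerSecond constant standing in for OS::msPerSecond:

#include <cstdio>
#include <ctime>

int main() {
  const int kMsPerSecond = 1000;  // stand-in for OS::msPerSecond
  time_t now = time(NULL);
  struct tm* t = localtime(&now);
  if (t == NULL) return 1;
  // tm_gmtoff already includes the DST shift, so back it out to get the
  // standard-time offset from UTC, in milliseconds.
  double offset_ms = static_cast<double>(t->tm_gmtoff) * kMsPerSecond -
                     (t->tm_isdst > 0 ? 3600.0 * kMsPerSecond : 0.0);
  printf("standard-time UTC offset: %.0f ms\n", offset_ms);
  return 0;
}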
83 | 80 |
84 | 81 |
85 // We keep the lowest and highest addresses mapped as a quick way of | |
86 // determining that pointers are outside the heap (used mostly in assertions | |
87 // and verification). The estimate is conservative, i.e., not all addresses in | |
88 // 'allocated' space are actually allocated to our heap. The range is | |
89 // [lowest, highest), inclusive on the low end and exclusive on the high end. |
90 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1); | |
91 static void* highest_ever_allocated = reinterpret_cast<void*>(0); | |
92 | |
93 | |
94 static void UpdateAllocatedSpaceLimits(void* address, int size) { | |
95 ASSERT(limit_mutex != NULL); | |
96 LockGuard<Mutex> lock(limit_mutex); | |
97 | |
98 lowest_ever_allocated = Min(lowest_ever_allocated, address); | |
99 highest_ever_allocated = | |
100 Max(highest_ever_allocated, | |
101 reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); | |
102 } | |
103 | |
104 | |
105 bool OS::IsOutsideAllocatedSpace(void* address) { | |
106 return address < lowest_ever_allocated || address >= highest_ever_allocated; | |
107 } | |
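The removed block above keeps a single conservative [lowest, highest) range rather than an exact allocation map. A tiny standalone illustration, using hypothetical addresses not taken from the patch, of why the check can only ever prove that an address is outside the heap:

#include <cstdio>

int main() {
  // Hypothetical addresses: mappings near 0x1000 and up to 0x9000, with an
  // unmapped gap at 0x5000 that still falls inside the tracked range.
  char* lowest_ever  = reinterpret_cast<char*>(0x1000);
  char* highest_ever = reinterpret_cast<char*>(0x9000);
  char* gap          = reinterpret_cast<char*>(0x5000);
  bool outside = gap < lowest_ever || gap >= highest_ever;
  printf("gap reported outside allocated space? %s\n", outside ? "yes" : "no");
  return 0;  // prints "no": the estimate errs on the side of "maybe inside"
}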
108 | |
109 | |
110 void* OS::Allocate(const size_t requested, | 82 void* OS::Allocate(const size_t requested, |
111 size_t* allocated, | 83 size_t* allocated, |
112 bool is_executable) { | 84 bool is_executable) { |
113 const size_t msize = RoundUp(requested, AllocateAlignment()); | 85 const size_t msize = RoundUp(requested, AllocateAlignment()); |
114 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); | 86 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); |
115 void* addr = OS::GetRandomMmapAddr(); | 87 void* addr = OS::GetRandomMmapAddr(); |
116 void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); | 88 void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); |
117 if (mbase == MAP_FAILED) { | 89 if (mbase == MAP_FAILED) { |
118 LOG(i::Isolate::Current(), | 90 LOG(i::Isolate::Current(), |
119 StringEvent("OS::Allocate", "mmap failed")); | 91 StringEvent("OS::Allocate", "mmap failed")); |
120 return NULL; | 92 return NULL; |
121 } | 93 } |
122 *allocated = msize; | 94 *allocated = msize; |
123 UpdateAllocatedSpaceLimits(mbase, msize); | |
124 return mbase; | 95 return mbase; |
125 } | 96 } |
126 | 97 |
127 | 98 |
128 void OS::DumpBacktrace() { | 99 void OS::DumpBacktrace() { |
129 // Currently unsupported. | 100 // Currently unsupported. |
130 } | 101 } |
131 | 102 |
132 | 103 |
133 class PosixMemoryMappedFile : public OS::MemoryMappedFile { | 104 class PosixMemoryMappedFile : public OS::MemoryMappedFile { |
(...skipping 261 matching lines...)
395 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { | 366 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { |
396 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); | 367 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); |
397 if (MAP_FAILED == mmap(base, | 368 if (MAP_FAILED == mmap(base, |
398 size, | 369 size, |
399 prot, | 370 prot, |
400 MAP_PRIVATE | MAP_ANON | MAP_FIXED, | 371 MAP_PRIVATE | MAP_ANON | MAP_FIXED, |
401 kMmapFd, | 372 kMmapFd, |
402 kMmapFdOffset)) { | 373 kMmapFdOffset)) { |
403 return false; | 374 return false; |
404 } | 375 } |
405 | |
406 UpdateAllocatedSpaceLimits(base, size); | |
407 return true; | 376 return true; |
408 } | 377 } |
409 | 378 |
410 | 379 |
411 bool VirtualMemory::UncommitRegion(void* base, size_t size) { | 380 bool VirtualMemory::UncommitRegion(void* base, size_t size) { |
412 return mmap(base, | 381 return mmap(base, |
413 size, | 382 size, |
414 PROT_NONE, | 383 PROT_NONE, |
415 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, | 384 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, |
416 kMmapFd, | 385 kMmapFd, |
417 kMmapFdOffset) != MAP_FAILED; | 386 kMmapFdOffset) != MAP_FAILED; |
418 } | 387 } |
419 | 388 |
420 | 389 |
421 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { | 390 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { |
422 return munmap(base, size) == 0; | 391 return munmap(base, size) == 0; |
423 } | 392 } |
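CommitRegion, UncommitRegion, and ReleaseRegion together implement the usual reserve/commit/decommit pattern over mmap. A minimal standalone sketch of that pattern, not part of the patch; the reservation size and page size are assumptions for illustration:

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kReserved = 1 << 20;   // assumed 1 MB reservation
  const size_t kPage = 4096;          // assumed page size for the example
  // Reserve address space only: inaccessible and with no swap reservation.
  void* base = mmap(NULL, kReserved, PROT_NONE,
                    MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0);
  if (base == MAP_FAILED) return 1;
  // Commit one page read/write in place, as CommitRegion does with MAP_FIXED.
  if (mmap(base, kPage, PROT_READ | PROT_WRITE,
           MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0) == MAP_FAILED) return 1;
  static_cast<char*>(base)[0] = 42;   // the committed page is now usable
  // Decommit: remap inaccessible, keeping the reservation (UncommitRegion).
  if (mmap(base, kPage, PROT_NONE,
           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, -1, 0) ==
      MAP_FAILED) return 1;
  // Release the whole reservation (ReleaseRegion).
  if (munmap(base, kReserved) != 0) return 1;
  printf("reserve/commit/uncommit/release ok\n");
  return 0;
}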
424 | 393 |
425 | 394 |
426 bool VirtualMemory::HasLazyCommits() { | 395 bool VirtualMemory::HasLazyCommits() { |
427 // TODO(alph): implement for the platform. | 396 // TODO(alph): implement for the platform. |
428 return false; | 397 return false; |
429 } | 398 } |
430 | 399 |
431 | 400 |
432 void OS::SetUp() { | 401 void OS::SetUp() { |
433 // Seed the random number generator. We preserve microsecond resolution. | 402 // Seed the random number generator. We preserve microsecond resolution. |
434 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16); | 403 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16); |
435 srandom(static_cast<unsigned int>(seed)); | 404 srandom(static_cast<unsigned int>(seed)); |
436 limit_mutex = new Mutex(); | |
437 } | 405 } |
438 | 406 |
439 | 407 |
440 void OS::TearDown() { | 408 void OS::TearDown() { |
441 delete limit_mutex; | |
442 } | 409 } |
443 | 410 |
444 | 411 |
445 } } // namespace v8::internal | 412 } } // namespace v8::internal |