OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
72 __attribute__((weak_import)); | 72 __attribute__((weak_import)); |
73 extern void backtrace_symbols_fd(void* const*, int, int) | 73 extern void backtrace_symbols_fd(void* const*, int, int) |
74 __attribute__((weak_import)); | 74 __attribute__((weak_import)); |
75 } | 75 } |
76 | 76 |
77 | 77 |
78 namespace v8 { | 78 namespace v8 { |
79 namespace internal { | 79 namespace internal { |
80 | 80 |
81 | 81 |
82 static Mutex* limit_mutex = NULL; | |
83 | |
84 | |
85 // We keep the lowest and highest addresses mapped as a quick way of | |
86 // determining that pointers are outside the heap (used mostly in assertions | |
87 // and verification). The estimate is conservative, i.e., not all addresses in | |
88 // 'allocated' space are actually allocated to our heap. The range is | |
89 // [lowest, highest), inclusive on the low end and exclusive on the high end. | 
90 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1); | |
91 static void* highest_ever_allocated = reinterpret_cast<void*>(0); | |
92 | |
93 | |
94 static void UpdateAllocatedSpaceLimits(void* address, int size) { | |
95 ASSERT(limit_mutex != NULL); | |
96 LockGuard<Mutex> lock(limit_mutex); | |
97 | |
98 lowest_ever_allocated = Min(lowest_ever_allocated, address); | |
99 highest_ever_allocated = | |
100 Max(highest_ever_allocated, | |
101 reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); | |
102 } | |
103 | |
104 | |
105 bool OS::IsOutsideAllocatedSpace(void* address) { | |
106 return address < lowest_ever_allocated || address >= highest_ever_allocated; | |
107 } | |
108 | |
109 | |
110 // Constants used for mmap. | 82 // Constants used for mmap. |
111 // kMmapFd is used to pass vm_alloc flags to tag the region with the user | 83 // kMmapFd is used to pass vm_alloc flags to tag the region with the user |
112 // defined tag 255. This helps identify V8-allocated regions in memory analysis | 84 // defined tag 255. This helps identify V8-allocated regions in memory analysis |
113 // tools like vmmap(1). | 85 // tools like vmmap(1). |
114 static const int kMmapFd = VM_MAKE_TAG(255); | 86 static const int kMmapFd = VM_MAKE_TAG(255); |
115 static const off_t kMmapFdOffset = 0; | 87 static const off_t kMmapFdOffset = 0; |
116 | 88 |
117 | 89 |
118 void* OS::Allocate(const size_t requested, | 90 void* OS::Allocate(const size_t requested, |
119 size_t* allocated, | 91 size_t* allocated, |
120 bool is_executable) { | 92 bool is_executable) { |
121 const size_t msize = RoundUp(requested, getpagesize()); | 93 const size_t msize = RoundUp(requested, getpagesize()); |
122 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); | 94 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); |
123 void* mbase = mmap(OS::GetRandomMmapAddr(), | 95 void* mbase = mmap(OS::GetRandomMmapAddr(), |
124 msize, | 96 msize, |
125 prot, | 97 prot, |
126 MAP_PRIVATE | MAP_ANON, | 98 MAP_PRIVATE | MAP_ANON, |
127 kMmapFd, | 99 kMmapFd, |
128 kMmapFdOffset); | 100 kMmapFdOffset); |
129 if (mbase == MAP_FAILED) { | 101 if (mbase == MAP_FAILED) { |
130 LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed")); | 102 LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed")); |
131 return NULL; | 103 return NULL; |
132 } | 104 } |
133 *allocated = msize; | 105 *allocated = msize; |
134 UpdateAllocatedSpaceLimits(mbase, msize); | |
135 return mbase; | 106 return mbase; |
136 } | 107 } |
137 | 108 |
138 | 109 |
139 void OS::DumpBacktrace() { | 110 void OS::DumpBacktrace() { |
140 // If weak link to execinfo lib has failed, i.e. because we are on 10.4, abort. | 111 // If weak link to execinfo lib has failed, i.e. because we are on 10.4, abort. |
141 if (backtrace == NULL) return; | 112 if (backtrace == NULL) return; |
142 | 113 |
143 POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace(); | 114 POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace(); |
144 } | 115 } |
(...skipping 214 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
359 bool is_executable) { | 330 bool is_executable) { |
360 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); | 331 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); |
361 if (MAP_FAILED == mmap(address, | 332 if (MAP_FAILED == mmap(address, |
362 size, | 333 size, |
363 prot, | 334 prot, |
364 MAP_PRIVATE | MAP_ANON | MAP_FIXED, | 335 MAP_PRIVATE | MAP_ANON | MAP_FIXED, |
365 kMmapFd, | 336 kMmapFd, |
366 kMmapFdOffset)) { | 337 kMmapFdOffset)) { |
367 return false; | 338 return false; |
368 } | 339 } |
369 | |
370 UpdateAllocatedSpaceLimits(address, size); | |
371 return true; | 340 return true; |
372 } | 341 } |
373 | 342 |
374 | 343 |
375 bool VirtualMemory::UncommitRegion(void* address, size_t size) { | 344 bool VirtualMemory::UncommitRegion(void* address, size_t size) { |
376 return mmap(address, | 345 return mmap(address, |
377 size, | 346 size, |
378 PROT_NONE, | 347 PROT_NONE, |
379 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, | 348 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, |
380 kMmapFd, | 349 kMmapFd, |
381 kMmapFdOffset) != MAP_FAILED; | 350 kMmapFdOffset) != MAP_FAILED; |
382 } | 351 } |
383 | 352 |
384 | 353 |
385 bool VirtualMemory::ReleaseRegion(void* address, size_t size) { | 354 bool VirtualMemory::ReleaseRegion(void* address, size_t size) { |
386 return munmap(address, size) == 0; | 355 return munmap(address, size) == 0; |
387 } | 356 } |
388 | 357 |
389 | 358 |
390 bool VirtualMemory::HasLazyCommits() { | 359 bool VirtualMemory::HasLazyCommits() { |
391 return false; | 360 return false; |
392 } | 361 } |
393 | 362 |
394 | 363 |
395 void OS::SetUp() { | 364 void OS::SetUp() { |
396 // Seed the random number generator. We preserve microsecond resolution. | 365 // Seed the random number generator. We preserve microsecond resolution. |
397 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16); | 366 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16); |
398 srandom(static_cast<unsigned int>(seed)); | 367 srandom(static_cast<unsigned int>(seed)); |
399 limit_mutex = new Mutex(); | |
400 } | 368 } |
401 | 369 |
402 | 370 |
403 void OS::TearDown() { | 371 void OS::TearDown() { |
404 delete limit_mutex; | |
405 } | 372 } |
406 | 373 |
407 | 374 |
408 } } // namespace v8::internal | 375 } } // namespace v8::internal |
OLD | NEW |