OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
74 | 74 |
75 double OS::LocalTimeOffset() { | 75 double OS::LocalTimeOffset() { |
76 time_t tv = time(NULL); | 76 time_t tv = time(NULL); |
77 struct tm* t = localtime(&tv); | 77 struct tm* t = localtime(&tv); |
78 // tm_gmtoff includes any daylight savings offset, so subtract it. | 78 // tm_gmtoff includes any daylight savings offset, so subtract it. |
79 return static_cast<double>(t->tm_gmtoff * msPerSecond - | 79 return static_cast<double>(t->tm_gmtoff * msPerSecond - |
80 (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); | 80 (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); |
81 } | 81 } |
82 | 82 |
83 | 83 |
84 void* OS::Allocate(const size_t requested, | |
85 size_t* allocated, | |
86 bool executable) { | |
87 const size_t msize = RoundUp(requested, getpagesize()); | |
88 int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0); | |
89 void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); | |
90 | |
91 if (mbase == MAP_FAILED) { | |
92 LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed")); | |
93 return NULL; | |
94 } | |
95 *allocated = msize; | |
96 return mbase; | |
97 } | |
98 | |
99 | |
100 void OS::DumpBacktrace() { | 84 void OS::DumpBacktrace() { |
101 POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace(); | 85 POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace(); |
102 } | 86 } |
103 | 87 |
104 | 88 |
105 class PosixMemoryMappedFile : public OS::MemoryMappedFile { | 89 class PosixMemoryMappedFile : public OS::MemoryMappedFile { |
106 public: | 90 public: |
107 PosixMemoryMappedFile(FILE* file, void* memory, int size) | 91 PosixMemoryMappedFile(FILE* file, void* memory, int size) |
108 : file_(file), memory_(memory), size_(size) { } | 92 : file_(file), memory_(memory), size_(size) { } |
109 virtual ~PosixMemoryMappedFile(); | 93 virtual ~PosixMemoryMappedFile(); |
(...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
196 | 180 |
197 | 181 |
// Intentionally a no-op on this platform: there is no code-moving GC
// notification mechanism implemented here.
void OS::SignalCodeMovingGC() {
}
200 | 184 |
201 | 185 |
202 int OS::StackWalk(Vector<OS::StackFrame> frames) { | 186 int OS::StackWalk(Vector<OS::StackFrame> frames) { |
203 return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames); | 187 return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames); |
204 } | 188 } |
205 | 189 |
206 | |
// Constants used for mmap.
// kMmapFd: fd argument for anonymous mappings (conventionally -1 with
// MAP_ANON).  kMmapFdOffset: file offset, which must be 0 for anonymous
// mappings.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
210 | |
211 | |
// Creates an empty placeholder: no address space is reserved.
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
213 | |
214 | |
// Reserves (but does not commit) |size| bytes of address space.
// NOTE(review): if ReserveRegion() fails, address_ is NULL but size_ is
// still set to |size|; IsReserved() correctly reports false, but size()
// is then misleading — confirm callers never read size() when unreserved.
VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }
217 | |
218 | |
219 VirtualMemory::VirtualMemory(size_t size, size_t alignment) | |
220 : address_(NULL), size_(0) { | |
221 ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); | |
222 size_t request_size = RoundUp(size + alignment, | |
223 static_cast<intptr_t>(OS::AllocateAlignment())); | |
224 void* reservation = mmap(OS::GetRandomMmapAddr(), | |
225 request_size, | |
226 PROT_NONE, | |
227 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, | |
228 kMmapFd, | |
229 kMmapFdOffset); | |
230 if (reservation == MAP_FAILED) return; | |
231 | |
232 Address base = static_cast<Address>(reservation); | |
233 Address aligned_base = RoundUp(base, alignment); | |
234 ASSERT_LE(base, aligned_base); | |
235 | |
236 // Unmap extra memory reserved before and after the desired block. | |
237 if (aligned_base != base) { | |
238 size_t prefix_size = static_cast<size_t>(aligned_base - base); | |
239 OS::Free(base, prefix_size); | |
240 request_size -= prefix_size; | |
241 } | |
242 | |
243 size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); | |
244 ASSERT_LE(aligned_size, request_size); | |
245 | |
246 if (aligned_size != request_size) { | |
247 size_t suffix_size = request_size - aligned_size; | |
248 OS::Free(aligned_base + aligned_size, suffix_size); | |
249 request_size -= suffix_size; | |
250 } | |
251 | |
252 ASSERT(aligned_size == request_size); | |
253 | |
254 address_ = static_cast<void*>(aligned_base); | |
255 size_ = aligned_size; | |
256 } | |
257 | |
258 | |
259 VirtualMemory::~VirtualMemory() { | |
260 if (IsReserved()) { | |
261 bool result = ReleaseRegion(address(), size()); | |
262 ASSERT(result); | |
263 USE(result); | |
264 } | |
265 } | |
266 | |
267 | |
268 bool VirtualMemory::IsReserved() { | |
269 return address_ != NULL; | |
270 } | |
271 | |
272 | |
273 void VirtualMemory::Reset() { | |
274 address_ = NULL; | |
275 size_ = 0; | |
276 } | |
277 | |
278 | |
// Commits |size| bytes at |address| as read/write (and executable if
// requested).  Pure delegation to the static CommitRegion().
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}
282 | |
283 | |
// Returns |size| bytes at |address| to the uncommitted (inaccessible)
// state.  Pure delegation to the static UncommitRegion().
bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}
287 | |
288 | |
// Installs a no-access guard page at |address|.  OS::Guard reports no
// failure here, so this always returns true.
bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}
293 | |
294 | |
295 void* VirtualMemory::ReserveRegion(size_t size) { | |
296 void* result = mmap(OS::GetRandomMmapAddr(), | |
297 size, | |
298 PROT_NONE, | |
299 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, | |
300 kMmapFd, | |
301 kMmapFdOffset); | |
302 | |
303 if (result == MAP_FAILED) return NULL; | |
304 | |
305 return result; | |
306 } | |
307 | |
308 | |
309 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { | |
310 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); | |
311 if (MAP_FAILED == mmap(base, | |
312 size, | |
313 prot, | |
314 MAP_PRIVATE | MAP_ANON | MAP_FIXED, | |
315 kMmapFd, | |
316 kMmapFdOffset)) { | |
317 return false; | |
318 } | |
319 return true; | |
320 } | |
321 | |
322 | |
323 bool VirtualMemory::UncommitRegion(void* base, size_t size) { | |
324 return mmap(base, | |
325 size, | |
326 PROT_NONE, | |
327 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, | |
328 kMmapFd, | |
329 kMmapFdOffset) != MAP_FAILED; | |
330 } | |
331 | |
332 | |
333 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { | |
334 return munmap(base, size) == 0; | |
335 } | |
336 | |
337 | |
// Whether the OS commits pages lazily on first touch.  Not yet determined
// for this platform, so conservatively report false.
bool VirtualMemory::HasLazyCommits() {
  // TODO(alph): implement for the platform.
  return false;
}
342 | |
343 } } // namespace v8::internal | 190 } } // namespace v8::internal |
OLD | NEW |