OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 78 matching lines...) |
89 return tzname[0]; // The location of the timezone string on Solaris. | 89 return tzname[0]; // The location of the timezone string on Solaris. |
90 } | 90 } |
91 | 91 |
92 | 92 |
93 double OS::LocalTimeOffset() { | 93 double OS::LocalTimeOffset() { |
94 tzset(); | 94 tzset(); |
95 return -static_cast<double>(timezone * msPerSecond); | 95 return -static_cast<double>(timezone * msPerSecond); |
96 } | 96 } |
97 | 97 |
98 | 98 |
99 void* OS::Allocate(const size_t requested, | |
100 size_t* allocated, | |
101 bool is_executable) { | |
102 const size_t msize = RoundUp(requested, getpagesize()); | |
103 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); | |
104 void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); | |
105 | |
106 if (mbase == MAP_FAILED) { | |
107 LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed")); | |
108 return NULL; | |
109 } | |
110 *allocated = msize; | |
111 return mbase; | |
112 } | |
113 | |
114 | |
115 void OS::DumpBacktrace() { | 99 void OS::DumpBacktrace() { |
116 // Currently unsupported. | 100 // Currently unsupported. |
117 } | 101 } |
118 | 102 |
119 | 103 |
120 class PosixMemoryMappedFile : public OS::MemoryMappedFile { | 104 class PosixMemoryMappedFile : public OS::MemoryMappedFile { |
121 public: | 105 public: |
122 PosixMemoryMappedFile(FILE* file, void* memory, int size) | 106 PosixMemoryMappedFile(FILE* file, void* memory, int size) |
123 : file_(file), memory_(memory), size_(size) { } | 107 : file_(file), memory_(memory), size_(size) { } |
124 virtual ~PosixMemoryMappedFile(); | 108 virtual ~PosixMemoryMappedFile(); |
(...skipping 92 matching lines...) |
217 | 201 |
218 if (getcontext(&ctx) < 0) return kStackWalkError; | 202 if (getcontext(&ctx) < 0) return kStackWalkError; |
219 | 203 |
220 if (!walkcontext(&ctx, StackWalkCallback, &walker)) { | 204 if (!walkcontext(&ctx, StackWalkCallback, &walker)) { |
221 return kStackWalkError; | 205 return kStackWalkError; |
222 } | 206 } |
223 | 207 |
224 return walker.index; | 208 return walker.index; |
225 } | 209 } |
226 | 210 |
227 | |
228 // Constants used for mmap. | |
229 static const int kMmapFd = -1; | |
230 static const int kMmapFdOffset = 0; | |
231 | |
232 | |
233 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } | |
234 | |
235 | |
236 VirtualMemory::VirtualMemory(size_t size) | |
237 : address_(ReserveRegion(size)), size_(size) { } | |
238 | |
239 | |
240 VirtualMemory::VirtualMemory(size_t size, size_t alignment) | |
241 : address_(NULL), size_(0) { | |
242 ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); | |
243 size_t request_size = RoundUp(size + alignment, | |
244 static_cast<intptr_t>(OS::AllocateAlignment())); | |
245 void* reservation = mmap(OS::GetRandomMmapAddr(), | |
246 request_size, | |
247 PROT_NONE, | |
248 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, | |
249 kMmapFd, | |
250 kMmapFdOffset); | |
251 if (reservation == MAP_FAILED) return; | |
252 | |
253 Address base = static_cast<Address>(reservation); | |
254 Address aligned_base = RoundUp(base, alignment); | |
255 ASSERT_LE(base, aligned_base); | |
256 | |
257 // Unmap extra memory reserved before and after the desired block. | |
258 if (aligned_base != base) { | |
259 size_t prefix_size = static_cast<size_t>(aligned_base - base); | |
260 OS::Free(base, prefix_size); | |
261 request_size -= prefix_size; | |
262 } | |
263 | |
264 size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); | |
265 ASSERT_LE(aligned_size, request_size); | |
266 | |
267 if (aligned_size != request_size) { | |
268 size_t suffix_size = request_size - aligned_size; | |
269 OS::Free(aligned_base + aligned_size, suffix_size); | |
270 request_size -= suffix_size; | |
271 } | |
272 | |
273 ASSERT(aligned_size == request_size); | |
274 | |
275 address_ = static_cast<void*>(aligned_base); | |
276 size_ = aligned_size; | |
277 } | |
278 | |
279 | |
280 VirtualMemory::~VirtualMemory() { | |
281 if (IsReserved()) { | |
282 bool result = ReleaseRegion(address(), size()); | |
283 ASSERT(result); | |
284 USE(result); | |
285 } | |
286 } | |
287 | |
288 | |
289 bool VirtualMemory::IsReserved() { | |
290 return address_ != NULL; | |
291 } | |
292 | |
293 | |
294 void VirtualMemory::Reset() { | |
295 address_ = NULL; | |
296 size_ = 0; | |
297 } | |
298 | |
299 | |
300 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { | |
301 return CommitRegion(address, size, is_executable); | |
302 } | |
303 | |
304 | |
305 bool VirtualMemory::Uncommit(void* address, size_t size) { | |
306 return UncommitRegion(address, size); | |
307 } | |
308 | |
309 | |
310 bool VirtualMemory::Guard(void* address) { | |
311 OS::Guard(address, OS::CommitPageSize()); | |
312 return true; | |
313 } | |
314 | |
315 | |
316 void* VirtualMemory::ReserveRegion(size_t size) { | |
317 void* result = mmap(OS::GetRandomMmapAddr(), | |
318 size, | |
319 PROT_NONE, | |
320 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, | |
321 kMmapFd, | |
322 kMmapFdOffset); | |
323 | |
324 if (result == MAP_FAILED) return NULL; | |
325 | |
326 return result; | |
327 } | |
328 | |
329 | |
330 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { | |
331 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); | |
332 if (MAP_FAILED == mmap(base, | |
333 size, | |
334 prot, | |
335 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, | |
336 kMmapFd, | |
337 kMmapFdOffset)) { | |
338 return false; | |
339 } | |
340 return true; | |
341 } | |
342 | |
343 | |
344 bool VirtualMemory::UncommitRegion(void* base, size_t size) { | |
345 return mmap(base, | |
346 size, | |
347 PROT_NONE, | |
348 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, | |
349 kMmapFd, | |
350 kMmapFdOffset) != MAP_FAILED; | |
351 } | |
352 | |
353 | |
354 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { | |
355 return munmap(base, size) == 0; | |
356 } | |
357 | |
358 | |
359 bool VirtualMemory::HasLazyCommits() { | |
360 // TODO(alph): implement for the platform. | |
361 return false; | |
362 } | |
363 | |
364 } } // namespace v8::internal | 211 } } // namespace v8::internal |
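
Note on the removed VirtualMemory(size, alignment) constructor above: it over-reserves by the requested alignment with a PROT_NONE anonymous mapping, then unmaps the misaligned prefix and the unused suffix so only an aligned, correctly sized reservation remains. The sketch below is a minimal standalone illustration of that pattern, not V8 code: ReserveAligned and RoundUpTo are hypothetical names, and it assumes a POSIX system where unmapping part of a mapping is permitted.

// Standalone sketch of the aligned-reservation trick used by the removed
// VirtualMemory(size, alignment) constructor. Illustrative only; not V8 API.
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Round |value| up to the next multiple of |multiple|.
static size_t RoundUpTo(size_t value, size_t multiple) {
  return ((value + multiple - 1) / multiple) * multiple;
}

// Reserve |size| bytes of address space aligned to |alignment| bytes.
// The region is PROT_NONE; committing pages would be a separate
// mmap(MAP_FIXED) step, as in the removed CommitRegion. Returns NULL on
// failure.
static void* ReserveAligned(size_t size, size_t alignment) {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));

  // Over-reserve by the alignment so an aligned start is guaranteed to
  // exist somewhere inside the reservation.
  size_t request_size = RoundUpTo(size + alignment, page);
  void* reservation = mmap(NULL, request_size, PROT_NONE,
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0);
  if (reservation == MAP_FAILED) return NULL;

  uintptr_t base = reinterpret_cast<uintptr_t>(reservation);
  uintptr_t aligned_base = RoundUpTo(static_cast<size_t>(base), alignment);

  // Unmap the prefix that precedes the aligned start, if any.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    munmap(reservation, prefix_size);
    request_size -= prefix_size;
  }

  // Unmap the suffix past the rounded-up usable size, if any.
  size_t aligned_size = RoundUpTo(size, page);
  if (aligned_size < request_size) {
    munmap(reinterpret_cast<void*>(aligned_base + aligned_size),
           request_size - aligned_size);
  }

  return reinterpret_cast<void*>(aligned_base);
}

int main() {
  void* p = ReserveAligned(1 << 20, 1 << 16);  // 1 MB region, 64 KB aligned.
  std::printf("reserved at %p\n", p);
  if (p != NULL) munmap(p, 1 << 20);
  return 0;
}

Because the reservation is PROT_NONE with MAP_NORESERVE, no pages are committed up front; the removed CommitRegion performed the actual commit later by mapping PROT_READ|PROT_WRITE pages over the reserved range with MAP_FIXED.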