OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 119 matching lines...)
130 | 130 |
131 double OS::LocalTimeOffset() { | 131 double OS::LocalTimeOffset() { |
132 time_t tv = time(NULL); | 132 time_t tv = time(NULL); |
133 struct tm* t = localtime(&tv); | 133 struct tm* t = localtime(&tv); |
134 // tm_gmtoff includes any daylight savings offset, so subtract it. | 134 // tm_gmtoff includes any daylight savings offset, so subtract it. |
135 return static_cast<double>(t->tm_gmtoff * msPerSecond - | 135 return static_cast<double>(t->tm_gmtoff * msPerSecond - |
136 (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); | 136 (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); |
137 } | 137 } |
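
The tm_gmtoff logic above is easy to verify in isolation. A minimal standalone sketch of the same computation, assuming glibc/BSD's tm_gmtoff extension to struct tm and V8's msPerSecond constant of 1000:

    #include <cstdio>
    #include <ctime>

    int main() {
      const int msPerSecond = 1000;
      time_t tv = time(NULL);
      struct tm* t = localtime(&tv);
      // tm_gmtoff already folds in any DST shift, so subtract it back out
      // to recover the standard-time UTC offset, in milliseconds.
      double offset = static_cast<double>(t->tm_gmtoff) * msPerSecond -
                      (t->tm_isdst > 0 ? 3600 * msPerSecond : 0);
      printf("%.0f\n", offset);
      return 0;
    }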
138 | 138 |
139 | 139 |
140 void* OS::Allocate(const size_t requested, | |
141 size_t* allocated, | |
142 bool is_executable) { | |
143 const size_t msize = RoundUp(requested, AllocateAlignment()); | |
144 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); | |
145 void* addr = OS::GetRandomMmapAddr(); | |
146 void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | |
147 if (mbase == MAP_FAILED) { | |
148 LOG(i::Isolate::Current(), | |
149 StringEvent("OS::Allocate", "mmap failed")); | |
150 return NULL; | |
151 } | |
152 *allocated = msize; | |
153 return mbase; | |
154 } | |
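
The deleted OS::Allocate is the stock anonymous-mmap allocator (this CL drops the Linux copy; the logging and the RoundUp/AllocateAlignment helpers belong to V8's platform layer). A self-contained sketch of the same pattern, with a hypothetical name and a page-size round-up standing in for those helpers:

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstddef>

    // Round the request up to whole pages and map it anonymously.
    void* AllocatePages(size_t requested, size_t* allocated, bool is_executable) {
      size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      size_t msize = (requested + page - 1) & ~(page - 1);
      int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
      void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (mbase == MAP_FAILED) return NULL;  // mmap signals failure, caller sees NULL
      *allocated = msize;
      return mbase;
    }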
155 | |
156 | |
157 void OS::DumpBacktrace() { | 140 void OS::DumpBacktrace() { |
158 // backtrace is a glibc extension. | 141 // backtrace is a glibc extension. |
159 #if defined(__GLIBC__) && !defined(__UCLIBC__) | 142 #if defined(__GLIBC__) && !defined(__UCLIBC__) |
160 POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace(); | 143 POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace(); |
161 #endif | 144 #endif |
162 } | 145 } |
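
POSIXBacktraceHelper wraps the two glibc calls named in its template arguments; a minimal sketch of that API on its own (DumpBacktraceSketch is an illustrative name):

    #include <execinfo.h>
    #include <cstdio>
    #include <cstdlib>

    void DumpBacktraceSketch() {
      void* frames[64];
      int depth = backtrace(frames, 64);                   // fill in return addresses
      char** symbols = backtrace_symbols(frames, depth);   // malloc'd string array
      if (symbols == NULL) return;                         // allocation can fail
      for (int i = 0; i < depth; i++) printf("%s\n", symbols[i]);
      free(symbols);
    }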
163 | 146 |
164 | 147 |
165 class PosixMemoryMappedFile : public OS::MemoryMappedFile { | 148 class PosixMemoryMappedFile : public OS::MemoryMappedFile { |
166 public: | 149 public: |
(...skipping 10 matching lines...)
177 | 160 |
178 | 161 |
179 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { | 162 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { |
180 FILE* file = fopen(name, "r+"); | 163 FILE* file = fopen(name, "r+"); |
181 if (file == NULL) return NULL; | 164 if (file == NULL) return NULL; |
182 | 165 |
183 fseek(file, 0, SEEK_END); | 166 fseek(file, 0, SEEK_END); |
184 int size = ftell(file); | 167 int size = ftell(file); |
185 | 168 |
186 void* memory = | 169 void* memory = |
187 mmap(OS::GetRandomMmapAddr(), | 170 mmap(NULL, |
188 size, | 171 size, |
189 PROT_READ | PROT_WRITE, | 172 PROT_READ | PROT_WRITE, |
190 MAP_SHARED, | 173 MAP_SHARED, |
191 fileno(file), | 174 fileno(file), |
192 0); | 175 0); |
| 176 if (memory == MAP_FAILED) { |
| 177 fclose(file); |
| 178 return NULL; |
| 179 } |
193 return new PosixMemoryMappedFile(file, memory, size); | 180 return new PosixMemoryMappedFile(file, memory, size); |
194 } | 181 } |
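
The added check matters because mmap reports failure with MAP_FAILED ((void*)-1), not NULL, so the old code could hand a bogus mapping to PosixMemoryMappedFile. A sketch of the pattern the new code adopts (MapWholeFile is a hypothetical helper):

    #include <sys/mman.h>
    #include <cstdio>

    void* MapWholeFile(FILE* file, long size) {
      void* memory = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                          fileno(file), 0);
      if (memory == MAP_FAILED) {
        fclose(file);   // don't leak the FILE* on failure
        return NULL;    // callers test against NULL, not MAP_FAILED
      }
      return memory;
    }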
195 | 182 |
196 | 183 |
197 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, | 184 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, |
198 void* initial) { | 185 void* initial) { |
199 FILE* file = fopen(name, "w+"); | 186 FILE* file = fopen(name, "w+"); |
200 if (file == NULL) return NULL; | 187 if (file == NULL) return NULL; |
201 int result = fwrite(initial, size, 1, file); | 188 int result = fwrite(initial, size, 1, file); |
202 if (result < 1) { | 189 if (result < 1) { |
203 fclose(file); | 190 fclose(file); |
204 return NULL; | 191 return NULL; |
205 } | 192 } |
206 void* memory = | 193 void* memory = |
207 mmap(OS::GetRandomMmapAddr(), | 194 mmap(NULL, |
208 size, | 195 size, |
209 PROT_READ | PROT_WRITE, | 196 PROT_READ | PROT_WRITE, |
210 MAP_SHARED, | 197 MAP_SHARED, |
211 fileno(file), | 198 fileno(file), |
212 0); | 199 0); |
| 200 if (memory == MAP_FAILED) { |
| 201 fclose(file); |
| 202 return NULL; |
| 203 } |
213 return new PosixMemoryMappedFile(file, memory, size); | 204 return new PosixMemoryMappedFile(file, memory, size); |
214 } | 205 } |
215 | 206 |
216 | 207 |
217 PosixMemoryMappedFile::~PosixMemoryMappedFile() { | 208 PosixMemoryMappedFile::~PosixMemoryMappedFile() { |
218 if (memory_) OS::Free(memory_, size_); | 209 int result = munmap(memory_, size_); |
| 210 ASSERT_EQ(0, result); |
| 211 USE(result); |
219 fclose(file_); | 212 fclose(file_); |
220 } | 213 } |
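
The new destructor checks munmap's return value with the assert-and-USE idiom: in release builds ASSERT_EQ compiles away, and USE(result) keeps the compiler from warning about a now-unused variable. A sketch with the standard assert standing in for V8's macros:

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>

    void UnmapChecked(void* memory, size_t size) {
      int result = munmap(memory, size);
      assert(result == 0);  // compiled out under NDEBUG
      (void)result;         // the moral equivalent of V8's USE(result)
    }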
221 | 214 |
222 | 215 |
223 void OS::LogSharedLibraryAddresses() { | 216 void OS::LogSharedLibraryAddresses() { |
224 // This function assumes that the layout of the file is as follows: | 217 // This function assumes that the layout of the file is as follows: |
225 // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] | 218 // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] |
226 // If we encounter an unexpected situation we abort scanning further entries. | 219 // If we encounter an unexpected situation we abort scanning further entries. |
227 FILE* fp = fopen("/proc/self/maps", "r"); | 220 FILE* fp = fopen("/proc/self/maps", "r"); |
228 if (fp == NULL) return; | 221 if (fp == NULL) return; |
(...skipping 60 matching lines...)
289 // do a mmap with a name known by ll_prof.py and immediately munmap | 282 // do a mmap with a name known by ll_prof.py and immediately munmap |
290 // it. This injects a GC marker into the stream of events generated | 283 // it. This injects a GC marker into the stream of events generated |
291 // by the kernel and allows us to synchronize V8 code log and the | 284 // by the kernel and allows us to synchronize V8 code log and the |
292 // kernel log. | 285 // kernel log. |
293 int size = sysconf(_SC_PAGESIZE); | 286 int size = sysconf(_SC_PAGESIZE); |
294 FILE* f = fopen(FLAG_gc_fake_mmap, "w+"); | 287 FILE* f = fopen(FLAG_gc_fake_mmap, "w+"); |
295 if (f == NULL) { | 288 if (f == NULL) { |
296 OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap); | 289 OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap); |
297 OS::Abort(); | 290 OS::Abort(); |
298 } | 291 } |
299 void* addr = mmap(OS::GetRandomMmapAddr(), | 292 void* addr = mmap(NULL, |
300 size, | 293 size, |
301 #if defined(__native_client__) | 294 #if defined(__native_client__) |
302 // The Native Client port of V8 uses an interpreter, | 295 // The Native Client port of V8 uses an interpreter, |
303 // so code pages don't need PROT_EXEC. | 296 // so code pages don't need PROT_EXEC. |
304 PROT_READ, | 297 PROT_READ, |
305 #else | 298 #else |
306 PROT_READ | PROT_EXEC, | 299 PROT_READ | PROT_EXEC, |
307 #endif | 300 #endif |
308 MAP_PRIVATE, | 301 MAP_PRIVATE, |
309 fileno(f), | 302 fileno(f), |
310 0); | 303 0); |
311 ASSERT(addr != MAP_FAILED); | 304 ASSERT(addr != MAP_FAILED); |
312 OS::Free(addr, size); | 305 int result = munmap(addr, size); |
| 306 ASSERT_EQ(0, result); |
| 307 USE(result); |
313 fclose(f); | 308 fclose(f); |
314 } | 309 } |
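
The marker works because the kernel logs every mmap of a named file into the perf event stream, so mapping and immediately unmapping one page of the FLAG_gc_fake_mmap file timestamps the GC in the same stream ll_prof.py reads. A condensed sketch (EmitGCMarker and marker_path are illustrative names):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdio>

    void EmitGCMarker(const char* marker_path) {
      long size = sysconf(_SC_PAGESIZE);
      FILE* f = fopen(marker_path, "w+");
      if (f == NULL) return;
      // The mapping is never touched; its only job is the kernel-side event.
      void* addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fileno(f), 0);
      if (addr != MAP_FAILED) munmap(addr, size);
      fclose(f);
    }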
315 | 310 |
316 | 311 |
317 int OS::StackWalk(Vector<OS::StackFrame> frames) { | 312 int OS::StackWalk(Vector<OS::StackFrame> frames) { |
318 // backtrace is a glibc extension. | 313 // backtrace is a glibc extension. |
319 #if defined(__GLIBC__) && !defined(__UCLIBC__) | 314 #if defined(__GLIBC__) && !defined(__UCLIBC__) |
320 return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames); | 315 return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames); |
321 #else | 316 #else |
322 return 0; | 317 return 0; |
323 #endif | 318 #endif |
324 } | 319 } |
325 | 320 |
326 | |
327 // Constants used for mmap. | |
328 static const int kMmapFd = -1; | |
329 static const int kMmapFdOffset = 0; | |
330 | |
331 | |
332 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } | |
333 | |
334 | |
335 VirtualMemory::VirtualMemory(size_t size) | |
336 : address_(ReserveRegion(size)), size_(size) { } | |
337 | |
338 | |
339 VirtualMemory::VirtualMemory(size_t size, size_t alignment) | |
340 : address_(NULL), size_(0) { | |
341 ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); | |
342 size_t request_size = RoundUp(size + alignment, | |
343 static_cast<intptr_t>(OS::AllocateAlignment())); | |
344 void* reservation = mmap(OS::GetRandomMmapAddr(), | |
345 request_size, | |
346 PROT_NONE, | |
347 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, | |
348 kMmapFd, | |
349 kMmapFdOffset); | |
350 if (reservation == MAP_FAILED) return; | |
351 | |
352 Address base = static_cast<Address>(reservation); | |
353 Address aligned_base = RoundUp(base, alignment); | |
354 ASSERT_LE(base, aligned_base); | |
355 | |
356 // Unmap extra memory reserved before and after the desired block. | |
357 if (aligned_base != base) { | |
358 size_t prefix_size = static_cast<size_t>(aligned_base - base); | |
359 OS::Free(base, prefix_size); | |
360 request_size -= prefix_size; | |
361 } | |
362 | |
363 size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); | |
364 ASSERT_LE(aligned_size, request_size); | |
365 | |
366 if (aligned_size != request_size) { | |
367 size_t suffix_size = request_size - aligned_size; | |
368 OS::Free(aligned_base + aligned_size, suffix_size); | |
369 request_size -= suffix_size; | |
370 } | |
371 | |
372 ASSERT(aligned_size == request_size); | |
373 | |
374 address_ = static_cast<void*>(aligned_base); | |
375 size_ = aligned_size; | |
376 } | |
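
The constructor above uses the standard over-reserve-and-trim trick: reserve size + alignment bytes of PROT_NONE address space, then munmap the misaligned prefix and the leftover suffix so exactly the aligned block remains. A stripped-down sketch (ReserveAligned is illustrative; alignment must be a power of two and size a multiple of the page size):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    void* ReserveAligned(size_t size, size_t alignment) {
      size_t request = size + alignment;
      void* raw = mmap(NULL, request, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (raw == MAP_FAILED) return NULL;
      char* base = static_cast<char*>(raw);
      uintptr_t addr = reinterpret_cast<uintptr_t>(base);
      uintptr_t aligned = (addr + alignment - 1) & ~(alignment - 1);
      size_t prefix = static_cast<size_t>(aligned - addr);
      if (prefix != 0) munmap(base, prefix);                  // trim the front
      size_t suffix = request - prefix - size;
      if (suffix != 0) munmap(base + prefix + size, suffix);  // trim the back
      return base + prefix;
    }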
377 | |
378 | |
379 VirtualMemory::~VirtualMemory() { | |
380 if (IsReserved()) { | |
381 bool result = ReleaseRegion(address(), size()); | |
382 ASSERT(result); | |
383 USE(result); | |
384 } | |
385 } | |
386 | |
387 | |
388 bool VirtualMemory::IsReserved() { | |
389 return address_ != NULL; | |
390 } | |
391 | |
392 | |
393 void VirtualMemory::Reset() { | |
394 address_ = NULL; | |
395 size_ = 0; | |
396 } | |
397 | |
398 | |
399 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { | |
400 return CommitRegion(address, size, is_executable); | |
401 } | |
402 | |
403 | |
404 bool VirtualMemory::Uncommit(void* address, size_t size) { | |
405 return UncommitRegion(address, size); | |
406 } | |
407 | |
408 | |
409 bool VirtualMemory::Guard(void* address) { | |
410 OS::Guard(address, OS::CommitPageSize()); | |
411 return true; | |
412 } | |
413 | |
414 | |
415 void* VirtualMemory::ReserveRegion(size_t size) { | |
416 void* result = mmap(OS::GetRandomMmapAddr(), | |
417 size, | |
418 PROT_NONE, | |
419 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, | |
420 kMmapFd, | |
421 kMmapFdOffset); | |
422 | |
423 if (result == MAP_FAILED) return NULL; | |
424 | |
425 return result; | |
426 } | |
427 | |
428 | |
429 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { | |
430 #if defined(__native_client__) | |
431 // The Native Client port of V8 uses an interpreter, | |
432 // so code pages don't need PROT_EXEC. | |
433 int prot = PROT_READ | PROT_WRITE; | |
434 #else | |
435 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); | |
436 #endif | |
437 if (MAP_FAILED == mmap(base, | |
438 size, | |
439 prot, | |
440 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, | |
441 kMmapFd, | |
442 kMmapFdOffset)) { | |
443 return false; | |
444 } | |
445 | |
446 return true; | |
447 } | |
448 | |
449 | |
450 bool VirtualMemory::UncommitRegion(void* base, size_t size) { | |
451 return mmap(base, | |
452 size, | |
453 PROT_NONE, | |
454 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, | |
455 kMmapFd, | |
456 kMmapFdOffset) != MAP_FAILED; | |
457 } | |
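
Taken together, ReserveRegion, CommitRegion, UncommitRegion, and ReleaseRegion implement a reserve/commit/uncommit lifecycle: PROT_NONE plus MAP_NORESERVE claims address space without backing it, and a MAP_FIXED remap over the same range flips pages between committed and uncommitted. A minimal end-to-end sketch:

    #include <sys/mman.h>
    #include <cstddef>

    int main() {
      const size_t size = 1 << 20;
      // Reserve: inaccessible, unbacked address space.
      void* base = mmap(NULL, size, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (base == MAP_FAILED) return 1;
      // Commit: remap the same range read/write in place.
      if (mmap(base, size, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
        return 1;
      static_cast<char*>(base)[0] = 42;  // now safe to touch
      // Uncommit: drop the pages but keep the reservation.
      mmap(base, size, PROT_NONE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, -1, 0);
      // Release: give the whole range back.
      return munmap(base, size) == 0 ? 0 : 1;
    }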
458 | |
459 | |
460 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { | |
461 return munmap(base, size) == 0; | |
462 } | |
463 | |
464 | |
465 bool VirtualMemory::HasLazyCommits() { | |
466 return true; | |
467 } | |
468 | |
469 } } // namespace v8::internal | 321 } } // namespace v8::internal |