OLD | NEW |
1 // Copyright 2006-2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
11 // with the distribution. | 11 // with the distribution. |
(...skipping 14 matching lines...) |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | 27 |
28 // Platform specific code for OpenBSD goes here. For the POSIX compatible parts | 28 // Platform specific code for OpenBSD goes here. For the POSIX compatible parts |
29 // the implementation is in platform-posix.cc. | 29 // the implementation is in platform-posix.cc. |
30 | 30 |
31 #include <pthread.h> | 31 #include <pthread.h> |
32 #include <semaphore.h> | 32 #include <semaphore.h> |
33 #include <signal.h> | 33 #include <signal.h> |
34 #include <sys/time.h> | 34 #include <sys/time.h> |
35 #include <sys/resource.h> | 35 #include <sys/resource.h> |
| 36 #include <sys/syscall.h> |
36 #include <sys/types.h> | 37 #include <sys/types.h> |
37 #include <stdlib.h> | 38 #include <stdlib.h> |
38 | 39 |
39 #include <sys/types.h> // mmap & munmap | 40 #include <sys/types.h> // mmap & munmap |
40 #include <sys/mman.h> // mmap & munmap | 41 #include <sys/mman.h> // mmap & munmap |
41 #include <sys/stat.h> // open | 42 #include <sys/stat.h> // open |
42 #include <sys/fcntl.h> // open | 43 #include <fcntl.h> // open |
43 #include <unistd.h> // getpagesize | 44 #include <unistd.h> // sysconf |
44 #include <execinfo.h> // backtrace, backtrace_symbols | 45 #include <execinfo.h> // backtrace, backtrace_symbols |
45 #include <strings.h> // index | 46 #include <strings.h> // index |
46 #include <errno.h> | 47 #include <errno.h> |
47 #include <stdarg.h> | 48 #include <stdarg.h> |
48 #include <limits.h> | |
49 | 49 |
50 #undef MAP_TYPE | 50 #undef MAP_TYPE |
51 | 51 |
52 #include "v8.h" | 52 #include "v8.h" |
53 #include "v8threads.h" | |
54 | 53 |
55 #include "platform.h" | 54 #include "platform.h" |
| 55 #include "v8threads.h" |
56 #include "vm-state-inl.h" | 56 #include "vm-state-inl.h" |
57 | 57 |
58 | 58 |
59 namespace v8 { | 59 namespace v8 { |
60 namespace internal { | 60 namespace internal { |
61 | 61 |
62 // 0 is never a valid thread id on OpenBSD since tids and pids share a | 62 // 0 is never a valid thread id on Linux and OpenBSD since tids and pids share a |
63 // name space and pid 0 is used to kill the group (see man 2 kill). | 63 // name space and pid 0 is reserved (see man 2 kill). |
64 static const pthread_t kNoThread = (pthread_t) 0; | 64 static const pthread_t kNoThread = (pthread_t) 0; |
65 | 65 |
66 | 66 |
67 double ceiling(double x) { | 67 double ceiling(double x) { |
68 // Correct as on OS X | 68 return ceil(x); |
69 if (-1.0 < x && x < 0.0) { | |
70 return -0.0; | |
71 } else { | |
72 return ceil(x); | |
73 } | |
74 } | 69 } |
75 | 70 |
76 | 71 |
77 static Mutex* limit_mutex = NULL; | 72 static Mutex* limit_mutex = NULL; |
78 | 73 |
79 | 74 |
| 75 static void* GetRandomMmapAddr() { |
| 76 Isolate* isolate = Isolate::UncheckedCurrent(); |
| 77 // Note that the current isolate isn't set up in a call path via |
| 78 // CpuFeatures::Probe. We don't care about randomization in this case because |
| 79 // the code page is immediately freed. |
| 80 if (isolate != NULL) { |
| 81 #ifdef V8_TARGET_ARCH_X64 |
| 82 uint64_t rnd1 = V8::RandomPrivate(isolate); |
| 83 uint64_t rnd2 = V8::RandomPrivate(isolate); |
| 84 uint64_t raw_addr = (rnd1 << 32) ^ rnd2; |
| 85 // Currently available CPUs have 48 bits of virtual addressing. Truncate |
| 86 // the hint address to 46 bits to give the kernel a fighting chance of |
| 87 // fulfilling our placement request. |
| 88 raw_addr &= V8_UINT64_C(0x3ffffffff000); |
| 89 #else |
| 90 uint32_t raw_addr = V8::RandomPrivate(isolate); |
| 91 // The range 0x20000000 - 0x60000000 is relatively unpopulated across a |
| 92 // variety of ASLR modes (PAE kernel, NX compat mode, etc). |
| 93 raw_addr &= 0x3ffff000; |
| 94 raw_addr += 0x20000000; |
| 95 #endif |
| 96 return reinterpret_cast<void*>(raw_addr); |
| 97 } |
| 98 return NULL; |
| 99 } |
| 100 |
| 101 |
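For reviewers: a quick standalone check (not part of the patch) of the 32-bit hint arithmetic in GetRandomMmapAddr above. Masking with 0x3ffff000 keeps a page-aligned offset in [0, 0x3ffff000], and adding 0x20000000 places the hint inside the 0x20000000 - 0x60000000 range the comment mentions.

// Illustrative only; compiles as a standalone program.
#include <cassert>
#include <stdint.h>

int main() {
  const uint32_t kSamples[] = { 0x00000000u, 0x12345678u, 0xffffffffu };
  for (int i = 0; i < 3; i++) {
    uint32_t hint = (kSamples[i] & 0x3ffff000u) + 0x20000000u;
    assert(hint >= 0x20000000u && hint <= 0x5ffff000u);  // inside the target range
    assert((hint & 0xfffu) == 0u);                       // 4K page aligned
  }
  return 0;
}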
80 void OS::Setup() { | 102 void OS::Setup() { |
81 // Seed the random number generator. | 103 // Seed the random number generator. We preserve microsecond resolution. |
82 // Convert the current time to a 64-bit integer first, before converting it | 104 uint64_t seed = Ticks() ^ (getpid() << 16); |
83 // to an unsigned. Going directly can cause an overflow and the seed to be | |
84 // set to all ones. The seed will be identical for different instances that | |
85 // call this setup code within the same millisecond. | |
86 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()); | |
87 srandom(static_cast<unsigned int>(seed)); | 105 srandom(static_cast<unsigned int>(seed)); |
88 limit_mutex = CreateMutex(); | 106 limit_mutex = CreateMutex(); |
89 } | 107 } |
90 | 108 |
91 | 109 |
92 void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { | |
93 __asm__ __volatile__("" : : : "memory"); | |
94 *ptr = value; | |
95 } | |
96 | |
97 | |
98 uint64_t OS::CpuFeaturesImpliedByPlatform() { | 110 uint64_t OS::CpuFeaturesImpliedByPlatform() { |
99 return 0; // OpenBSD runs on anything. | 111 return 0; |
100 } | 112 } |
101 | 113 |
102 | 114 |
103 int OS::ActivationFrameAlignment() { | 115 int OS::ActivationFrameAlignment() { |
104 // 16 byte alignment on OpenBSD | 116 // With gcc 4.4 the tree vectorization optimizer can generate code |
| 117 // that requires 16 byte alignment such as movdqa on x86. |
105 return 16; | 118 return 16; |
106 } | 119 } |
107 | 120 |
108 | 121 |
| 122 void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { |
| 123 __asm__ __volatile__("" : : : "memory"); |
| 124 // An x86 store acts as a release barrier. |
| 125 *ptr = value; |
| 126 } |
| 127 |
| 128 |
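As a side note for review: ReleaseStore is only moved, not changed. The empty asm with a "memory" clobber is a compiler barrier, and the plain store already has release semantics on x86. A minimal standalone sketch (an assumed example, not code from this CL) of the publish/consume pattern it supports:

// x86-only illustration of publishing data with a compiler barrier + store.
#include <pthread.h>
#include <stdio.h>

static volatile int payload = 0;
static volatile int ready = 0;

static void ReleaseStore(volatile int* ptr, int value) {
  __asm__ __volatile__("" : : : "memory");  // keep earlier stores before this one
  *ptr = value;                             // plain store; release on x86
}

static void* Producer(void*) {
  payload = 42;              // written before the flag is published
  ReleaseStore(&ready, 1);
  return NULL;
}

int main() {
  pthread_t t;
  pthread_create(&t, NULL, Producer, NULL);
  while (!ready) {}                     // consumer spins until the flag is published
  printf("payload = %d\n", payload);    // observes 42 once ready is seen
  pthread_join(t, NULL);
  return 0;
}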
109 const char* OS::LocalTimezone(double time) { | 129 const char* OS::LocalTimezone(double time) { |
110 if (isnan(time)) return ""; | 130 if (isnan(time)) return ""; |
111 time_t tv = static_cast<time_t>(floor(time/msPerSecond)); | 131 time_t tv = static_cast<time_t>(floor(time/msPerSecond)); |
112 struct tm* t = localtime(&tv); | 132 struct tm* t = localtime(&tv); |
113 if (NULL == t) return ""; | 133 if (NULL == t) return ""; |
114 return t->tm_zone; | 134 return t->tm_zone; |
115 } | 135 } |
116 | 136 |
117 | 137 |
118 double OS::LocalTimeOffset() { | 138 double OS::LocalTimeOffset() { |
(...skipping 24 matching lines...) |
143 reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); | 163 reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); |
144 } | 164 } |
145 | 165 |
146 | 166 |
147 bool OS::IsOutsideAllocatedSpace(void* address) { | 167 bool OS::IsOutsideAllocatedSpace(void* address) { |
148 return address < lowest_ever_allocated || address >= highest_ever_allocated; | 168 return address < lowest_ever_allocated || address >= highest_ever_allocated; |
149 } | 169 } |
150 | 170 |
151 | 171 |
152 size_t OS::AllocateAlignment() { | 172 size_t OS::AllocateAlignment() { |
153 return getpagesize(); | 173 return sysconf(_SC_PAGESIZE); |
154 } | 174 } |
155 | 175 |
156 | 176 |
157 void* OS::Allocate(const size_t requested, | 177 void* OS::Allocate(const size_t requested, |
158 size_t* allocated, | 178 size_t* allocated, |
159 bool executable) { | 179 bool is_executable) { |
160 const size_t msize = RoundUp(requested, getpagesize()); | 180 const size_t msize = RoundUp(requested, AllocateAlignment()); |
161 int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0); | 181 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); |
162 void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); | 182 void* addr = GetRandomMmapAddr(); |
163 | 183 void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); |
164 if (mbase == MAP_FAILED) { | 184 if (mbase == MAP_FAILED) { |
165 LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed")); | 185 LOG(i::Isolate::Current(), |
| 186 StringEvent("OS::Allocate", "mmap failed")); |
166 return NULL; | 187 return NULL; |
167 } | 188 } |
168 *allocated = msize; | 189 *allocated = msize; |
169 UpdateAllocatedSpaceLimits(mbase, msize); | 190 UpdateAllocatedSpaceLimits(mbase, msize); |
170 return mbase; | 191 return mbase; |
171 } | 192 } |
172 | 193 |
173 | 194 |
174 void OS::Free(void* buf, const size_t length) { | 195 void OS::Free(void* address, const size_t size) { |
175 // TODO(1240712): munmap has a return value which is ignored here. | 196 // TODO(1240712): munmap has a return value which is ignored here. |
176 int result = munmap(buf, length); | 197 int result = munmap(address, size); |
177 USE(result); | 198 USE(result); |
178 ASSERT(result == 0); | 199 ASSERT(result == 0); |
179 } | 200 } |
180 | 201 |
181 | 202 |
182 void OS::Sleep(int milliseconds) { | 203 void OS::Sleep(int milliseconds) { |
183 unsigned int ms = static_cast<unsigned int>(milliseconds); | 204 unsigned int ms = static_cast<unsigned int>(milliseconds); |
184 usleep(1000 * ms); | 205 usleep(1000 * ms); |
185 } | 206 } |
186 | 207 |
187 | 208 |
188 void OS::Abort() { | 209 void OS::Abort() { |
189 // Redirect to std abort to signal abnormal program termination. | 210 // Redirect to std abort to signal abnormal program termination. |
190 abort(); | 211 abort(); |
191 } | 212 } |
192 | 213 |
193 | 214 |
194 void OS::DebugBreak() { | 215 void OS::DebugBreak() { |
195 #if (defined(__arm__) || defined(__thumb__)) | |
196 # if defined(CAN_USE_ARMV5_INSTRUCTIONS) | |
197 asm("bkpt 0"); | |
198 # endif | |
199 #else | |
200 asm("int $3"); | 216 asm("int $3"); |
201 #endif | |
202 } | 217 } |
203 | 218 |
204 | 219 |
205 class PosixMemoryMappedFile : public OS::MemoryMappedFile { | 220 class PosixMemoryMappedFile : public OS::MemoryMappedFile { |
206 public: | 221 public: |
207 PosixMemoryMappedFile(FILE* file, void* memory, int size) | 222 PosixMemoryMappedFile(FILE* file, void* memory, int size) |
208 : file_(file), memory_(memory), size_(size) { } | 223 : file_(file), memory_(memory), size_(size) { } |
209 virtual ~PosixMemoryMappedFile(); | 224 virtual ~PosixMemoryMappedFile(); |
210 virtual void* memory() { return memory_; } | 225 virtual void* memory() { return memory_; } |
211 virtual int size() { return size_; } | 226 virtual int size() { return size_; } |
(...skipping 31 matching lines...) |
243 return new PosixMemoryMappedFile(file, memory, size); | 258 return new PosixMemoryMappedFile(file, memory, size); |
244 } | 259 } |
245 | 260 |
246 | 261 |
247 PosixMemoryMappedFile::~PosixMemoryMappedFile() { | 262 PosixMemoryMappedFile::~PosixMemoryMappedFile() { |
248 if (memory_) OS::Free(memory_, size_); | 263 if (memory_) OS::Free(memory_, size_); |
249 fclose(file_); | 264 fclose(file_); |
250 } | 265 } |
251 | 266 |
252 | 267 |
253 static unsigned StringToLong(char* buffer) { | |
254 return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT | |
255 } | |
256 | |
257 | |
258 void OS::LogSharedLibraryAddresses() { | 268 void OS::LogSharedLibraryAddresses() { |
259 static const int MAP_LENGTH = 1024; | 269 // This function assumes that the layout of the file is as follows: |
260 int fd = open("/proc/self/maps", O_RDONLY); | 270 // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] |
261 if (fd < 0) return; | 271 // If we encounter an unexpected situation we abort scanning further entries. |
| 272 FILE* fp = fopen("/proc/self/maps", "r"); |
| 273 if (fp == NULL) return; |
| 274 |
| 275 // Allocate enough room to be able to store a full file name. |
| 276 const int kLibNameLen = FILENAME_MAX + 1; |
| 277 char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen)); |
| 278 |
| 279 i::Isolate* isolate = ISOLATE; |
| 280 // This loop will terminate once the scanning hits an EOF. |
262 while (true) { | 281 while (true) { |
263 char addr_buffer[11]; | 282 uintptr_t start, end; |
264 addr_buffer[0] = '0'; | 283 char attr_r, attr_w, attr_x, attr_p; |
265 addr_buffer[1] = 'x'; | 284 // Parse the addresses and permission bits at the beginning of the line. |
266 addr_buffer[10] = 0; | 285 if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break; |
267 int result = read(fd, addr_buffer + 2, 8); | 286 if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break; |
268 if (result < 8) break; | 287 |
269 unsigned start = StringToLong(addr_buffer); | 288 int c; |
270 result = read(fd, addr_buffer + 2, 1); | 289 if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') { |
271 if (result < 1) break; | 290 // Found a read-only executable entry. Skip characters until we reach |
272 if (addr_buffer[2] != '-') break; | 291 // the beginning of the filename or the end of the line. |
273 result = read(fd, addr_buffer + 2, 8); | 292 do { |
274 if (result < 8) break; | 293 c = getc(fp); |
275 unsigned end = StringToLong(addr_buffer); | 294 } while ((c != EOF) && (c != '\n') && (c != '/')); |
276 char buffer[MAP_LENGTH]; | 295 if (c == EOF) break; // EOF: Was unexpected, just exit. |
277 int bytes_read = -1; | 296 |
278 do { | 297 // Process the filename if found. |
279 bytes_read++; | 298 if (c == '/') { |
280 if (bytes_read >= MAP_LENGTH - 1) | 299 ungetc(c, fp); // Push the '/' back into the stream to be read below. |
281 break; | 300 |
282 result = read(fd, buffer + bytes_read, 1); | 301 // Read to the end of the line. Exit if the read fails. |
283 if (result < 1) break; | 302 if (fgets(lib_name, kLibNameLen, fp) == NULL) break; |
284 } while (buffer[bytes_read] != '\n'); | 303 |
285 buffer[bytes_read] = 0; | 304 // Drop the newline character read by fgets. We do not need to check |
286 // Ignore mappings that are not executable. | 305 // for a zero-length string because we know that we at least read the |
287 if (buffer[3] != 'x') continue; | 306 // '/' character. |
288 char* start_of_path = index(buffer, '/'); | 307 lib_name[strlen(lib_name) - 1] = '\0'; |
289 // There may be no filename in this line. Skip to next. | 308 } else { |
290 if (start_of_path == NULL) continue; | 309 // No library name found, just record the raw address range. |
291 buffer[bytes_read] = 0; | 310 snprintf(lib_name, kLibNameLen, |
292 LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end)); | 311 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end); |
| 312 } |
| 313 LOG(isolate, SharedLibraryEvent(lib_name, start, end)); |
| 314 } else { |
| 315 // Entry not describing executable data. Skip to end of line to setup |
| 316 // reading the next entry. |
| 317 do { |
| 318 c = getc(fp); |
| 319 } while ((c != EOF) && (c != '\n')); |
| 320 if (c == EOF) break; |
| 321 } |
293 } | 322 } |
294 close(fd); | 323 free(lib_name); |
| 324 fclose(fp); |
295 } | 325 } |
296 | 326 |
297 | 327 |
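To illustrate the format the rewritten LogSharedLibraryAddresses above expects ("hex_start-hex_end rwxp <unused> [path]"), here is how one such entry is picked apart. The sample line is fabricated, and a Linux-style maps file on the target system is assumed; this is not code from the CL.

// Illustrative parse of a single (made-up) maps line.
#include <stdio.h>

int main() {
  const char* line =
      "40000000-40042000 r-xp 00000000 00:00 0 /usr/lib/libc.so.61.0";
  unsigned long start = 0, end = 0;
  char r, w, x, p;
  if (sscanf(line, "%lx-%lx %c%c%c%c", &start, &end, &r, &w, &x, &p) == 6 &&
      r == 'r' && w != 'w' && x == 'x') {
    // A read-only executable mapping: this is the kind of entry that gets logged.
    printf("executable mapping %lx-%lx\n", start, end);
  }
  return 0;
}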
| 328 static const char kGCFakeMmap[] = "/tmp/__v8_gc__"; |
| 329 |
| 330 |
298 void OS::SignalCodeMovingGC() { | 331 void OS::SignalCodeMovingGC() { |
| 332 // Support for ll_prof.py. |
| 333 // |
| 334 // The Linux profiler built into the kernel logs all mmap's with |
| 335 // PROT_EXEC so that analysis tools can properly attribute ticks. We |
| 336 // do a mmap with a name known by ll_prof.py and immediately munmap |
| 337 // it. This injects a GC marker into the stream of events generated |
| 338 // by the kernel and allows us to synchronize V8 code log and the |
| 339 // kernel log. |
| 340 int size = sysconf(_SC_PAGESIZE); |
| 341 FILE* f = fopen(kGCFakeMmap, "w+"); |
| 342 void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, |
| 343 fileno(f), 0); |
| 344 ASSERT(addr != MAP_FAILED); |
| 345 OS::Free(addr, size); |
| 346 fclose(f); |
299 } | 347 } |
300 | 348 |
301 | 349 |
302 int OS::StackWalk(Vector<OS::StackFrame> frames) { | 350 int OS::StackWalk(Vector<OS::StackFrame> frames) { |
| 351 // backtrace is a glibc extension. |
303 int frames_size = frames.length(); | 352 int frames_size = frames.length(); |
304 ScopedVector<void*> addresses(frames_size); | 353 ScopedVector<void*> addresses(frames_size); |
305 | 354 |
306 int frames_count = backtrace(addresses.start(), frames_size); | 355 int frames_count = backtrace(addresses.start(), frames_size); |
307 | 356 |
308 char** symbols = backtrace_symbols(addresses.start(), frames_count); | 357 char** symbols = backtrace_symbols(addresses.start(), frames_count); |
309 if (symbols == NULL) { | 358 if (symbols == NULL) { |
310 return kStackWalkError; | 359 return kStackWalkError; |
311 } | 360 } |
312 | 361 |
(...skipping 11 matching lines...) |
324 free(symbols); | 373 free(symbols); |
325 | 374 |
326 return frames_count; | 375 return frames_count; |
327 } | 376 } |
328 | 377 |
329 | 378 |
330 // Constants used for mmap. | 379 // Constants used for mmap. |
331 static const int kMmapFd = -1; | 380 static const int kMmapFd = -1; |
332 static const int kMmapFdOffset = 0; | 381 static const int kMmapFdOffset = 0; |
333 | 382 |
| 383 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } |
334 | 384 |
335 VirtualMemory::VirtualMemory(size_t size) { | 385 VirtualMemory::VirtualMemory(size_t size) { |
336 address_ = mmap(NULL, size, PROT_NONE, | 386 address_ = ReserveRegion(size); |
337 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, | |
338 kMmapFd, kMmapFdOffset); | |
339 size_ = size; | 387 size_ = size; |
340 } | 388 } |
341 | 389 |
342 | 390 |
| 391 VirtualMemory::VirtualMemory(size_t size, size_t alignment) |
| 392 : address_(NULL), size_(0) { |
| 393 ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); |
| 394 size_t request_size = RoundUp(size + alignment, |
| 395 static_cast<intptr_t>(OS::AllocateAlignment())); |
| 396 void* reservation = mmap(GetRandomMmapAddr(), |
| 397 request_size, |
| 398 PROT_NONE, |
| 399 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, |
| 400 kMmapFd, |
| 401 kMmapFdOffset); |
| 402 if (reservation == MAP_FAILED) return; |
| 403 |
| 404 Address base = static_cast<Address>(reservation); |
| 405 Address aligned_base = RoundUp(base, alignment); |
| 406 ASSERT_LE(base, aligned_base); |
| 407 |
| 408 // Unmap extra memory reserved before and after the desired block. |
| 409 if (aligned_base != base) { |
| 410 size_t prefix_size = static_cast<size_t>(aligned_base - base); |
| 411 OS::Free(base, prefix_size); |
| 412 request_size -= prefix_size; |
| 413 } |
| 414 |
| 415 size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); |
| 416 ASSERT_LE(aligned_size, request_size); |
| 417 |
| 418 if (aligned_size != request_size) { |
| 419 size_t suffix_size = request_size - aligned_size; |
| 420 OS::Free(aligned_base + aligned_size, suffix_size); |
| 421 request_size -= suffix_size; |
| 422 } |
| 423 |
| 424 ASSERT(aligned_size == request_size); |
| 425 |
| 426 address_ = static_cast<void*>(aligned_base); |
| 427 size_ = aligned_size; |
| 428 } |
| 429 |
| 430 |
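For reviewers: a numeric walk-through of the prefix/suffix trimming in the new aligned constructor, using made-up values (4K pages, a 16K request at 64K alignment, an arbitrary unaligned base). Standalone and illustrative only:

// Checks the arithmetic only; no memory is actually mapped or unmapped.
#include <cassert>
#include <stdio.h>

static unsigned long RoundUpTo(unsigned long x, unsigned long m) {  // m is a power of two
  return (x + m - 1) & ~(m - 1);
}

int main() {
  const unsigned long kPage = 0x1000, kAlign = 0x10000, kSize = 0x4000;
  unsigned long request = RoundUpTo(kSize + kAlign, kPage);           // 0x14000 reserved
  unsigned long base = 0x40003000, aligned = RoundUpTo(base, kAlign); // 0x40010000
  unsigned long prefix = aligned - base;                              // 0xd000 freed in front
  request -= prefix;                                                  // 0x7000 left
  unsigned long aligned_size = RoundUpTo(kSize, kPage);               // 0x4000
  unsigned long suffix = request - aligned_size;                      // 0x3000 freed at the end
  request -= suffix;
  assert(request == aligned_size && aligned % kAlign == 0);           // mirrors the final ASSERT
  printf("prefix=0x%lx suffix=0x%lx block=0x%lx@0x%lx\n", prefix, suffix, request, aligned);
  return 0;
}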
343 VirtualMemory::~VirtualMemory() { | 431 VirtualMemory::~VirtualMemory() { |
344 if (IsReserved()) { | 432 if (IsReserved()) { |
345 OS::Free(address(), size()); | 433 bool result = ReleaseRegion(address(), size()); |
346 address_ = MAP_FAILED; | 434 ASSERT(result); |
| 435 USE(result); |
347 } | 436 } |
348 } | 437 } |
349 | 438 |
350 | 439 |
351 bool VirtualMemory::IsReserved() { | 440 bool VirtualMemory::IsReserved() { |
352 return address_ != MAP_FAILED; | 441 return address_ != NULL; |
353 } | 442 } |
354 | 443 |
355 | 444 |
356 bool VirtualMemory::Commit(void* address, size_t size, bool executable) { | 445 void VirtualMemory::Reset() { |
357 int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0); | 446 address_ = NULL; |
358 if (MAP_FAILED == mmap(address, size, prot, | 447 size_ = 0; |
| 448 } |
| 449 |
| 450 |
| 451 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { |
| 452 return CommitRegion(address, size, is_executable); |
| 453 } |
| 454 |
| 455 |
| 456 bool VirtualMemory::Uncommit(void* address, size_t size) { |
| 457 return UncommitRegion(address, size); |
| 458 } |
| 459 |
| 460 |
| 461 void* VirtualMemory::ReserveRegion(size_t size) { |
| 462 void* result = mmap(GetRandomMmapAddr(), |
| 463 size, |
| 464 PROT_NONE, |
| 465 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, |
| 466 kMmapFd, |
| 467 kMmapFdOffset); |
| 468 |
| 469 if (result == MAP_FAILED) return NULL; |
| 470 |
| 471 return result; |
| 472 } |
| 473 |
| 474 |
| 475 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { |
| 476 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); |
| 477 if (MAP_FAILED == mmap(base, |
| 478 size, |
| 479 prot, |
359 MAP_PRIVATE | MAP_ANON | MAP_FIXED, | 480 MAP_PRIVATE | MAP_ANON | MAP_FIXED, |
360 kMmapFd, kMmapFdOffset)) { | 481 kMmapFd, |
| 482 kMmapFdOffset)) { |
361 return false; | 483 return false; |
362 } | 484 } |
363 | 485 |
364 UpdateAllocatedSpaceLimits(address, size); | 486 UpdateAllocatedSpaceLimits(base, size); |
365 return true; | 487 return true; |
366 } | 488 } |
367 | 489 |
368 | 490 |
369 bool VirtualMemory::Uncommit(void* address, size_t size) { | 491 bool VirtualMemory::UncommitRegion(void* base, size_t size) { |
370 return mmap(address, size, PROT_NONE, | 492 return mmap(base, |
| 493 size, |
| 494 PROT_NONE, |
371 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, | 495 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, |
372 kMmapFd, kMmapFdOffset) != MAP_FAILED; | 496 kMmapFd, |
| 497 kMmapFdOffset) != MAP_FAILED; |
| 498 } |
| 499 |
| 500 |
| 501 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { |
| 502 return munmap(base, size) == 0; |
373 } | 503 } |
374 | 504 |
375 | 505 |
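For reviewers: the static region helpers added above split reservation (PROT_NONE) from commitment (readable/writable pages). A hypothetical caller inside v8::internal might use them like this; sketch only, assuming the matching static declarations exist in platform.h.

// Hypothetical helper; error handling trimmed for brevity.
static void RegionLifecycleSketch() {
  size_t page = OS::AllocateAlignment();
  void* region = VirtualMemory::ReserveRegion(16 * page);     // address space only
  if (region == NULL) return;
  if (VirtualMemory::CommitRegion(region, 4 * page, false)) {
    static_cast<char*>(region)[0] = 0;                        // first 4 pages now writable
    VirtualMemory::UncommitRegion(region, 4 * page);          // back to PROT_NONE
  }
  VirtualMemory::ReleaseRegion(region, 16 * page);            // unmap the whole reservation
}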
376 class Thread::PlatformData : public Malloced { | 506 class Thread::PlatformData : public Malloced { |
377 public: | 507 public: |
| 508 PlatformData() : thread_(kNoThread) {} |
| 509 |
378 pthread_t thread_; // Thread handle for pthread. | 510 pthread_t thread_; // Thread handle for pthread. |
379 }; | 511 }; |
380 | 512 |
381 | |
382 Thread::Thread(const Options& options) | 513 Thread::Thread(const Options& options) |
383 : data_(new PlatformData), | 514 : data_(new PlatformData()), |
384 stack_size_(options.stack_size) { | 515 stack_size_(options.stack_size) { |
385 set_name(options.name); | 516 set_name(options.name); |
386 } | 517 } |
387 | 518 |
388 | 519 |
389 Thread::Thread(const char* name) | 520 Thread::Thread(const char* name) |
390 : data_(new PlatformData), | 521 : data_(new PlatformData()), |
391 stack_size_(0) { | 522 stack_size_(0) { |
392 set_name(name); | 523 set_name(name); |
393 } | 524 } |
394 | 525 |
395 | 526 |
396 Thread::~Thread() { | 527 Thread::~Thread() { |
397 delete data_; | 528 delete data_; |
398 } | 529 } |
399 | 530 |
400 | 531 |
401 static void* ThreadEntry(void* arg) { | 532 static void* ThreadEntry(void* arg) { |
402 Thread* thread = reinterpret_cast<Thread*>(arg); | 533 Thread* thread = reinterpret_cast<Thread*>(arg); |
403 // This is also initialized by the first argument to pthread_create() but we | 534 // This is also initialized by the first argument to pthread_create() but we |
404 // don't know which thread will run first (the original thread or the new | 535 // don't know which thread will run first (the original thread or the new |
405 // one) so we initialize it here too. | 536 // one) so we initialize it here too. |
| 537 #ifdef PR_SET_NAME |
| 538 prctl(PR_SET_NAME, |
| 539 reinterpret_cast<unsigned long>(thread->name()), // NOLINT |
| 540 0, 0, 0); |
| 541 #endif |
406 thread->data()->thread_ = pthread_self(); | 542 thread->data()->thread_ = pthread_self(); |
407 ASSERT(thread->data()->thread_ != kNoThread); | 543 ASSERT(thread->data()->thread_ != kNoThread); |
408 thread->Run(); | 544 thread->Run(); |
409 return NULL; | 545 return NULL; |
410 } | 546 } |
411 | 547 |
412 | 548 |
413 void Thread::set_name(const char* name) { | 549 void Thread::set_name(const char* name) { |
414 strncpy(name_, name, sizeof(name_)); | 550 strncpy(name_, name, sizeof(name_)); |
415 name_[sizeof(name_) - 1] = '\0'; | 551 name_[sizeof(name_) - 1] = '\0'; |
(...skipping 55 matching lines...) |
471 class OpenBSDMutex : public Mutex { | 607 class OpenBSDMutex : public Mutex { |
472 public: | 608 public: |
473 OpenBSDMutex() { | 609 OpenBSDMutex() { |
474 pthread_mutexattr_t attrs; | 610 pthread_mutexattr_t attrs; |
475 int result = pthread_mutexattr_init(&attrs); | 611 int result = pthread_mutexattr_init(&attrs); |
476 ASSERT(result == 0); | 612 ASSERT(result == 0); |
477 result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE); | 613 result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE); |
478 ASSERT(result == 0); | 614 ASSERT(result == 0); |
479 result = pthread_mutex_init(&mutex_, &attrs); | 615 result = pthread_mutex_init(&mutex_, &attrs); |
480 ASSERT(result == 0); | 616 ASSERT(result == 0); |
| 617 USE(result); |
481 } | 618 } |
482 | 619 |
483 virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); } | 620 virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); } |
484 | 621 |
485 virtual int Lock() { | 622 virtual int Lock() { |
486 int result = pthread_mutex_lock(&mutex_); | 623 int result = pthread_mutex_lock(&mutex_); |
487 return result; | 624 return result; |
488 } | 625 } |
489 | 626 |
490 virtual int Unlock() { | 627 virtual int Unlock() { |
(...skipping 36 matching lines...) |
527 | 664 |
528 void OpenBSDSemaphore::Wait() { | 665 void OpenBSDSemaphore::Wait() { |
529 while (true) { | 666 while (true) { |
530 int result = sem_wait(&sem_); | 667 int result = sem_wait(&sem_); |
531 if (result == 0) return; // Successfully got semaphore. | 668 if (result == 0) return; // Successfully got semaphore. |
532 CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup. | 669 CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup. |
533 } | 670 } |
534 } | 671 } |
535 | 672 |
536 | 673 |
| 674 #ifndef TIMEVAL_TO_TIMESPEC |
| 675 #define TIMEVAL_TO_TIMESPEC(tv, ts) do { \ |
| 676 (ts)->tv_sec = (tv)->tv_sec; \ |
| 677 (ts)->tv_nsec = (tv)->tv_usec * 1000; \ |
| 678 } while (false) |
| 679 #endif |
| 680 |
| 681 |
537 bool OpenBSDSemaphore::Wait(int timeout) { | 682 bool OpenBSDSemaphore::Wait(int timeout) { |
538 const long kOneSecondMicros = 1000000; // NOLINT | 683 const long kOneSecondMicros = 1000000; // NOLINT |
539 | 684 |
540 // Split timeout into second and nanosecond parts. | 685 // Split timeout into second and nanosecond parts. |
541 struct timeval delta; | 686 struct timeval delta; |
542 delta.tv_usec = timeout % kOneSecondMicros; | 687 delta.tv_usec = timeout % kOneSecondMicros; |
543 delta.tv_sec = timeout / kOneSecondMicros; | 688 delta.tv_sec = timeout / kOneSecondMicros; |
544 | 689 |
545 struct timeval current_time; | 690 struct timeval current_time; |
546 // Get the current time. | 691 // Get the current time. |
(...skipping 13 matching lines...) |
560 while (true) { | 705 while (true) { |
561 int result = sem_trywait(&sem_); | 706 int result = sem_trywait(&sem_); |
562 if (result == 0) return true; // Successfully got semaphore. | 707 if (result == 0) return true; // Successfully got semaphore. |
563 if (!to) return false; // Timeout. | 708 if (!to) return false; // Timeout. |
564 CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup. | 709 CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup. |
565 usleep(ts.tv_nsec / 1000); | 710 usleep(ts.tv_nsec / 1000); |
566 to--; | 711 to--; |
567 } | 712 } |
568 } | 713 } |
569 | 714 |
570 | |
571 Semaphore* OS::CreateSemaphore(int count) { | 715 Semaphore* OS::CreateSemaphore(int count) { |
572 return new OpenBSDSemaphore(count); | 716 return new OpenBSDSemaphore(count); |
573 } | 717 } |
574 | 718 |
575 | 719 |
576 static pthread_t GetThreadID() { | 720 static pthread_t GetThreadID() { |
577 pthread_t thread_id = pthread_self(); | 721 return pthread_self(); |
578 return thread_id; | |
579 } | 722 } |
580 | 723 |
581 | |
582 class Sampler::PlatformData : public Malloced { | |
583 public: | |
584 PlatformData() : vm_tid_(GetThreadID()) {} | |
585 | |
586 pthread_t vm_tid() const { return vm_tid_; } | |
587 | |
588 private: | |
589 pthread_t vm_tid_; | |
590 }; | |
591 | |
592 | |
593 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { | 724 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { |
594 USE(info); | 725 USE(info); |
595 if (signal != SIGPROF) return; | 726 if (signal != SIGPROF) return; |
596 Isolate* isolate = Isolate::UncheckedCurrent(); | 727 Isolate* isolate = Isolate::UncheckedCurrent(); |
597 if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) { | 728 if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) { |
598 // We require a fully initialized and entered isolate. | 729 // We require a fully initialized and entered isolate. |
599 return; | 730 return; |
600 } | 731 } |
601 if (v8::Locker::IsActive() && | 732 if (v8::Locker::IsActive() && |
602 !isolate->thread_manager()->IsLockedByCurrentThread()) { | 733 !isolate->thread_manager()->IsLockedByCurrentThread()) { |
(...skipping 11 matching lines...) |
614 ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context); | 745 ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context); |
615 sample->state = isolate->current_vm_state(); | 746 sample->state = isolate->current_vm_state(); |
616 #if V8_HOST_ARCH_IA32 | 747 #if V8_HOST_ARCH_IA32 |
617 sample->pc = reinterpret_cast<Address>(ucontext->sc_eip); | 748 sample->pc = reinterpret_cast<Address>(ucontext->sc_eip); |
618 sample->sp = reinterpret_cast<Address>(ucontext->sc_esp); | 749 sample->sp = reinterpret_cast<Address>(ucontext->sc_esp); |
619 sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp); | 750 sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp); |
620 #elif V8_HOST_ARCH_X64 | 751 #elif V8_HOST_ARCH_X64 |
621 sample->pc = reinterpret_cast<Address>(ucontext->sc_rip); | 752 sample->pc = reinterpret_cast<Address>(ucontext->sc_rip); |
622 sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp); | 753 sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp); |
623 sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp); | 754 sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp); |
624 #elif V8_HOST_ARCH_ARM | |
625 sample->pc = reinterpret_cast<Address>(ucontext->sc_r15); | |
626 sample->sp = reinterpret_cast<Address>(ucontext->sc_r13); | |
627 sample->fp = reinterpret_cast<Address>(ucontext->sc_r11); | |
628 #endif | 755 #endif |
629 sampler->SampleStack(sample); | 756 sampler->SampleStack(sample); |
630 sampler->Tick(sample); | 757 sampler->Tick(sample); |
631 } | 758 } |
632 | 759 |
633 | 760 |
| 761 class Sampler::PlatformData : public Malloced { |
| 762 public: |
| 763 PlatformData() : vm_tid_(GetThreadID()) {} |
| 764 |
| 765 pthread_t vm_tid() const { return vm_tid_; } |
| 766 |
| 767 private: |
| 768 pthread_t vm_tid_; |
| 769 }; |
| 770 |
| 771 |
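For reviewers: the sampler plumbing above depends on delivering SIGPROF to the VM thread and reading registers out of the signal context. A minimal standalone illustration of that mechanism, assuming pthread_kill delivery; this is not taken from the CL.

// Standalone SIGPROF round-trip; a real profiler reads pc/sp/fp from the ucontext.
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t ticks = 0;

static void ProfHandler(int sig, siginfo_t*, void*) {
  if (sig == SIGPROF) ticks++;
}

int main() {
  struct sigaction sa;
  sa.sa_sigaction = ProfHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO;   // same flags as InstallSignalHandler above
  sigaction(SIGPROF, &sa, NULL);

  pthread_t target = pthread_self();       // stands in for PlatformData::vm_tid()
  for (int i = 0; i < 5; i++) {
    pthread_kill(target, SIGPROF);         // sender thread -> sampled thread
    usleep(1000);
  }
  printf("ticks = %d\n", static_cast<int>(ticks));
  return 0;
}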
634 class SignalSender : public Thread { | 772 class SignalSender : public Thread { |
635 public: | 773 public: |
636 enum SleepInterval { | 774 enum SleepInterval { |
637 HALF_INTERVAL, | 775 HALF_INTERVAL, |
638 FULL_INTERVAL | 776 FULL_INTERVAL |
639 }; | 777 }; |
640 | 778 |
641 explicit SignalSender(int interval) | 779 explicit SignalSender(int interval) |
642 : Thread("SignalSender"), | 780 : Thread("SignalSender"), |
| 781 vm_tgid_(getpid()), |
643 interval_(interval) {} | 782 interval_(interval) {} |
644 | 783 |
| 784 static void InstallSignalHandler() { |
| 785 struct sigaction sa; |
| 786 sa.sa_sigaction = ProfilerSignalHandler; |
| 787 sigemptyset(&sa.sa_mask); |
| 788 sa.sa_flags = SA_RESTART | SA_SIGINFO; |
| 789 signal_handler_installed_ = |
| 790 (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0); |
| 791 } |
| 792 |
| 793 static void RestoreSignalHandler() { |
| 794 if (signal_handler_installed_) { |
| 795 sigaction(SIGPROF, &old_signal_handler_, 0); |
| 796 signal_handler_installed_ = false; |
| 797 } |
| 798 } |
| 799 |
645 static void AddActiveSampler(Sampler* sampler) { | 800 static void AddActiveSampler(Sampler* sampler) { |
646 ScopedLock lock(mutex_); | 801 ScopedLock lock(mutex_); |
647 SamplerRegistry::AddActiveSampler(sampler); | 802 SamplerRegistry::AddActiveSampler(sampler); |
648 if (instance_ == NULL) { | 803 if (instance_ == NULL) { |
649 // Install a signal handler. | 804 // Start a thread that will send SIGPROF signal to VM threads, |
650 struct sigaction sa; | 805 // when CPU profiling will be enabled. |
651 sa.sa_sigaction = ProfilerSignalHandler; | |
652 sigemptyset(&sa.sa_mask); | |
653 sa.sa_flags = SA_RESTART | SA_SIGINFO; | |
654 signal_handler_installed_ = | |
655 (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0); | |
656 | |
657 // Start a thread that sends SIGPROF signal to VM threads. | |
658 instance_ = new SignalSender(sampler->interval()); | 806 instance_ = new SignalSender(sampler->interval()); |
659 instance_->Start(); | 807 instance_->Start(); |
660 } else { | 808 } else { |
661 ASSERT(instance_->interval_ == sampler->interval()); | 809 ASSERT(instance_->interval_ == sampler->interval()); |
662 } | 810 } |
663 } | 811 } |
664 | 812 |
665 static void RemoveActiveSampler(Sampler* sampler) { | 813 static void RemoveActiveSampler(Sampler* sampler) { |
666 ScopedLock lock(mutex_); | 814 ScopedLock lock(mutex_); |
667 SamplerRegistry::RemoveActiveSampler(sampler); | 815 SamplerRegistry::RemoveActiveSampler(sampler); |
668 if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) { | 816 if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) { |
669 RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_); | 817 RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_); |
670 delete instance_; | 818 delete instance_; |
671 instance_ = NULL; | 819 instance_ = NULL; |
672 | 820 RestoreSignalHandler(); |
673 // Restore the old signal handler. | |
674 if (signal_handler_installed_) { | |
675 sigaction(SIGPROF, &old_signal_handler_, 0); | |
676 signal_handler_installed_ = false; | |
677 } | |
678 } | 821 } |
679 } | 822 } |
680 | 823 |
681 // Implement Thread::Run(). | 824 // Implement Thread::Run(). |
682 virtual void Run() { | 825 virtual void Run() { |
683 SamplerRegistry::State state; | 826 SamplerRegistry::State state; |
684 while ((state = SamplerRegistry::GetState()) != | 827 while ((state = SamplerRegistry::GetState()) != |
685 SamplerRegistry::HAS_NO_SAMPLERS) { | 828 SamplerRegistry::HAS_NO_SAMPLERS) { |
686 bool cpu_profiling_enabled = | 829 bool cpu_profiling_enabled = |
687 (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS); | 830 (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS); |
688 bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled(); | 831 bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled(); |
| 832 if (cpu_profiling_enabled && !signal_handler_installed_) { |
| 833 InstallSignalHandler(); |
| 834 } else if (!cpu_profiling_enabled && signal_handler_installed_) { |
| 835 RestoreSignalHandler(); |
| 836 } |
689 // When CPU profiling is enabled both JavaScript and C++ code is | 837 // When CPU profiling is enabled both JavaScript and C++ code is |
690 // profiled. We must not suspend. | 838 // profiled. We must not suspend. |
691 if (!cpu_profiling_enabled) { | 839 if (!cpu_profiling_enabled) { |
692 if (rate_limiter_.SuspendIfNecessary()) continue; | 840 if (rate_limiter_.SuspendIfNecessary()) continue; |
693 } | 841 } |
694 if (cpu_profiling_enabled && runtime_profiler_enabled) { | 842 if (cpu_profiling_enabled && runtime_profiler_enabled) { |
695 if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) { | 843 if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) { |
696 return; | 844 return; |
697 } | 845 } |
698 Sleep(HALF_INTERVAL); | 846 Sleep(HALF_INTERVAL); |
(...skipping 46 matching lines...) |
745 fprintf(stderr, | 893 fprintf(stderr, |
746 "SignalSender usleep error; interval = %u, errno = %d\n", | 894 "SignalSender usleep error; interval = %u, errno = %d\n", |
747 interval, | 895 interval, |
748 errno); | 896 errno); |
749 ASSERT(result == 0 || errno == EINTR); | 897 ASSERT(result == 0 || errno == EINTR); |
750 } | 898 } |
751 #endif | 899 #endif |
752 USE(result); | 900 USE(result); |
753 } | 901 } |
754 | 902 |
| 903 const int vm_tgid_; |
755 const int interval_; | 904 const int interval_; |
756 RuntimeProfilerRateLimiter rate_limiter_; | 905 RuntimeProfilerRateLimiter rate_limiter_; |
757 | 906 |
758 // Protects the process wide state below. | 907 // Protects the process wide state below. |
759 static Mutex* mutex_; | 908 static Mutex* mutex_; |
760 static SignalSender* instance_; | 909 static SignalSender* instance_; |
761 static bool signal_handler_installed_; | 910 static bool signal_handler_installed_; |
762 static struct sigaction old_signal_handler_; | 911 static struct sigaction old_signal_handler_; |
763 | 912 |
764 DISALLOW_COPY_AND_ASSIGN(SignalSender); | 913 DISALLOW_COPY_AND_ASSIGN(SignalSender); |
765 }; | 914 }; |
766 | 915 |
| 916 |
767 Mutex* SignalSender::mutex_ = OS::CreateMutex(); | 917 Mutex* SignalSender::mutex_ = OS::CreateMutex(); |
768 SignalSender* SignalSender::instance_ = NULL; | 918 SignalSender* SignalSender::instance_ = NULL; |
769 struct sigaction SignalSender::old_signal_handler_; | 919 struct sigaction SignalSender::old_signal_handler_; |
770 bool SignalSender::signal_handler_installed_ = false; | 920 bool SignalSender::signal_handler_installed_ = false; |
771 | 921 |
772 | 922 |
773 Sampler::Sampler(Isolate* isolate, int interval) | 923 Sampler::Sampler(Isolate* isolate, int interval) |
774 : isolate_(isolate), | 924 : isolate_(isolate), |
775 interval_(interval), | 925 interval_(interval), |
776 profiling_(false), | 926 profiling_(false), |
(...skipping 17 matching lines...) |
794 | 944 |
795 | 945 |
796 void Sampler::Stop() { | 946 void Sampler::Stop() { |
797 ASSERT(IsActive()); | 947 ASSERT(IsActive()); |
798 SignalSender::RemoveActiveSampler(this); | 948 SignalSender::RemoveActiveSampler(this); |
799 SetActive(false); | 949 SetActive(false); |
800 } | 950 } |
801 | 951 |
802 | 952 |
803 } } // namespace v8::internal | 953 } } // namespace v8::internal |