Chromium Code Reviews

Unified Diff: src/platform-macos.cc

Issue 131363008: A64: Synchronize with r15922. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 10 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 60 matching lines...)
 extern char** backtrace_symbols(void* const*, int)
     __attribute__((weak_import));
 extern void backtrace_symbols_fd(void* const*, int, int)
     __attribute__((weak_import));
 }


 namespace v8 {
 namespace internal {

-// 0 is never a valid thread id on MacOSX since a pthread_t is
-// a pointer.
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
-  // Correct Mac OS X Leopard 'ceil' behavior.
-  if (-1.0 < x && x < 0.0) {
-    return -0.0;
-  } else {
-    return ceil(x);
-  }
-}
-

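Aside (illustration only, not part of the patch): the special case in the removed ceiling() exists because a conforming ceil() preserves the sign of zero, so for any x in (-1, 0) the result should be -0.0, which the Leopard-era libm reportedly returned as +0.0. A minimal standalone check, assuming a C++ toolchain with <cmath>:

    #include <cmath>
    #include <cstdio>

    int main() {
      double r = std::ceil(-0.25);
      // A conforming libm yields -0.0 here, so signbit(r) is set and "%g"
      // prints "-0"; a buggy implementation loses the sign.
      std::printf("ceil(-0.25) = %g, signbit = %d\n", r, std::signbit(r) ? 1 : 0);
      return 0;
    }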
 static Mutex* limit_mutex = NULL;


-void OS::PostSetUp() {
-  POSIXPostSetUp();
-}
-
-
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
 // and verification). The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap. The range is
 // [lowest, highest), inclusive on the low end and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
 static void* highest_ever_allocated = reinterpret_cast<void*>(0);


 static void UpdateAllocatedSpaceLimits(void* address, int size) {
   ASSERT(limit_mutex != NULL);
   ScopedLock lock(limit_mutex);

   lowest_ever_allocated = Min(lowest_ever_allocated, address);
   highest_ever_allocated =
       Max(highest_ever_allocated,
           reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
 }


 bool OS::IsOutsideAllocatedSpace(void* address) {
   return address < lowest_ever_allocated || address >= highest_ever_allocated;
 }

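Aside (illustration only, not part of the patch): the conservative [lowest, highest) range exists so heap membership can be sanity-checked cheaply in assertions. A hypothetical verification helper (VerifyHeapPointer is a stand-in, not V8 code; ASSERT is V8's internal macro):

    // A pointer handed out as a heap pointer must never fall outside
    // everything the allocator has ever mapped; the range is conservative,
    // so this can only miss bad pointers, never flag good ones.
    static void VerifyHeapPointer(void* pointer) {
      ASSERT(!OS::IsOutsideAllocatedSpace(pointer));
    }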
-size_t OS::AllocateAlignment() {
-  return getpagesize();
-}
-
-
 // Constants used for mmap.
 // kMmapFd is used to pass vm_alloc flags to tag the region with the user
 // defined tag 255 This helps identify V8-allocated regions in memory analysis
 // tools like vmmap(1).
 static const int kMmapFd = VM_MAKE_TAG(255);
 static const off_t kMmapFdOffset = 0;


 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
   const size_t msize = RoundUp(requested, getpagesize());
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
   void* mbase = mmap(OS::GetRandomMmapAddr(),
                      msize,
                      prot,
                      MAP_PRIVATE | MAP_ANON,
                      kMmapFd,
                      kMmapFdOffset);
   if (mbase == MAP_FAILED) {
     LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
     return NULL;
   }
   *allocated = msize;
   UpdateAllocatedSpaceLimits(mbase, msize);
   return mbase;
 }

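Aside (illustration only, not part of the patch): VM_MAKE_TAG is a Darwin-specific convention; with MAP_ANON the fd argument is not used as a file descriptor, so it can carry a user tag that tools such as vmmap(1) then report for the region. A minimal standalone sketch, independent of V8 (AllocateTagged is a hypothetical name):

    #include <sys/mman.h>
    #include <mach/vm_statistics.h>
    #include <stddef.h>

    // Map an anonymous region tagged with user tag 255 so that memory
    // analysis tools can attribute it to this component.
    void* AllocateTagged(size_t size) {
      void* p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANON, VM_MAKE_TAG(255), 0);
      return (p == MAP_FAILED) ? NULL : p;
    }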
-void OS::Free(void* address, const size_t size) {
-  // TODO(1240712): munmap has a return value which is ignored here.
-  int result = munmap(address, size);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
-  usleep(1000 * milliseconds);
-}
-
-
-int OS::NumberOfCores() {
-  return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
-  // Redirect to std abort to signal abnormal program termination
-  abort();
-}
-
-
-void OS::DebugBreak() {
-  asm("int $3");
-}
-
-
 void OS::DumpBacktrace() {
   // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
   if (backtrace == NULL) return;

   POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
 }

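Aside (illustration only, not part of the patch): the NULL check in DumpBacktrace() works because the execinfo functions are declared with __attribute__((weak_import)) near the top of this file, so the symbols resolve to NULL on releases that lack them (10.4). A standalone sketch of the same pattern, assuming an Apple toolchain (MaybeDumpFrameCount is a hypothetical name):

    #include <cstdio>

    // Declared weak so the binary still loads when the symbol is absent;
    // its address then compares equal to NULL at runtime.
    extern "C" int backtrace(void**, int) __attribute__((weak_import));

    void MaybeDumpFrameCount() {
      if (backtrace == NULL) return;  // symbol missing on this OS release
      void* frames[32];
      std::printf("captured %d frames\n", backtrace(frames, 32));
    }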
 class PosixMemoryMappedFile : public OS::MemoryMappedFile {
  public:
(...skipping 75 matching lines...)
     LOG(Isolate::Current(),
         SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
   }
 }


 void OS::SignalCodeMovingGC() {
 }

-uint64_t OS::CpuFeaturesImpliedByPlatform() {
-  // MacOSX requires all these to install so we can assume they are present.
-  // These constants are defined by the CPUid instructions.
-  const uint64_t one = 1;
-  return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID);
-}
-
-
-int OS::ActivationFrameAlignment() {
-  // OS X activation frames must be 16 byte-aligned; see "Mac OS X ABI
-  // Function Call Guide".
-  return 16;
-}
-
-
 const char* OS::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
   struct tm* t = localtime(&tv);
   if (NULL == t) return "";
   return t->tm_zone;
 }


 double OS::LocalTimeOffset() {
(...skipping 141 matching lines...)
 bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
   return munmap(address, size) == 0;
 }


 bool VirtualMemory::HasLazyCommits() {
   return false;
 }

-class Thread::PlatformData : public Malloced {
- public:
-  PlatformData() : thread_(kNoThread) {}
-  pthread_t thread_;  // Thread handle for pthread.
-};
-
-
-Thread::Thread(const Options& options)
-    : data_(new PlatformData),
-      stack_size_(options.stack_size()),
-      start_semaphore_(NULL) {
-  set_name(options.name());
-}
-
-
-Thread::~Thread() {
-  delete data_;
-}
-
-
-static void SetThreadName(const char* name) {
-  // pthread_setname_np is only available in 10.6 or later, so test
-  // for it at runtime.
-  int (*dynamic_pthread_setname_np)(const char*);
-  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
-    dlsym(RTLD_DEFAULT, "pthread_setname_np");
-  if (!dynamic_pthread_setname_np)
-    return;
-
-  // Mac OS X does not expose the length limit of the name, so hardcode it.
-  static const int kMaxNameLength = 63;
-  USE(kMaxNameLength);
-  ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
-  dynamic_pthread_setname_np(name);
-}
-
-
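Aside (illustration only, not part of the patch): the dlsym() lookup in the removed SetThreadName() is the usual way to call an API that exists only on newer OS releases without losing the ability to run on older ones. A minimal standalone sketch of the pattern, assuming macOS where pthread_setname_np() names the calling thread (SetCurrentThreadName is a hypothetical name):

    #include <dlfcn.h>

    // Resolve the symbol at runtime so the binary still loads on systems
    // (10.5 and earlier) where pthread_setname_np does not exist.
    static void SetCurrentThreadName(const char* name) {
      typedef int (*SetNameFn)(const char*);
      SetNameFn set_name =
          reinterpret_cast<SetNameFn>(dlsym(RTLD_DEFAULT, "pthread_setname_np"));
      if (set_name != NULL) set_name(name);  // silently skip on old systems
    }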
-static void* ThreadEntry(void* arg) {
-  Thread* thread = reinterpret_cast<Thread*>(arg);
-  // This is also initialized by the first argument to pthread_create() but we
-  // don't know which thread will run first (the original thread or the new
-  // one) so we initialize it here too.
-  thread->data()->thread_ = pthread_self();
-  SetThreadName(thread->name());
-  ASSERT(thread->data()->thread_ != kNoThread);
-  thread->NotifyStartedAndRun();
-  return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
-  strncpy(name_, name, sizeof(name_));
-  name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
-  pthread_attr_t* attr_ptr = NULL;
-  pthread_attr_t attr;
-  if (stack_size_ > 0) {
-    pthread_attr_init(&attr);
-    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
-    attr_ptr = &attr;
-  }
-  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
-  ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
-  pthread_join(data_->thread_, NULL);
-}
-
-
-#ifdef V8_FAST_TLS_SUPPORTED
-
-static Atomic32 tls_base_offset_initialized = 0;
-intptr_t kMacTlsBaseOffset = 0;
-
-// It's safe to do the initialization more than once, but it has to be
-// done at least once.
-static void InitializeTlsBaseOffset() {
-  const size_t kBufferSize = 128;
-  char buffer[kBufferSize];
-  size_t buffer_size = kBufferSize;
-  int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
-  if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
-    V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
-  }
-  // The buffer now contains a string of the form XX.YY.ZZ, where
-  // XX is the major kernel version component.
-  // Make sure the buffer is 0-terminated.
-  buffer[kBufferSize - 1] = '\0';
-  char* period_pos = strchr(buffer, '.');
-  *period_pos = '\0';
-  int kernel_version_major =
-      static_cast<int>(strtol(buffer, NULL, 10));  // NOLINT
-  // The constants below are taken from pthreads.s from the XNU kernel
-  // sources archive at www.opensource.apple.com.
-  if (kernel_version_major < 11) {
-    // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
-    // same offsets.
-#if V8_HOST_ARCH_IA32
-    kMacTlsBaseOffset = 0x48;
-#else
-    kMacTlsBaseOffset = 0x60;
-#endif
-  } else {
-    // 11.x.x (Lion) changed the offset.
-    kMacTlsBaseOffset = 0;
-  }
-
-  Release_Store(&tls_base_offset_initialized, 1);
-}
-
-
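Aside (illustration only, not part of the patch): the removed InitializeTlsBaseOffset() keys the TLS base offset off the Darwin kernel major version. A standalone sketch of the same version probe, assuming the BSD sysctl(3) interface (DarwinKernelMajorVersion is a hypothetical name):

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <cstdlib>
    #include <cstring>

    // Returns the Darwin kernel major version (e.g. 10 for Snow Leopard,
    // 11 for Lion), or -1 if the sysctl fails.
    static int DarwinKernelMajorVersion() {
      char buffer[128];
      size_t size = sizeof(buffer);
      int name[] = { CTL_KERN, KERN_OSRELEASE };
      if (sysctl(name, 2, buffer, &size, NULL, 0) != 0) return -1;
      buffer[sizeof(buffer) - 1] = '\0';   // make sure the string terminates
      char* dot = strchr(buffer, '.');     // "11.4.2" -> "11"
      if (dot != NULL) *dot = '\0';
      return static_cast<int>(strtol(buffer, NULL, 10));
    }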
-static void CheckFastTls(Thread::LocalStorageKey key) {
-  void* expected = reinterpret_cast<void*>(0x1234CAFE);
-  Thread::SetThreadLocal(key, expected);
-  void* actual = Thread::GetExistingThreadLocal(key);
-  if (expected != actual) {
-    V8_Fatal(__FILE__, __LINE__,
-             "V8 failed to initialize fast TLS on current kernel");
-  }
-  Thread::SetThreadLocal(key, NULL);
-}
-
-#endif  // V8_FAST_TLS_SUPPORTED
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-#ifdef V8_FAST_TLS_SUPPORTED
-  bool check_fast_tls = false;
-  if (tls_base_offset_initialized == 0) {
-    check_fast_tls = true;
-    InitializeTlsBaseOffset();
-  }
-#endif
-  pthread_key_t key;
-  int result = pthread_key_create(&key, NULL);
-  USE(result);
-  ASSERT(result == 0);
-  LocalStorageKey typed_key = static_cast<LocalStorageKey>(key);
-#ifdef V8_FAST_TLS_SUPPORTED
-  // If we just initialized fast TLS support, make sure it works.
-  if (check_fast_tls) CheckFastTls(typed_key);
-#endif
-  return typed_key;
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  int result = pthread_key_delete(pthread_key);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  pthread_setspecific(pthread_key, value);
-}
-
-
 class MacOSSemaphore : public Semaphore {
  public:
   explicit MacOSSemaphore(int count) {
     int r;
     r = semaphore_create(mach_task_self(),
                          &semaphore_,
                          SYNC_POLICY_FIFO,
                          count);
     ASSERT(r == KERN_SUCCESS);
   }
(...skipping 41 matching lines...)
   limit_mutex = CreateMutex();
 }


 void OS::TearDown() {
   delete limit_mutex;
 }


 } }  // namespace v8::internal