//===------------------------ memory.cpp ----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define _LIBCPP_BUILDING_MEMORY
#include "memory"
#include "mutex"
#include "thread"

_LIBCPP_BEGIN_NAMESPACE_STD

namespace
{

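// Helpers for atomic reference-count adjustment.  These wrap the GCC/Clang
// __sync builtins, which perform the read-modify-write with a full memory
// barrier and return the updated value.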
template <class T>
inline T
increment(T& t) _NOEXCEPT
{
    return __sync_add_and_fetch(&t, 1);
}

template <class T>
inline T
decrement(T& t) _NOEXCEPT
{
    return __sync_add_and_fetch(&t, -1);
}

} // namespace

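// Out-of-line definition of the allocator_arg tag object declared in <memory>.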
const allocator_arg_t allocator_arg = allocator_arg_t();

bad_weak_ptr::~bad_weak_ptr() _NOEXCEPT {}

const char*
bad_weak_ptr::what() const _NOEXCEPT
{
    return "bad_weak_ptr";
}

__shared_count::~__shared_count()
{
}

void
__shared_count::__add_shared() _NOEXCEPT
{
    increment(__shared_owners_);
}

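// __shared_owners_ stores the number of shared owners minus one, so a
// decrement that yields -1 means the last shared owner has released;
// dispose of the owned object and report to the caller that the count
// reached zero.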
bool
__shared_count::__release_shared() _NOEXCEPT
{
    if (decrement(__shared_owners_) == -1)
    {
        __on_zero_shared();
        return true;
    }
    return false;
}

__shared_weak_count::~__shared_weak_count()
{
}

void
__shared_weak_count::__add_shared() _NOEXCEPT
{
    __shared_count::__add_shared();
}

void
__shared_weak_count::__add_weak() _NOEXCEPT
{
    increment(__shared_weak_owners_);
}

void
__shared_weak_count::__release_shared() _NOEXCEPT
{
    if (__shared_count::__release_shared())
        __release_weak();
}

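// The weak count uses the same minus-one convention.  The shared owners
// collectively hold one weak reference, released above when the shared
// count reaches zero; once the weak count in turn hits -1, the control
// block itself can be destroyed.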
void
__shared_weak_count::__release_weak() _NOEXCEPT
{
    if (decrement(__shared_weak_owners_) == -1)
        __on_zero_shared_weak();
}

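// Implements weak_ptr<T>::lock(): atomically increment the shared count,
// but only if the object is still alive.  A shared count of -1 means the
// object has already been destroyed, so the CAS loop refuses to revive it
// and returns null instead.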
__shared_weak_count*
__shared_weak_count::lock() _NOEXCEPT
{
    long object_owners = __shared_owners_;
    while (object_owners != -1)
    {
        if (__sync_bool_compare_and_swap(&__shared_owners_,
                                         object_owners,
                                         object_owners+1))
            return this;
        object_owners = __shared_owners_;
    }
    return 0;
}

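// Base-class fallback for shared_ptr's get_deleter<D>(): reports "no
// deleter".  Control-block types in <memory> that actually store a deleter
// override this.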
#ifndef _LIBCPP_NO_RTTI

const void*
__shared_weak_count::__get_deleter(const type_info&) const _NOEXCEPT
{
    return 0;
}

#endif // _LIBCPP_NO_RTTI

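// Backing store for the mutex pool used by the atomic shared_ptr access
// functions (atomic_load, atomic_store, etc.).  Statically initialized
// pthread mutexes avoid any runtime construction-order issues.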
#if __has_feature(cxx_atomic)

static const std::size_t __sp_mut_count = 16;
static pthread_mutex_t mut_back_imp[__sp_mut_count] =
{
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

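// Viewing the pthread mutexes through std::mutex* relies on libc++'s
// std::mutex wrapping a pthread_mutex_t as its only data member, so the
// two types share a layout.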
static mutex* mut_back = reinterpret_cast<std::mutex*>(mut_back_imp);

_LIBCPP_CONSTEXPR __sp_mut::__sp_mut(void* p) _NOEXCEPT
   : __lx(p)
{
}

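// Spin briefly before blocking: try_lock with a yield up to 16 times, then
// fall back to a blocking lock().  The critical sections guarded by these
// mutexes are tiny, so a short spin usually wins.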
void
__sp_mut::lock() _NOEXCEPT
{
    mutex& m = *static_cast<mutex*>(__lx);
    unsigned count = 0;
    while (!m.try_lock())
    {
        if (++count > 16)
        {
            m.lock();
            break;
        }
        this_thread::yield();
    }
}

void
__sp_mut::unlock() _NOEXCEPT
{
    static_cast<mutex*>(__lx)->unlock();
}

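// Map a shared_ptr's address to one of the 16 pool mutexes.  Masking the
// hash with __sp_mut_count-1 works because the pool size is a power of two.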
__sp_mut&
__get_sp_mut(const void* p)
{
    static __sp_mut muts[__sp_mut_count]
    {
        &mut_back[ 0], &mut_back[ 1], &mut_back[ 2], &mut_back[ 3],
        &mut_back[ 4], &mut_back[ 5], &mut_back[ 6], &mut_back[ 7],
        &mut_back[ 8], &mut_back[ 9], &mut_back[10], &mut_back[11],
        &mut_back[12], &mut_back[13], &mut_back[14], &mut_back[15]
    };
    return muts[hash<const void*>()(p) & (__sp_mut_count-1)];
}

#endif // __has_feature(cxx_atomic)

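// Stubs for the C++11 garbage-collection support API ([util.dynamic.safety]).
// libc++ has no garbage collector, so these are no-ops and pointer safety
// is always "relaxed".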
void
declare_reachable(void*)
{
}

void
declare_no_pointers(char*, size_t)
{
}

void
undeclare_no_pointers(char*, size_t)
{
}

pointer_safety
get_pointer_safety() _NOEXCEPT
{
    return pointer_safety::relaxed;
}

void*
__undeclare_reachable(void* p)
{
    return p;
}

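// std::align: bump ptr up to the requested alignment if the aligned pointer
// still fits in the buffer.  `& -alignment` clears the low bits, rounding
// (p1 + alignment - 1) down to a multiple of alignment; alignment must be a
// power of two, and the cast assumes size_t is wide enough to hold a
// pointer value.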
void*
align(size_t alignment, size_t size, void*& ptr, size_t& space)
{
    void* r = nullptr;
    if (size <= space)
    {
        char* p1 = static_cast<char*>(ptr);
        char* p2 = (char*)((size_t)(p1 + (alignment - 1)) & -alignment);
        size_t d = static_cast<size_t>(p2 - p1);
        if (d <= space - size)
        {
            r = p2;
            ptr = r;
            space -= d;
        }
    }
    return r;
}

_LIBCPP_END_NAMESPACE_STD