| OLD | NEW |
| 1 //===------------------------ memory.cpp ----------------------------------===// | 1 //===------------------------ memory.cpp ----------------------------------===// |
| 2 // | 2 // |
| 3 // The LLVM Compiler Infrastructure | 3 // The LLVM Compiler Infrastructure |
| 4 // | 4 // |
| 5 // This file is dual licensed under the MIT and the University of Illinois Open | 5 // This file is dual licensed under the MIT and the University of Illinois Open |
| 6 // Source Licenses. See LICENSE.TXT for details. | 6 // Source Licenses. See LICENSE.TXT for details. |
| 7 // | 7 // |
| 8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
| 9 | 9 |
| 10 #define _LIBCPP_BUILDING_MEMORY | 10 #define _LIBCPP_BUILDING_MEMORY |
| 11 #include "cassert" |
| 11 #include "memory" | 12 #include "memory" |
| 12 #ifndef _LIBCPP_HAS_NO_THREADS | 13 #ifndef _LIBCPP_HAS_NO_THREADS |
| 13 #include "mutex" | 14 #include "mutex" |
| 14 #include "thread" | 15 #include "thread" |
| 15 #endif | 16 #endif |
| 16 #include "include/atomic_support.h" | 17 #include "include/atomic_support.h" |
| 17 | 18 |
| 19 #if defined(__APPLE__) |
| 20 #include <malloc/malloc.h> |
| 21 #elif defined(ANDROID) |
| 22 extern "C" size_t dlmalloc_usable_size(const void*) __attribute__((weak)); |
| 23 #endif |
| 24 |
| 18 _LIBCPP_BEGIN_NAMESPACE_STD | 25 _LIBCPP_BEGIN_NAMESPACE_STD |
| 19 | 26 |
| 20 namespace | 27 namespace |
| 21 { | 28 { |
| 22 | 29 |
| 23 // NOTE: Relaxed and acq/rel atomics (for increment and decrement respectively) | 30 // NOTE: Relaxed and acq/rel atomics (for increment and decrement respectively) |
| 24 // should be sufficient for thread safety. | 31 // should be sufficient for thread safety. |
| 25 // See https://llvm.org/bugs/show_bug.cgi?id=22803 | 32 // See https://llvm.org/bugs/show_bug.cgi?id=22803 |
| 26 template <class T> | 33 template <class T> |
| 27 inline T | 34 inline T |
| (...skipping 190 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 218 if (d <= space - size) | 225 if (d <= space - size) |
| 219 { | 226 { |
| 220 r = p2; | 227 r = p2; |
| 221 ptr = r; | 228 ptr = r; |
| 222 space -= d; | 229 space -= d; |
| 223 } | 230 } |
| 224 } | 231 } |
| 225 return r; | 232 return r; |
| 226 } | 233 } |
| 227 | 234 |
| 235 // allocation_counter |
| 236 |
// Implementation details for allocation_counter.  Each allocation group owns
// one mutex-protected counter slot.  pthread primitives are used directly
// rather than std::mutex — presumably because this file is part of the C++
// runtime itself and must avoid re-entering library machinery (TODO confirm).
namespace
{

// A counter record paired with the mutex that guards it.
struct lockable_counter {
    pthread_mutex_t lock;
    allocation_counter::value value;  // zero-initialized by the aggregate initializers below
};

// One slot per allocation_group enumerator.  Each aggregate initializer names
// only the mutex; the trailing `value` member is therefore value-initialized,
// so every counter starts at zero without any runtime setup.
lockable_counter lockable_counters[] =
{
    {PTHREAD_MUTEX_INITIALIZER},
    {PTHREAD_MUTEX_INITIALIZER},
    {PTHREAD_MUTEX_INITIALIZER},
    {PTHREAD_MUTEX_INITIALIZER},
    {PTHREAD_MUTEX_INITIALIZER},
    {PTHREAD_MUTEX_INITIALIZER},
    {PTHREAD_MUTEX_INITIALIZER},
    {PTHREAD_MUTEX_INITIALIZER},
    {PTHREAD_MUTEX_INITIALIZER},
    {PTHREAD_MUTEX_INITIALIZER},
    {PTHREAD_MUTEX_INITIALIZER},
    {PTHREAD_MUTEX_INITIALIZER},
    {PTHREAD_MUTEX_INITIALIZER},
    {PTHREAD_MUTEX_INITIALIZER},
    {PTHREAD_MUTEX_INITIALIZER},
};
constexpr size_t lockable_counters_size = sizeof(lockable_counters) / sizeof(*lockable_counters);
// Keeps the table in sync with the allocation_group enumeration.
static_assert(lockable_counters_size == allocation_group_count, "bad number of allocation counters");

// Maps a group enumerator to its counter slot.  Assumes enumerator values are
// contiguous starting at 0 — TODO confirm against the enum declaration; the
// static_assert above only checks the count, not the values.
lockable_counter& counter_for_group(allocation_group group)
{
    return lockable_counters[static_cast<int>(group)];
}

// Returns the allocator-reported usable size of the heap block at `ptr`,
// which may exceed the size originally requested.
size_t get_usable_size(const void* ptr) {
#if defined(__APPLE__)
    return malloc_size(ptr);
#elif defined(ANDROID)
    return dlmalloc_usable_size(ptr);
#else
#error Unsupported platform!
#endif
}

} // namespace
| 282 |
| 283 const char* get_allocation_group_name(allocation_group group) |
| 284 { |
| 285 switch (group) |
| 286 { |
| 287 case allocation_group::allocator: return "std::allocator"; |
| 288 case allocation_group::deque: return "std::deque"; |
| 289 case allocation_group::list: return "std::list"; |
| 290 case allocation_group::map: return "std::map"; |
| 291 case allocation_group::multimap: return "std::multimap"; |
| 292 case allocation_group::multiset: return "std::multiset"; |
| 293 case allocation_group::set: return "std::set"; |
| 294 case allocation_group::shared_ptr: return "std::shared_ptr"; |
| 295 case allocation_group::string: return "std::string"; |
| 296 case allocation_group::unordered_map: return "std::unordered_map"; |
| 297 case allocation_group::unordered_multimap: return "std::unordered_multim
ap"; |
| 298 case allocation_group::unordered_multiset: return "std::unordered_multis
et"; |
| 299 case allocation_group::unordered_set: return "std::unordered_set"; |
| 300 case allocation_group::vector: return "std::vector"; |
| 301 case allocation_group::vector_bool: return "std::vector<bool>"; |
| 302 |
| 303 case allocation_group::group_count: break; |
| 304 } |
| 305 return nullptr; |
| 306 } |
| 307 |
| 308 allocation_counter::value allocation_counter::get(allocation_group group) |
| 309 { |
| 310 value result; |
| 311 auto& counter = counter_for_group(group); |
| 312 pthread_mutex_lock(&counter.lock); |
| 313 { |
| 314 result = counter.value; |
| 315 } |
| 316 pthread_mutex_unlock(&counter.lock); |
| 317 return result; |
| 318 } |
| 319 |
| 320 void allocation_counter::allocated(allocation_group group, const void* ptr, size
_t size, size_t payload_size) |
| 321 { |
| 322 if (!ptr) { |
| 323 return; |
| 324 } |
| 325 size_t usable_size = get_usable_size(ptr); |
| 326 assert(usable_size >= size); |
| 327 auto& counter = counter_for_group(group); |
| 328 pthread_mutex_lock(&counter.lock); |
| 329 { |
| 330 counter.value.total_usable_size += usable_size; |
| 331 counter.value.total_size += size; |
| 332 counter.value.total_count += 1; |
| 333 counter.value.total_payload_size += payload_size; |
| 334 } |
| 335 pthread_mutex_unlock(&counter.lock); |
| 336 } |
| 337 |
| 338 void allocation_counter::deallocated(allocation_group group, const void* ptr, si
ze_t size, size_t payload_size) |
| 339 { |
| 340 size_t usable_size = get_usable_size(ptr); |
| 341 assert(usable_size >= size); |
| 342 auto& counter = counter_for_group(group); |
| 343 pthread_mutex_lock(&counter.lock); |
| 344 { |
| 345 assert(counter.value.total_usable_size >= usable_size && |
| 346 counter.value.total_size >= size && |
| 347 counter.value.total_count >= 1 && |
| 348 counter.value.total_payload_size >= payload_size); |
| 349 counter.value.total_usable_size -= usable_size; |
| 350 counter.value.total_size -= size; |
| 351 counter.value.total_count -= 1; |
| 352 counter.value.total_payload_size -= payload_size; |
| 353 assert(counter.value.total_constructed_size <= counter.value.total_size)
; |
| 354 } |
| 355 pthread_mutex_unlock(&counter.lock); |
| 356 } |
| 357 |
| 358 void allocation_counter::constructed(allocation_group group, size_t size) { |
| 359 auto& counter = counter_for_group(group); |
| 360 pthread_mutex_lock(&counter.lock); |
| 361 { |
| 362 counter.value.total_constructed_size += size; |
| 363 assert(counter.value.total_constructed_size <= counter.value.total_size)
; |
| 364 } |
| 365 pthread_mutex_unlock(&counter.lock); |
| 366 } |
| 367 |
| 368 void allocation_counter::destroyed(allocation_group group, size_t size) { |
| 369 auto& counter = counter_for_group(group); |
| 370 pthread_mutex_lock(&counter.lock); |
| 371 { |
| 372 assert(size <= counter.value.total_constructed_size); |
| 373 counter.value.total_constructed_size -= size; |
| 374 } |
| 375 pthread_mutex_unlock(&counter.lock); |
| 376 } |
| 377 |
| 228 _LIBCPP_END_NAMESPACE_STD | 378 _LIBCPP_END_NAMESPACE_STD |
| OLD | NEW |