OLD | NEW |
(Empty) | |
| 1 // Copyright 2017 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
#include "base/allocator/malloc_zone_functions_mac.h"

#include <string.h>

#include "base/atomicops.h"
#include "base/synchronization/lock.h"
| 9 |
| 10 namespace base { |
| 11 namespace allocator { |
| 12 |
// Lazily allocated (see EnsureMallocZonesInitializedLocked below) array of
// dispatch tables captured from malloc zones. Written only under the lock in
// this file; read without the lock by the allocator shim after publication.
MallocZoneFunctions* g_malloc_zones = nullptr;
// Out-of-line constructor; any member initialization comes from the header
// (not visible here) — NOTE(review): confirm members are zero-initialized
// there, since StoreZoneFunctions only conditionally assigns some fields.
MallocZoneFunctions::MallocZoneFunctions() {}
| 15 |
| 16 void StoreZoneFunctions(const ChromeMallocZone* zone, |
| 17 MallocZoneFunctions* functions) { |
| 18 functions->malloc = zone->malloc; |
| 19 functions->calloc = zone->calloc; |
| 20 functions->valloc = zone->valloc; |
| 21 functions->free = zone->free; |
| 22 functions->realloc = zone->realloc; |
| 23 functions->size = zone->size; |
| 24 CHECK(functions->malloc && functions->calloc && functions->valloc && |
| 25 functions->free && functions->realloc && functions->size); |
| 26 |
| 27 // These functions might be nullptr. |
| 28 functions->batch_malloc = zone->batch_malloc; |
| 29 functions->batch_free = zone->batch_free; |
| 30 |
| 31 if (zone->version >= 5) { |
| 32 // Not all custom malloc zones have a memalign. |
| 33 functions->memalign = zone->memalign; |
| 34 } |
| 35 if (zone->version >= 6) { |
| 36 // This may be nullptr. |
| 37 functions->free_definite_size = zone->free_definite_size; |
| 38 } |
| 39 |
| 40 functions->context = zone; |
| 41 } |
| 42 |
| 43 namespace { |
| 44 |
| 45 // All modifications to g_malloc_zones are gated behind this lock. |
| 46 // Dispatch to a malloc zone does not need to acquire this lock. |
| 47 base::Lock& GetLock() { |
| 48 static base::Lock* g_lock = new base::Lock; |
| 49 return *g_lock; |
| 50 } |
| 51 |
| 52 void EnsureMallocZonesInitializedLocked() { |
| 53 GetLock().AssertAcquired(); |
| 54 if (!g_malloc_zones) { |
| 55 g_malloc_zones = reinterpret_cast<base::allocator::MallocZoneFunctions*>( |
| 56 calloc(kMaxZoneCount, sizeof(MallocZoneFunctions))); |
| 57 } |
| 58 } |
| 59 |
| 60 int g_zone_count = 0; |
| 61 |
| 62 bool IsMallocZoneAlreadyStoredLocked(ChromeMallocZone* zone) { |
| 63 EnsureMallocZonesInitializedLocked(); |
| 64 GetLock().AssertAcquired(); |
| 65 for (int i = 0; i < g_zone_count; ++i) { |
| 66 if (g_malloc_zones[i].context == reinterpret_cast<void*>(zone)) |
| 67 return true; |
| 68 } |
| 69 return false; |
| 70 } |
| 71 |
| 72 } // namespace |
| 73 |
// Records |zone|'s dispatch table into the global array, if not already
// present. Silently drops the zone once kMaxZoneCount entries are stored.
void StoreMallocZone(ChromeMallocZone* zone) {
  base::AutoLock l(GetLock());
  EnsureMallocZonesInitializedLocked();
  if (IsMallocZoneAlreadyStoredLocked(zone))
    return;

  // Table full: drop the zone rather than overrun the array.
  if (g_zone_count == kMaxZoneCount)
    return;

  StoreZoneFunctions(zone, &g_malloc_zones[g_zone_count]);
  ++g_zone_count;

  // No other thread can possibly see these stores at this point. The code that
  // reads these values is triggered after this function returns, so we want to
  // guarantee that they are committed at this stage.
  base::subtle::MemoryBarrier();
}
| 91 |
| 92 bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone) { |
| 93 base::AutoLock l(GetLock()); |
| 94 return IsMallocZoneAlreadyStoredLocked(zone); |
| 95 } |
| 96 |
| 97 int GetMallocZoneCountForTesting() { |
| 98 base::AutoLock l(GetLock()); |
| 99 return g_zone_count; |
| 100 } |
| 101 |
| 102 void ClearAllMallocZonesForTesting() { |
| 103 base::AutoLock l(GetLock()); |
| 104 EnsureMallocZonesInitializedLocked(); |
| 105 memset(g_malloc_zones, 0, kMaxZoneCount * sizeof(MallocZoneFunctions)); |
| 106 g_zone_count = 0; |
| 107 } |
| 108 |
| 109 } // namespace allocator |
| 110 } // namespace base |
OLD | NEW |