// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/malloc_zone_aggregator_mac.h"

#include <string.h>

#include "base/atomicops.h"
#include "base/logging.h"

namespace base {
namespace allocator {

MallocZoneFunctions::MallocZoneFunctions() {}

void StoreZoneFunctions(ChromeMallocZone* zone,
                        MallocZoneFunctions* functions) {
  functions->malloc = zone->malloc;
  functions->calloc = zone->calloc;
  functions->valloc = zone->valloc;
  functions->free = zone->free;
  functions->realloc = zone->realloc;
  functions->size = zone->size;
  CHECK(functions->malloc && functions->calloc && functions->valloc &&
        functions->free && functions->realloc && functions->size);

  // These functions might be nullptr.
  functions->batch_malloc = zone->batch_malloc;
  functions->batch_free = zone->batch_free;

  if (zone->version >= 5) {
    // Not all custom malloc zones have a memalign.

Mark Mentovai
2017/02/21 22:52:51
Is this relevant to us anymore, or is this some old thing?

erikchen
2017/02/21 23:44:33
Happens on 10.11.
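
Given that reply, a caller routing memalign through this table still has to
treat the slot as optional even on version >= 5 zones. A minimal sketch of
such a guard; the helper name and the nullptr fallback are assumptions, not
part of this CL:

// Sketch (assumption): the memalign slot may be null even when
// zone->version >= 5, e.g. on 10.11, so check it before dispatching.
void* MemalignOrNull(MallocZoneFunctions* functions, void* zone,
                     size_t alignment, size_t size) {
  if (!functions->memalign)
    return nullptr;  // Caller falls back however the shim sees fit.
  return functions->memalign(reinterpret_cast<struct _malloc_zone_t*>(zone),
                             alignment, size);
}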

    functions->memalign = zone->memalign;
  }
  if (zone->version >= 6) {
    // This may be nullptr.
    functions->free_definite_size = zone->free_definite_size;
  }

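  // context is what the Dispatch* methods below match against to find this
  // zone's entry.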
  functions->context = zone;
}

MallocZoneAggregator::MallocZoneAggregator() {
  memset(zones_, 0, sizeof(MallocZoneFunctions) * kMaxZoneCount);

Mark Mentovai
2017/02/21 22:52:51
This one can be sizeof(zones_) or sizeof(MallocZoneFunctions) * arraysize(zones_).

erikchen
2017/02/21 23:44:33
Done.
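
For reference, a sketch of the first suggested form (presumably what the
"Done" patch set contains):

MallocZoneAggregator::MallocZoneAggregator() {
  // sizeof(zones_) covers the whole array, so the size expression cannot
  // drift out of sync with the declaration of zones_.
  memset(zones_, 0, sizeof(zones_));
}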

}
MallocZoneAggregator::~MallocZoneAggregator() {}

void* MallocZoneAggregator::DispatchMallocToZone(void* zone, size_t size) {
  for (int i = 0; i < kMaxZoneCount; ++i) {

Mark Mentovai
2017/02/21 22:52:51
All of the rest can be arraysize(zones_).

erikchen
2017/02/21 23:44:33
Done.
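
A sketch of the suggested bound, assuming arraysize() from base/macros.h is
available in this file:

// arraysize(zones_) is derived from the array's declared length, so every
// loop stays in sync with the size of zones_ automatically.
for (size_t i = 0; i < arraysize(zones_); ++i) {
  if (zones_[i].context == zone) {
    return zones_[i].malloc(reinterpret_cast<struct _malloc_zone_t*>(zone),
                            size);
  }
}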

    if (zones_[i].context == zone) {
      return zones_[i].malloc(reinterpret_cast<struct _malloc_zone_t*>(zone),
                              size);
    }
  }
  CHECK(false);
  return nullptr;
}

void* MallocZoneAggregator::DispatchCallocToZone(void* zone,
                                                 size_t num_items,
                                                 size_t size) {
  for (int i = 0; i < kMaxZoneCount; ++i) {

Mark Mentovai
2017/02/21 22:52:51
You’re concerned with performance and avoiding one …

erikchen
2017/02/21 23:44:33
premature optimization is the *mumble mumble mumble*

    if (zones_[i].context == zone) {
      return zones_[i].calloc(reinterpret_cast<struct _malloc_zone_t*>(zone),
                              num_items, size);
    }
  }
  CHECK(false);
  return nullptr;
}
void* MallocZoneAggregator::DispatchVallocToZone(void* zone, size_t size) {
  for (int i = 0; i < kMaxZoneCount; ++i) {
    if (zones_[i].context == zone) {
      return zones_[i].valloc(reinterpret_cast<struct _malloc_zone_t*>(zone),
                              size);
    }
  }
  CHECK(false);
  return nullptr;
}

void MallocZoneAggregator::DispatchFreeToZone(void* zone, void* ptr) {

Mark Mentovai
2017/02/21 22:52:50
By the time I got here, I thought that you should factor this repeated lookup out into a helper.

erikchen
2017/02/21 23:44:33
After factoring out GetFunctionsForZone(), I no longer …
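
The reply names a GetFunctionsForZone() helper that is not shown in this
patch set; a sketch consistent with the code above (the exact signature in
the landed change may differ):

// Hypothetical shape of the factored-out lookup: one scan plus one CHECK,
// so each Dispatch* collapses to a single call.
MallocZoneFunctions* MallocZoneAggregator::GetFunctionsForZone(void* zone) {
  for (size_t i = 0; i < arraysize(zones_); ++i) {
    if (zones_[i].context == zone)
      return &zones_[i];
  }
  CHECK(false);
  return nullptr;
}

void MallocZoneAggregator::DispatchFreeToZone(void* zone, void* ptr) {
  GetFunctionsForZone(zone)->free(
      reinterpret_cast<struct _malloc_zone_t*>(zone), ptr);
}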

  for (int i = 0; i < kMaxZoneCount; ++i) {
    if (zones_[i].context == zone) {
      zones_[i].free(reinterpret_cast<struct _malloc_zone_t*>(zone), ptr);
      return;
    }
  }
  CHECK(false);
}

void* MallocZoneAggregator::DispatchReallocToZone(void* zone,
                                                  void* ptr,
                                                  size_t size) {
  for (int i = 0; i < kMaxZoneCount; ++i) {
    if (zones_[i].context == zone) {
      return zones_[i].realloc(reinterpret_cast<struct _malloc_zone_t*>(zone),
                               ptr, size);
    }
  }
  CHECK(false);
  return nullptr;
}

void* MallocZoneAggregator::DispatchMemalignToZone(void* zone,
                                                   size_t alignment,
                                                   size_t size) {
  for (int i = 0; i < kMaxZoneCount; ++i) {
    if (zones_[i].context == zone) {
      return zones_[i].memalign(reinterpret_cast<struct _malloc_zone_t*>(zone),
                                alignment, size);
    }
  }
  CHECK(false);
  return nullptr;
}

unsigned MallocZoneAggregator::DispatchBatchMallocToZone(
    void* zone,
    size_t size,
    void** results,
    unsigned num_requested) {
  for (int i = 0; i < kMaxZoneCount; ++i) {
    if (zones_[i].context == zone) {
      return zones_[i].batch_malloc(
          reinterpret_cast<struct _malloc_zone_t*>(zone), size, results,
          num_requested);
    }
  }
  CHECK(false);
  return 0;
}
void MallocZoneAggregator::DispatchBatchFreeToZone(void* zone,

Mark Mentovai
2017/02/21 22:52:51
Blank line between functions.

erikchen
2017/02/21 23:44:33
Done.

                                                   void** to_be_freed,
                                                   unsigned num_to_be_freed) {
  for (int i = 0; i < kMaxZoneCount; ++i) {
    if (zones_[i].context == zone) {
      zones_[i].batch_free(reinterpret_cast<struct _malloc_zone_t*>(zone),
                           to_be_freed, num_to_be_freed);
      return;
    }
  }
  CHECK(false);
}

void MallocZoneAggregator::DispatchFreeDefiniteSizeToZone(void* zone,
                                                          void* ptr,
                                                          size_t size) {
  for (int i = 0; i < kMaxZoneCount; ++i) {
    if (zones_[i].context == zone) {
      zones_[i].free_definite_size(
          reinterpret_cast<struct _malloc_zone_t*>(zone), ptr, size);
      return;
    }
  }
  CHECK(false);
}

size_t MallocZoneAggregator::DispatchGetSizeEstimateToZone(void* zone,
                                                           void* ptr) {
  for (int i = 0; i < kMaxZoneCount; ++i) {
    if (zones_[i].context == zone) {
      return zones_[i].size(reinterpret_cast<struct _malloc_zone_t*>(zone),
                            ptr);
    }
  }
  CHECK(false);
  return 0;
}

void MallocZoneAggregator::StoreZone(ChromeMallocZone* zone) {
  base::AutoLock l(lock_);
  if (IsZoneAlreadyStoredLockAcquired(zone))
    return;

  if (zone_count_ == kMaxZoneCount)
    return;

  StoreZoneFunctions(zone, &zones_[zone_count_]);
  ++zone_count_;
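  // The Dispatch* methods scan zones_ without holding lock_, so make the
  // entry written above visible to those lock-free readers.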
  base::subtle::MemoryBarrier();
}

bool MallocZoneAggregator::IsZoneAlreadyStored(ChromeMallocZone* zone) {
  base::AutoLock l(lock_);
  return IsZoneAlreadyStoredLockAcquired(zone);
}

int MallocZoneAggregator::GetZoneCount() {
  base::AutoLock l(lock_);
  return zone_count_;
}

bool MallocZoneAggregator::IsZoneAlreadyStoredLockAcquired(
    ChromeMallocZone* zone) {
  lock_.AssertAcquired();
  for (int i = 0; i < zone_count_; ++i) {
    if (zones_[i].context == reinterpret_cast<void*>(zone))
      return true;
  }
  return false;
}

}  // namespace allocator
}  // namespace base
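
To close, a hedged sketch of how a malloc shim might drive this class end to
end; the shim-side names here (g_aggregator, InstallDefaultZone, HookedMalloc)
are illustrative, not part of this CL:

#include <malloc/malloc.h>

namespace base {
namespace allocator {

// Illustrative shim state; only the aggregator calls are grounded in the
// code above.
MallocZoneAggregator g_aggregator;

void InstallDefaultZone() {
  g_aggregator.StoreZone(
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone()));
}

void* HookedMalloc(struct _malloc_zone_t* zone, size_t size) {
  // The zone pointer doubles as the context key saved by StoreZone().
  return g_aggregator.DispatchMallocToZone(zone, size);
}

}  // namespace allocator
}  // namespace base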