// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/common/profiling/memlog_allocator_shim.h"

#include "base/allocator/allocator_shim.h"
#include "base/debug/debugging_flags.h"
#include "base/debug/stack_trace.h"
#include "base/synchronization/lock.h"
#include "base/trace_event/heap_profiler_allocation_register.h"
#include "chrome/common/profiling/memlog_stream.h"

namespace profiling {

namespace {

using base::allocator::AllocatorDispatch;

MemlogSenderPipe* g_sender_pipe = nullptr;

// The maximum number of stack entries to log. Long-term we will likely want
// to raise this to avoid truncation. This matches the current value in the
// in-process heap profiler (heap_profiler_allocation_context.h) so the two
// systems' performance and memory overhead can be compared consistently.
const int kMaxStackEntries = 48;

// Matches the native buffer size on the pipe.
const int kSendBufferSize = 65536;

// Prime since this is used like a hash table. Numbers of this magnitude seemed
// to provide sufficient parallelism to avoid lock overhead in ad-hoc testing.
const int kNumSendBuffers = 17;

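// A SendBuffer stages messages headed for the sender pipe: Send() appends
// under a lock and flushes the accumulated bytes to |g_sender_pipe| whenever
// the next message would not fit.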
class SendBuffer {
 public:
  SendBuffer() : buffer_(new char[kSendBufferSize]) {}
  ~SendBuffer() { delete[] buffer_; }

  void Send(const void* data, size_t sz) {
    base::AutoLock lock(lock_);

    if (used_ + sz > kSendBufferSize)
      SendCurrentBuffer();

    memcpy(&buffer_[used_], data, sz);
    used_ += sz;
  }

 private:
  void SendCurrentBuffer() {
    g_sender_pipe->Send(buffer_, used_);
    used_ = 0;
  }

  base::Lock lock_;

  char* buffer_;
  size_t used_ = 0;

  DISALLOW_COPY_AND_ASSIGN(SendBuffer);
};

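// Points to an array of kNumSendBuffers SendBuffers. Each message is routed
// to one of them by a hash of its allocation address (see DoSend below), so
// concurrent allocations usually contend on different locks.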
SendBuffer* g_send_buffers = nullptr;

66 // "address" is the address in question, which is used to select which send | |
67 // buffer to use. | |
68 void DoSend(const void* address, const void* data, size_t size) { | |
69 base::trace_event::AllocationRegister::AddressHasher hasher; | |
70 int bin_to_use = hasher(address) % kNumSendBuffers; | |
71 g_send_buffers[bin_to_use].Send(data, size); | |
72 } | |
73 | |
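// Each Hook* function below wraps the corresponding entry point of the next
// AllocatorDispatch in the shim chain, logging the allocations and frees that
// pass through it.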
void* HookAlloc(const AllocatorDispatch* self, size_t size, void* context) {
  const AllocatorDispatch* const next = self->next;
  void* ptr = next->alloc_function(next, size, context);
  AllocatorShimLogAlloc(ptr, size);
  return ptr;
}

void* HookZeroInitAlloc(const AllocatorDispatch* self,
                        size_t n,
                        size_t size,
                        void* context) {
  const AllocatorDispatch* const next = self->next;
  void* ptr = next->alloc_zero_initialized_function(next, n, size, context);
  AllocatorShimLogAlloc(ptr, n * size);
  return ptr;
}

void* HookAllocAligned(const AllocatorDispatch* self,
                       size_t alignment,
                       size_t size,
                       void* context) {
  const AllocatorDispatch* const next = self->next;
  void* ptr = next->alloc_aligned_function(next, alignment, size, context);
  AllocatorShimLogAlloc(ptr, size);
  return ptr;
}

void* HookRealloc(const AllocatorDispatch* self,
                  void* address,
                  size_t size,
                  void* context) {
  const AllocatorDispatch* const next = self->next;
  void* ptr = next->realloc_function(next, address, size, context);
  AllocatorShimLogFree(address);
  if (size > 0)  // realloc(size == 0) means free().
    AllocatorShimLogAlloc(ptr, size);
  return ptr;
}

void HookFree(const AllocatorDispatch* self, void* address, void* context) {
  AllocatorShimLogFree(address);
  const AllocatorDispatch* const next = self->next;
  next->free_function(next, address, context);
}

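// Size estimates are forwarded unmodified; only allocations and frees are
// logged.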
size_t HookGetSizeEstimate(const AllocatorDispatch* self,
                           void* address,
                           void* context) {
  const AllocatorDispatch* const next = self->next;
  return next->get_size_estimate_function(next, address, context);
}

unsigned HookBatchMalloc(const AllocatorDispatch* self,
                         size_t size,
                         void** results,
                         unsigned num_requested,
                         void* context) {
  const AllocatorDispatch* const next = self->next;
  unsigned count =
      next->batch_malloc_function(next, size, results, num_requested, context);
  for (unsigned i = 0; i < count; ++i)
    AllocatorShimLogAlloc(results[i], size);
  return count;
}

void HookBatchFree(const AllocatorDispatch* self,
                   void** to_be_freed,
                   unsigned num_to_be_freed,
                   void* context) {
  const AllocatorDispatch* const next = self->next;
  for (unsigned i = 0; i < num_to_be_freed; ++i)
    AllocatorShimLogFree(to_be_freed[i]);
  next->batch_free_function(next, to_be_freed, num_to_be_freed, context);
}

void HookFreeDefiniteSize(const AllocatorDispatch* self,
                          void* ptr,
                          size_t size,
                          void* context) {
  AllocatorShimLogFree(ptr);
  const AllocatorDispatch* const next = self->next;
  next->free_definite_size_function(next, ptr, size, context);
}

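// The dispatch table that InitAllocatorShim() inserts at the head of the
// allocator shim chain.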
AllocatorDispatch g_memlog_hooks = {
    &HookAlloc,             // alloc_function
    &HookZeroInitAlloc,     // alloc_zero_initialized_function
    &HookAllocAligned,      // alloc_aligned_function
    &HookRealloc,           // realloc_function
    &HookFree,              // free_function
    &HookGetSizeEstimate,   // get_size_estimate_function
    &HookBatchMalloc,       // batch_malloc_function
    &HookBatchFree,         // batch_free_function
    &HookFreeDefiniteSize,  // free_definite_size_function
    nullptr,                // next
};

}  // namespace

void InitAllocatorShim(MemlogSenderPipe* sender_pipe) {
  g_send_buffers = new SendBuffer[kNumSendBuffers];

  g_sender_pipe = sender_pipe;
#ifdef NDEBUG
  base::allocator::InsertAllocatorDispatch(&g_memlog_hooks);
#endif
}

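// Builds an AllocPacket header followed by up to kMaxStackEntries 64-bit
// frame addresses in a stack-allocated message buffer, then routes the
// message to a send buffer selected by the allocation address.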
void AllocatorShimLogAlloc(void* address, size_t sz) {
  if (!g_send_buffers)
    return;
  if (address) {
    constexpr size_t max_message_size =
        sizeof(AllocPacket) + kMaxStackEntries * sizeof(uint64_t);
    char message[max_message_size];

Primiano Tucci (use gerrit) 2017/06/21 14:11:29:
Doesn't this have to be declared ALIGNAS(sizeof(uint64_t))?

    // TODO(ajwong): Check that this is technically valid.
    AllocPacket* alloc_packet = reinterpret_cast<AllocPacket*>(message);

    uint64_t* stack =
        reinterpret_cast<uint64_t*>(&message[sizeof(AllocPacket)]);

#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
    const void* frames[kMaxStackEntries];
    size_t frame_count = base::debug::TraceStackFramePointers(
        frames, kMaxStackEntries,
        1);  // Exclude this function from the trace.
#else   // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
    // Fall back to capturing the stack with base::debug::StackTrace,
    // which is likely slower, but more reliable.
    base::debug::StackTrace stack_trace(kMaxStackEntries);
    size_t frame_count = 0u;
    const void* const* frames = stack_trace.Addresses(&frame_count);
#endif  // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)

    // If there are too many frames, keep the ones furthest from main().
    for (size_t i = 0; i < frame_count; i++)
      stack[i] = reinterpret_cast<uintptr_t>(frames[i]);

    alloc_packet->op = kAllocPacketType;
    alloc_packet->time = 0;  // TODO(brettw): Add timestamp.
    alloc_packet->address = reinterpret_cast<uintptr_t>(address);
    alloc_packet->size = sz;
    alloc_packet->stack_len = static_cast<uint32_t>(frame_count);

    DoSend(address, message,
           sizeof(AllocPacket) + alloc_packet->stack_len * sizeof(uint64_t));
  }
}

void AllocatorShimLogFree(void* address) {
  if (!g_send_buffers)
    return;
  if (address) {
    FreePacket free_packet;
    free_packet.op = kFreePacketType;
    free_packet.time = 0;  // TODO(brettw): Add timestamp.
    free_packet.address = reinterpret_cast<uintptr_t>(address);

    DoSend(address, &free_packet, sizeof(FreePacket));
  }
}

}  // namespace profiling