Chromium Code Reviews| OLD | NEW |
|---|---|
| (Empty) | |
| 1 // Copyright 2017 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "chrome/common/profiling/memlog_allocator_shim.h" | |
| 6 | |
| 7 #include "base/allocator/allocator_shim.h" | |
| 8 #include "base/debug/debugging_flags.h" | |
| 9 #include "base/debug/stack_trace.h" | |
| 10 #include "base/synchronization/lock.h" | |
| 11 #include "base/trace_event/heap_profiler_allocation_register.h" | |
| 12 #include "chrome/common/profiling/memlog_stream.h" | |
| 13 | |
| 14 namespace profiling { | |
| 15 | |
| 16 namespace { | |
| 17 | |
| 18 using base::allocator::AllocatorDispatch; | |
| 19 | |
// Pipe used to ship profiling messages out of process. Set once by
// InitAllocatorShim() and read (never reset) by the send path.
MemlogSenderPipe* g_sender_pipe = nullptr;

// Maximum number of stack frames captured per allocation. Bounds the
// per-message payload at kMaxStackEntries * sizeof(uint64_t) bytes.
// TODO(review): document why 48 was chosen (requested in review).
const int kMaxStackEntries = 48;

// Size of each batching buffer; messages accumulate here before a single
// pipe write, amortizing the per-send cost.
const int kSendBufferSize = 65536;

// Number of send buffers. Prime since this is used like a hash table:
// DoSend() hashes the allocation address to pick a buffer, and a prime
// bucket count spreads addresses more evenly across the buffers/locks.
const int kNumSendBuffers = 17;
| 24 | |
| 25 class SendBuffer { | |
| 26 public: | |
| 27 SendBuffer() : buffer_(new char[kSendBufferSize]) {} | |
| 28 ~SendBuffer() { delete[] buffer_; } | |
| 29 | |
| 30 void Send(const void* data, size_t sz) { | |
| 31 base::AutoLock lock(lock_); | |
| 32 | |
| 33 if (used_ + sz > kSendBufferSize) | |
| 34 SendCurrentBuffer(); | |
| 35 | |
| 36 memcpy(&buffer_[used_], data, sz); | |
| 37 used_ += sz; | |
| 38 } | |
| 39 | |
| 40 private: | |
| 41 void SendCurrentBuffer() { | |
| 42 g_sender_pipe->Send(buffer_, used_); | |
| 43 used_ = 0; | |
| 44 } | |
| 45 | |
| 46 base::Lock lock_; | |
| 47 | |
| 48 char* buffer_; | |
| 49 size_t used_ = 0; | |
| 50 | |
| 51 DISALLOW_COPY_AND_ASSIGN(SendBuffer); | |
| 52 }; | |
| 53 | |
// Array of kNumSendBuffers SendBuffers; null until InitAllocatorShim() runs.
// A non-null value is also what enables AllocatorShimLogAlloc/Free below.
SendBuffer* g_send_buffers = nullptr;
| 55 | |
| 56 // "address" is the address in question, which is used to select which send | |
|
awong
2017/06/20 21:01:47
I thought chrome used |address| to designate param
brettw
2017/06/20 21:22:28
Lots of people do that, I do it sometimes, but the
| |
| 57 // buffer to use. | |
| 58 void DoSend(const void* address, const void* data, size_t size) { | |
| 59 base::trace_event::AllocationRegister::AddressHasher hasher; | |
| 60 int bin_to_use = hasher(address) % kNumSendBuffers; | |
| 61 g_send_buffers[bin_to_use].Send(data, size); | |
|
awong
2017/06/20 21:03:11
How does this handle threading?
| |
| 62 } | |
| 63 | |
| 64 void* HookAlloc(const AllocatorDispatch* self, size_t size, void* context) { | |
| 65 const AllocatorDispatch* const next = self->next; | |
| 66 void* ptr = next->alloc_function(next, size, context); | |
| 67 AllocatorShimLogAlloc(ptr, size); | |
| 68 return ptr; | |
| 69 } | |
| 70 | |
| 71 void* HookZeroInitAlloc(const AllocatorDispatch* self, | |
| 72 size_t n, | |
| 73 size_t size, | |
| 74 void* context) { | |
| 75 const AllocatorDispatch* const next = self->next; | |
| 76 void* ptr = next->alloc_zero_initialized_function(next, n, size, context); | |
| 77 AllocatorShimLogAlloc(ptr, n * size); | |
| 78 return ptr; | |
| 79 } | |
| 80 | |
| 81 void* HookAllocAligned(const AllocatorDispatch* self, | |
| 82 size_t alignment, | |
| 83 size_t size, | |
| 84 void* context) { | |
| 85 const AllocatorDispatch* const next = self->next; | |
| 86 void* ptr = next->alloc_aligned_function(next, alignment, size, context); | |
| 87 AllocatorShimLogAlloc(ptr, size); | |
| 88 return ptr; | |
| 89 } | |
| 90 | |
| 91 void* HookRealloc(const AllocatorDispatch* self, | |
| 92 void* address, | |
| 93 size_t size, | |
| 94 void* context) { | |
| 95 const AllocatorDispatch* const next = self->next; | |
| 96 void* ptr = next->realloc_function(next, address, size, context); | |
| 97 AllocatorShimLogFree(address); | |
| 98 if (size > 0) // realloc(size == 0) means free() | |
| 99 AllocatorShimLogAlloc(ptr, size); | |
| 100 return ptr; | |
| 101 } | |
| 102 | |
| 103 void HookFree(const AllocatorDispatch* self, void* address, void* context) { | |
| 104 AllocatorShimLogFree(address); | |
| 105 const AllocatorDispatch* const next = self->next; | |
| 106 next->free_function(next, address, context); | |
| 107 } | |
| 108 | |
| 109 size_t HookGetSizeEstimate(const AllocatorDispatch* self, | |
| 110 void* address, | |
| 111 void* context) { | |
| 112 const AllocatorDispatch* const next = self->next; | |
| 113 return next->get_size_estimate_function(next, address, context); | |
| 114 } | |
| 115 | |
| 116 unsigned HookBatchMalloc(const AllocatorDispatch* self, | |
| 117 size_t size, | |
| 118 void** results, | |
| 119 unsigned num_requested, | |
| 120 void* context) { | |
| 121 const AllocatorDispatch* const next = self->next; | |
| 122 unsigned count = | |
| 123 next->batch_malloc_function(next, size, results, num_requested, context); | |
| 124 for (unsigned i = 0; i < count; ++i) | |
| 125 AllocatorShimLogAlloc(results[i], size); | |
| 126 return count; | |
| 127 } | |
| 128 | |
| 129 void HookBatchFree(const AllocatorDispatch* self, | |
| 130 void** to_be_freed, | |
| 131 unsigned num_to_be_freed, | |
| 132 void* context) { | |
| 133 const AllocatorDispatch* const next = self->next; | |
| 134 for (unsigned i = 0; i < num_to_be_freed; ++i) | |
| 135 AllocatorShimLogFree(to_be_freed[i]); | |
| 136 next->batch_free_function(next, to_be_freed, num_to_be_freed, context); | |
| 137 } | |
| 138 | |
| 139 void HookFreeDefiniteSize(const AllocatorDispatch* self, | |
| 140 void* ptr, | |
| 141 size_t size, | |
| 142 void* context) { | |
| 143 AllocatorShimLogFree(ptr); | |
| 144 const AllocatorDispatch* const next = self->next; | |
| 145 next->free_definite_size_function(next, ptr, size, context); | |
| 146 } | |
| 147 | |
// Dispatch table inserted into the allocator shim chain by
// InitAllocatorShim(). Each hook logs the operation and forwards to the
// next dispatch in the chain.
AllocatorDispatch g_memlog_hooks = {
    &HookAlloc,             // alloc_function
    &HookZeroInitAlloc,     // alloc_zero_initialized_function
    &HookAllocAligned,      // alloc_aligned_function
    &HookRealloc,           // realloc_function
    &HookFree,              // free_function
    &HookGetSizeEstimate,   // get_size_estimate_function
    &HookBatchMalloc,       // batch_malloc_function
    &HookBatchFree,         // batch_free_function
    &HookFreeDefiniteSize,  // free_definite_size_function
    nullptr,                // next
};
| 160 | |
| 161 } // namespace | |
| 162 | |
| 163 void InitAllocatorShim(MemlogSenderPipe* sender_pipe) { | |
| 164 g_send_buffers = new SendBuffer[kNumSendBuffers]; | |
| 165 | |
| 166 g_sender_pipe = sender_pipe; | |
| 167 #ifdef NDEBUG | |
| 168 base::allocator::InsertAllocatorDispatch(&g_memlog_hooks); | |
| 169 #endif | |
| 170 } | |
| 171 | |
| 172 void AllocatorShimLogAlloc(void* address, size_t sz) { | |
| 173 if (!g_send_buffers) | |
| 174 return; | |
| 175 if (address) { | |
| 176 constexpr size_t max_message_size = | |
| 177 sizeof(AllocPacket) + kMaxStackEntries * sizeof(uint64_t); | |
| 178 char message[max_message_size]; | |
| 179 AllocPacket* alloc_packet = reinterpret_cast<AllocPacket*>(message); | |
|
awong
2017/06/20 21:01:47
:-/
Can you add a TODO + file a bug assigned to
| |
| 180 | |
| 181 uint64_t* stack = | |
| 182 reinterpret_cast<uint64_t*>(&message[sizeof(AllocPacket)]); | |
| 183 | |
| 184 #if 0 | |
|
awong
2017/06/20 21:01:47
???
brettw
2017/06/20 21:22:28
Oh, that was me testing performance without stack
| |
| 185 const void* frames[1]; | |
| 186 size_t frame_count = 0; | |
| 187 #else | |
| 188 | |
| 189 #if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS) | |
| 190 const void* frames[kMaxStackEntries]; | |
| 191 size_t frame_count = base::debug::TraceStackFramePointers( | |
| 192 frames, kMaxStackEntries, | |
| 193 1); // exclude this function from the trace. | |
| 194 #else // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS) | |
| 195 // Fall-back to capturing the stack with base::debug::StackTrace, | |
| 196 // which is likely slower, but more reliable. | |
| 197 base::debug::StackTrace stack_trace(kMaxStackEntries); | |
| 198 size_t frame_count = 0u; | |
| 199 const void* const* frames = stack_trace.Addresses(&frame_count); | |
| 200 #endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS) | |
| 201 | |
| 202 #endif | |
| 203 | |
| 204 // If there are too many frames, keep the ones furthest from main(). | |
| 205 for (size_t i = 0; i < frame_count; i++) | |
| 206 stack[i] = (uint64_t)frames[i]; | |
| 207 | |
| 208 alloc_packet->op = kAllocPacketType; | |
| 209 alloc_packet->time = 0; // TODO(brettw) add timestamp. | |
| 210 alloc_packet->address = (uint64_t)address; | |
| 211 alloc_packet->size = sz; | |
| 212 alloc_packet->stack_len = static_cast<uint32_t>(frame_count); | |
| 213 | |
| 214 DoSend(address, message, | |
| 215 sizeof(AllocPacket) + alloc_packet->stack_len * sizeof(uint64_t)); | |
| 216 } | |
| 217 } | |
| 218 | |
| 219 void AllocatorShimLogFree(void* address) { | |
| 220 if (!g_send_buffers) | |
| 221 return; | |
| 222 if (address) { | |
| 223 FreePacket free_packet; | |
| 224 free_packet.op = kFreePacketType; | |
| 225 free_packet.time = 0; // TODO(brettw) add timestamp. | |
| 226 free_packet.address = (uint64_t)address; | |
|
awong
2017/06/20 21:01:47
reinterpret_cast<>?
| |
| 227 | |
| 228 DoSend(address, &free_packet, sizeof(FreePacket)); | |
| 229 } | |
| 230 } | |
| 231 | |
| 232 } // namespace profiling | |
| OLD | NEW |