Chromium Code Reviews
| Index: base/trace_event/heap_profiler_allocation_context.cc |
| diff --git a/base/trace_event/heap_profiler_allocation_context.cc b/base/trace_event/heap_profiler_allocation_context.cc |
| index 374d5043d19e3bc74f44b88f7bcab45eff02c095..2b6268bfb7d42ec4b5e5f48842673c7c5cb16e7d 100644 |
| --- a/base/trace_event/heap_profiler_allocation_context.cc |
| +++ b/base/trace_event/heap_profiler_allocation_context.cc |
| @@ -31,12 +31,23 @@ bool operator==(const Backtrace& lhs, const Backtrace& rhs) { |
| return std::equal(lhs.frames, lhs.frames + lhs.frame_count, rhs.frames); |
| } |
| +bool operator!=(const Backtrace& lhs, const Backtrace& rhs) { |
| + return !(lhs == rhs); |
| +} |
| + |
| AllocationContext::AllocationContext(): type_name(nullptr) {} |
| +AllocationContext::AllocationContext(const Backtrace& backtrace, |
| + const char* type_name) |
| + : backtrace(backtrace), type_name(type_name) {} |
| + |
| bool operator==(const AllocationContext& lhs, const AllocationContext& rhs) { |
| return (lhs.backtrace == rhs.backtrace) && (lhs.type_name == rhs.type_name); |
| } |
| +bool operator!=(const AllocationContext& lhs, const AllocationContext& rhs) { |
| + return !(lhs == rhs); |
| +} |
| } // namespace trace_event |
| } // namespace base |
| @@ -50,13 +61,31 @@ size_t hash<StackFrame>::operator()(const StackFrame& frame) const { |
| } |
| size_t hash<Backtrace>::operator()(const Backtrace& backtrace) const { |
| - const void* values[Backtrace::kMaxFrameCount]; |
| - for (size_t i = 0; i != backtrace.frame_count; ++i) { |
| - values[i] = backtrace.frames[i].value; |
| + // This function needs to be fast, because AllocationRegister checks |
| + // its Backtrace hash map on every Insert() call. Surprisingly, Knuth's |
|
Primiano Tucci (use gerrit)
2016/06/23 20:46:24
Not sure I'm that "surprised" :)
|
| + // fast multiplicative hash produces great results. |
| + const uintptr_t kKnuthConstant = 2654435761; |
|
Primiano Tucci (use gerrit)
2016/06/23 20:46:24
s/2654435761/2654435761u/ (trailing u)
I never remember the implicit type rules for unsuffixed decimal literals this large.
Dmitry Skiba
2016/06/28 10:54:58
Done.
|
| + const size_t kHashableCount = 10; |
| + |
| + uintptr_t hash = 0; |
| + |
| + size_t head_end = std::min(backtrace.frame_count, kHashableCount); |
| + for (size_t i = 0; i != head_end; ++i) { |
| + hash += reinterpret_cast<uintptr_t>( |
| + backtrace.frames[i].value) * kKnuthConstant; |
| } |
| - return base::SuperFastHash( |
| - reinterpret_cast<const char*>(values), |
| - static_cast<int>(backtrace.frame_count * sizeof(*values))); |
| + |
| + size_t tail_start = backtrace.frame_count - |
| + std::min(backtrace.frame_count - head_end, kHashableCount); |
| + for (size_t i = tail_start; i != backtrace.frame_count; ++i) { |
| + hash += reinterpret_cast<uintptr_t>( |
| + backtrace.frames[i].value) * kKnuthConstant; |
| + } |
| + |
| + // Also include number of frames. |
| + hash += backtrace.frame_count * kKnuthConstant; |
| + |
| + return hash; |
| } |
| size_t hash<AllocationContext>::operator()(const AllocationContext& ctx) const { |