Chromium Code Reviews
Diff: base/trace_event/heap_profiler_allocation_context.cc

Issue 2089253002: [tracing] Optimize AllocationRegister and increase max backtrace depth. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Cleanup tests (created 4 years, 6 months ago)
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/heap_profiler_allocation_context.h"

#include <cstring>

#include "base/hash.h"
#include "base/macros.h"
(...skipping 13 matching lines...)
  return !(lhs.value == rhs.value);
}

Backtrace::Backtrace(): frame_count(0) {}

bool operator==(const Backtrace& lhs, const Backtrace& rhs) {
  if (lhs.frame_count != rhs.frame_count) return false;
  return std::equal(lhs.frames, lhs.frames + lhs.frame_count, rhs.frames);
}

+bool operator!=(const Backtrace& lhs, const Backtrace& rhs) {
+  return !(lhs == rhs);
+}
+
AllocationContext::AllocationContext(): type_name(nullptr) {}

+AllocationContext::AllocationContext(const Backtrace& backtrace,
+                                     const char* type_name)
+    : backtrace(backtrace), type_name(type_name) {}
+
bool operator==(const AllocationContext& lhs, const AllocationContext& rhs) {
  return (lhs.backtrace == rhs.backtrace) && (lhs.type_name == rhs.type_name);
}

+bool operator!=(const AllocationContext& lhs, const AllocationContext& rhs) {
+  return !(lhs == rhs);
+}
}  // namespace trace_event
}  // namespace base

namespace BASE_HASH_NAMESPACE {
using base::trace_event::AllocationContext;
using base::trace_event::Backtrace;
using base::trace_event::StackFrame;

size_t hash<StackFrame>::operator()(const StackFrame& frame) const {
  return hash<const void*>()(frame.value);
}

size_t hash<Backtrace>::operator()(const Backtrace& backtrace) const {
-  const void* values[Backtrace::kMaxFrameCount];
-  for (size_t i = 0; i != backtrace.frame_count; ++i) {
-    values[i] = backtrace.frames[i].value;
+  // This function needs to be fast, because AllocationRegister checks
+  // its Backtrace hash map on every Insert() call. Surprisingly, Knuth's
+  // fast multiplicative hash produces great results.

Primiano Tucci (use gerrit) 2016/06/23 20:46:24: Not sure I'm that "surprised" :)

+  const uintptr_t kKnuthConstant = 2654435761;

Primiano Tucci (use gerrit) 2016/06/23 20:46:24: s/2654435761/2654435761u/ (trailing u). I never rem...
Dmitry Skiba 2016/06/28 10:54:58: Done.

+  const size_t kHashableCount = 10;
+
+  uintptr_t hash = 0;
+
+  size_t head_end = std::min(backtrace.frame_count, kHashableCount);
+  for (size_t i = 0; i != head_end; ++i) {
+    hash += reinterpret_cast<uintptr_t>(
+        backtrace.frames[i].value) * kKnuthConstant;
  }
-  return base::SuperFastHash(
-      reinterpret_cast<const char*>(values),
-      static_cast<int>(backtrace.frame_count * sizeof(*values)));
+
+  size_t tail_start = backtrace.frame_count -
+      std::min(backtrace.frame_count - head_end, kHashableCount);
+  for (size_t i = tail_start; i != backtrace.frame_count; ++i) {
+    hash += reinterpret_cast<uintptr_t>(
+        backtrace.frames[i].value) * kKnuthConstant;
+  }
+
+  // Also include number of frames.
+  hash += backtrace.frame_count * kKnuthConstant;
+
+  return hash;
}
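As a side note on the review thread above about the trailing u: the unsuffixed decimal literal 2654435761 does not fit in a 32-bit int, so the compiler gives it the next signed type that can hold it (long or long long, depending on the platform), while the u suffix makes the literal unsigned. Since kKnuthConstant is declared uintptr_t, the stored value is the same either way; the suffix mainly documents intent and can silence sign-conversion warnings on some compilers. A minimal standalone sketch (not part of this patch) that checks the literal's type:

#include <type_traits>

// An unsuffixed decimal literal always has a signed integer type; the u
// suffix forces an unsigned one.
static_assert(std::is_signed<decltype(2654435761)>::value,
              "unsuffixed decimal literal has a signed type");
static_assert(std::is_unsigned<decltype(2654435761u)>::value,
              "the u suffix makes the literal unsigned");

int main() { return 0; }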

size_t hash<AllocationContext>::operator()(const AllocationContext& ctx) const {
  size_t backtrace_hash = hash<Backtrace>()(ctx.backtrace);

  // Multiplicative hash from [Knuth 1998]. Works best if |size_t| is 32 bits,
  // because the magic number is a prime very close to 2^32 / golden ratio, but
  // will still redistribute keys bijectively on 64-bit architectures because
  // the magic number is coprime to 2^64.
  size_t type_hash = reinterpret_cast<size_t>(ctx.type_name) * 2654435761;

  // Multiply one side to break the commutativity of +. Multiplication with a
  // number coprime to |numeric_limits<size_t>::max() + 1| is bijective so
  // randomness is preserved.
  return (backtrace_hash * 3) + type_hash;
}
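On the comment above about breaking the commutativity of +: with plain addition, swapping the backtrace hash and the type hash would produce the same combined value, while multiplying one side by 3 (coprime to 2^64, so the mapping stays bijective) removes that symmetry. A small sketch with made-up sub-hash values, not from this patch:

#include <cstddef>
#include <cstdio>

// Mirrors the combining expression above with arbitrary example inputs.
std::size_t Combine(std::size_t backtrace_hash, std::size_t type_hash) {
  return (backtrace_hash * 3) + type_hash;
}

int main() {
  std::size_t a = 0x1234, b = 0x5678;  // arbitrary example sub-hashes
  std::printf("%zu %zu\n", Combine(a, b), Combine(b, a));  // differ
  std::printf("%zu %zu\n", a + b, b + a);                  // identical
  return 0;
}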

}  // BASE_HASH_NAMESPACE
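Because the new hash<Backtrace> above only folds in the first and last kHashableCount frames plus the frame count, the per-Insert() cost stays bounded even with the increased maximum backtrace depth this CL introduces. A self-contained sketch of the same head/tail scheme over a plain vector (HashFrames and the sample data are illustrative, not from the CL):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Re-statement of the head/tail multiplicative hash over a vector of frames.
uintptr_t HashFrames(const std::vector<const void*>& frames) {
  const uintptr_t kKnuthConstant = 2654435761u;
  const std::size_t kHashableCount = 10;

  uintptr_t hash = 0;

  // At most the first kHashableCount frames...
  std::size_t head_end = std::min(frames.size(), kHashableCount);
  for (std::size_t i = 0; i != head_end; ++i)
    hash += reinterpret_cast<uintptr_t>(frames[i]) * kKnuthConstant;

  // ...and at most the last kHashableCount of the remaining frames.
  std::size_t tail_start =
      frames.size() - std::min(frames.size() - head_end, kHashableCount);
  for (std::size_t i = tail_start; i != frames.size(); ++i)
    hash += reinterpret_cast<uintptr_t>(frames[i]) * kKnuthConstant;

  // The frame count also contributes, so traces that share a head and tail
  // but differ in depth still hash differently.
  hash += frames.size() * kKnuthConstant;
  return hash;
}

int main() {
  std::vector<const void*> deep(1000, nullptr);   // deep trace: bounded work
  std::vector<const void*> shallow(5, nullptr);   // shallow trace
  std::printf("%zx %zx\n", static_cast<std::size_t>(HashFrames(deep)),
              static_cast<std::size_t>(HashFrames(shallow)));
  return 0;
}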