Chromium Code Reviews

Unified Diff: base/trace_event/heap_profiler_allocation_context.cc

Issue 1839503002: [tracing] Add native allocation tracing mode. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Add type to StackFrame; format thread name (created 4 years, 8 months ago)
(Lines added by this patch set are prefixed with +, removed lines with -.)

// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/heap_profiler_allocation_context.h"

#include <cstring>

#include "base/hash.h"
#include "base/macros.h"

namespace base {
namespace trace_event {

+bool operator < (const StackFrame& lhs, const StackFrame& rhs) {
+  return lhs.value < rhs.value;
+}
+
+bool operator == (const StackFrame& lhs, const StackFrame& rhs) {
+  return lhs.value == rhs.value;
+}
+
+bool operator != (const StackFrame& lhs, const StackFrame& rhs) {
+  return lhs.value != rhs.value;
+}
+
// Constructor that does not initialize members.
AllocationContext::AllocationContext() {}

// static
AllocationContext AllocationContext::Empty() {
  AllocationContext ctx;

  for (size_t i = 0; i < arraysize(ctx.backtrace.frames); i++)
-    ctx.backtrace.frames[i] = nullptr;
+    ctx.backtrace.frames[i] = StackFrame::Empty();

  ctx.type_name = nullptr;

  return ctx;
}

bool operator==(const Backtrace& lhs, const Backtrace& rhs) {
  // Pointer equality of the stack frames is assumed, so instead of doing a deep
  // string comparison on all of the frames, a |memcmp| suffices.
  return std::memcmp(lhs.frames, rhs.frames, sizeof(lhs.frames)) == 0;
}

bool operator==(const AllocationContext& lhs, const AllocationContext& rhs) {
  return (lhs.backtrace == rhs.backtrace) && (lhs.type_name == rhs.type_name);
}

}  // namespace trace_event
}  // namespace base

namespace BASE_HASH_NAMESPACE {
using base::trace_event::AllocationContext;
using base::trace_event::Backtrace;
+using base::trace_event::StackFrame;
+
+size_t hash<StackFrame>::operator()(const StackFrame& frame) const {
+  return this->hash<const void*>::operator()(frame.value);
+}

size_t hash<Backtrace>::operator()(const Backtrace& backtrace) const {
  return base::SuperFastHash(reinterpret_cast<const char*>(backtrace.frames),
                             sizeof(backtrace.frames));
}

size_t hash<AllocationContext>::operator()(const AllocationContext& ctx) const {
  size_t backtrace_hash = hash<Backtrace>()(ctx.backtrace);

  // Multiplicative hash from [Knuth 1998]. Works best if |size_t| is 32 bits,
  // because the magic number is a prime very close to 2^32 / golden ratio, but
  // will still redistribute keys bijectively on 64-bit architectures because
  // the magic number is coprime to 2^64.
  size_t type_hash = reinterpret_cast<size_t>(ctx.type_name) * 2654435761;

  // Multiply one side to break the commutativity of +. Multiplication with a
  // number coprime to |numeric_limits<size_t>::max() + 1| is bijective so
  // randomness is preserved.
  return (backtrace_hash * 3) + type_hash;
}

}  // BASE_HASH_NAMESPACE
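For context, here is a minimal standalone sketch (not part of this change) of what the new StackFrame operators buy: operator< lets a StackFrame key an ordered container, and the equality operator plus a hash specialization let it key a hash map. The StackFrame struct below is a simplified, hypothetical stand-in for the one declared in heap_profiler_allocation_context.h (which, per this patch set, also carries a frame type), and std::hash stands in for BASE_HASH_NAMESPACE.

#include <cstddef>
#include <functional>
#include <map>
#include <unordered_map>

// Hypothetical, simplified stand-in for base::trace_event::StackFrame.
struct StackFrame {
  const void* value;
};

bool operator<(const StackFrame& lhs, const StackFrame& rhs) {
  return lhs.value < rhs.value;
}

bool operator==(const StackFrame& lhs, const StackFrame& rhs) {
  return lhs.value == rhs.value;
}

namespace std {
template <>
struct hash<StackFrame> {
  size_t operator()(const StackFrame& frame) const {
    // Same idea as the patch: defer to the hash of the opaque pointer value.
    return hash<const void*>()(frame.value);
  }
};
}  // namespace std

int main() {
  static const char kDummy = 0;
  const StackFrame frame = {&kDummy};
  std::map<StackFrame, int> ordered;          // Needs operator<.
  std::unordered_map<StackFrame, int> table;  // Needs hash<> and operator==.
  ordered[frame] = 1;
  table[frame] = 2;
  return (ordered[frame] + table[frame] == 3) ? 0 : 1;
}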
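Similarly, a minimal sketch (not part of this change) of why the |memcmp| in the Backtrace equality operator is sound: frame values are assumed to be interned, i.e. the same frame always yields the same pointer, so a byte-wise comparison of the fixed-size pointer array is equivalent to comparing the frames themselves. MiniBacktrace and its 12-slot array are hypothetical stand-ins here.

#include <cstring>

struct MiniBacktrace {
  const void* frames[12];  // Fixed-size and null-padded, like Backtrace.
};

bool Equal(const MiniBacktrace& lhs, const MiniBacktrace& rhs) {
  // Byte-wise comparison of the pointer array; no deep string comparison.
  return std::memcmp(lhs.frames, rhs.frames, sizeof(lhs.frames)) == 0;
}

int main() {
  MiniBacktrace a = {}, b = {};  // Zero-initialized: all frames null.
  const char* interned = "MessageLoop::RunTask";
  a.frames[0] = interned;
  b.frames[0] = interned;  // Interning guarantees the same pointer.
  return Equal(a, b) ? 0 : 1;
}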

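Finally, a standalone sketch (not part of this change) of the two hashing tricks in hash<AllocationContext>. The constant 2654435761 is a prime close to 2^32 / phi (about 2654435769), so consecutive keys are scattered far apart in the hash range; because it is odd, it is coprime to 2^32 and 2^64, and multiplying by it permutes the whole word range instead of collapsing keys. Multiplying one operand by 3 before adding breaks the commutativity of +, so swapping the two sub-hashes changes the result. The function names below are hypothetical.

#include <cstdint>
#include <cstdio>

uint32_t MultiplicativeHash(uint32_t key) {
  // Wraps modulo 2^32; bijective because 2654435761 is odd (coprime to 2^32).
  return key * 2654435761u;
}

uint32_t Combine(uint32_t a, uint32_t b) {
  // Mirrors |(backtrace_hash * 3) + type_hash| in the patch.
  return (a * 3) + b;
}

int main() {
  // Nearby keys land far apart in the hash range.
  for (uint32_t key = 0; key < 4; key++)
    std::printf("%u -> %u\n", key, MultiplicativeHash(key));
  // Order matters: Combine(1, 2) == 5 but Combine(2, 1) == 7.
  std::printf("%u %u\n", Combine(1, 2), Combine(2, 1));
  return 0;
}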