Chromium Code Reviews

Unified Diff: base/trace_event/heap_profiler_allocation_context.cc

Issue 1891543003: [tracing] Turn StackFrame into struct. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Add "the ones closer to main()" Created 4 years, 8 months ago
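For orientation: the diff below shows only the .cc side of the change. The old code treats a stack frame as a raw pointer (frames are set to nullptr and compared with memcmp), while the new code reads a .value member, counts valid entries in frame_count, and names a Backtrace::kMaxFrameCount bound. A minimal sketch of what heap_profiler_allocation_context.h presumably declares after this patch, inferred only from those usages; the exact members, their types, and the kMaxFrameCount value are assumptions, not the real header.

#include <cstddef>

// Sketch only; inferred from the .cc below, not copied from the header.
struct StackFrame {
  const void* value;  // ordered, compared and hashed by the .cc below
};

struct Backtrace {
  Backtrace();  // defined in the .cc: initializes frame_count to 0
  enum { kMaxFrameCount = 12 };  // name used by the .cc; the value 12 is a guess
  StackFrame frames[kMaxFrameCount];
  size_t frame_count;  // number of valid leading entries in |frames|
};

struct AllocationContext {
  AllocationContext();  // defined in the .cc: initializes type_name to nullptr
  Backtrace backtrace;
  const char* type_name;
};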
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/trace_event/heap_profiler_allocation_context.h"
 
 #include <cstring>
 
 #include "base/hash.h"
 #include "base/macros.h"
 
 namespace base {
 namespace trace_event {
 
-// Constructor that does not initialize members.
-AllocationContext::AllocationContext() {}
-
-// static
-AllocationContext AllocationContext::Empty() {
-  AllocationContext ctx;
-
-  for (size_t i = 0; i < arraysize(ctx.backtrace.frames); i++)
-    ctx.backtrace.frames[i] = nullptr;
-
-  ctx.type_name = nullptr;
-
-  return ctx;
+bool operator < (const StackFrame& lhs, const StackFrame& rhs) {
+  return lhs.value < rhs.value;
 }
 
+bool operator == (const StackFrame& lhs, const StackFrame& rhs) {
+  return lhs.value == rhs.value;
+}
+
+bool operator != (const StackFrame& lhs, const StackFrame& rhs) {
+  return !(lhs.value == rhs.value);
+}
+
+Backtrace::Backtrace(): frame_count(0) {}
+
 bool operator==(const Backtrace& lhs, const Backtrace& rhs) {
-  // Pointer equality of the stack frames is assumed, so instead of doing a deep
-  // string comparison on all of the frames, a |memcmp| suffices.
-  return std::memcmp(lhs.frames, rhs.frames, sizeof(lhs.frames)) == 0;
+  if (lhs.frame_count != rhs.frame_count) return false;
+  return std::equal(lhs.frames, lhs.frames + lhs.frame_count, rhs.frames);
 }
 
+AllocationContext::AllocationContext(): type_name(nullptr) {}
+
 bool operator==(const AllocationContext& lhs, const AllocationContext& rhs) {
   return (lhs.backtrace == rhs.backtrace) && (lhs.type_name == rhs.type_name);
 }
 
 }  // namespace trace_event
 }  // namespace base
 
 namespace BASE_HASH_NAMESPACE {
 using base::trace_event::AllocationContext;
 using base::trace_event::Backtrace;
+using base::trace_event::StackFrame;
+
+size_t hash<StackFrame>::operator()(const StackFrame& frame) const {
+  return hash<const void*>()(frame.value);
+}
 
 size_t hash<Backtrace>::operator()(const Backtrace& backtrace) const {
-  return base::SuperFastHash(reinterpret_cast<const char*>(backtrace.frames),
-                             sizeof(backtrace.frames));
+  const void* values[Backtrace::kMaxFrameCount];
+  for (size_t i = 0; i != backtrace.frame_count; ++i) {
+    values[i] = backtrace.frames[i].value;
+  }
+  return base::SuperFastHash(reinterpret_cast<const char*>(values),
+                             backtrace.frame_count * sizeof(*values));
 }
 
 size_t hash<AllocationContext>::operator()(const AllocationContext& ctx) const {
   size_t backtrace_hash = hash<Backtrace>()(ctx.backtrace);
 
   // Multiplicative hash from [Knuth 1998]. Works best if |size_t| is 32 bits,
   // because the magic number is a prime very close to 2^32 / golden ratio, but
   // will still redistribute keys bijectively on 64-bit architectures because
   // the magic number is coprime to 2^64.
   size_t type_hash = reinterpret_cast<size_t>(ctx.type_name) * 2654435761;
 
   // Multiply one side to break the commutativity of +. Multiplication with a
   // number coprime to |numeric_limits<size_t>::max() + 1| is bijective so
   // randomness is preserved.
   return (backtrace_hash * 3) + type_hash;
 }
 
 }  // BASE_HASH_NAMESPACE
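Not part of the patch: the equality operators and hash<> specializations above exist so that these types can key hash-based containers in the heap profiler. A usage sketch under that assumption; the container alias and function name are illustrative, and BASE_HASH_NAMESPACE is taken to be visible through the included header.

#include <cstddef>
#include <unordered_map>

#include "base/trace_event/heap_profiler_allocation_context.h"

using base::trace_event::AllocationContext;

// Hypothetical aggregation table: bytes attributed to each allocation context.
// hash<AllocationContext> selects the bucket; the free operator== defined in
// base::trace_event resolves collisions within a bucket.
using ContextSizeMap =
    std::unordered_map<AllocationContext,
                       size_t,
                       BASE_HASH_NAMESPACE::hash<AllocationContext>>;

void RecordAllocation(ContextSizeMap* sizes,
                      const AllocationContext& context,
                      size_t size) {
  (*sizes)[context] += size;  // value-initializes the count on first insertion
}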
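Also not part of the patch: the "Multiplicative hash from [Knuth 1998]" comment in hash<AllocationContext> can be checked numerically. A standalone sketch of the arithmetic behind the magic constant 2654435761:

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  // 2^32 divided by the golden ratio is roughly 2654435769.5; the constant in
  // the code, 2654435761, is a prime a few units below it. Because it is odd,
  // it is coprime to 2^32 and 2^64, so multiplying by it permutes size_t
  // values: no two distinct type_name pointers collide before the final mix.
  const double phi = (1.0 + std::sqrt(5.0)) / 2.0;
  const double ideal = 4294967296.0 / phi;  // 2^32 / phi
  const std::uint64_t magic = 2654435761u;
  std::printf("2^32 / phi ~= %.1f, magic = %llu, odd: %s\n",
              ideal, static_cast<unsigned long long>(magic),
              magic % 2 ? "yes" : "no");
  return 0;
}

The final combine, (backtrace_hash * 3) + type_hash, multiplies one operand by 3 (also odd, hence also a bijection modulo 2^64) so that swapping the backtrace and type contributions generally produces a different hash.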
