OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2017 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "base/trace_event/sharded_allocation_register.h" | |
6 | |
7 #include "base/trace_event/trace_event_memory_overhead.h" | |
8 #include "build/build_config.h" | |
9 | |
10 namespace base { | |
11 namespace trace_event { | |
12 | |
// This number affects the bucket and capacity counts of AllocationRegister at
// "base/trace_event/heap_profiler_allocation_register.h".
// Made const per code review (2017/05/22): a mutable file-scope global was
// never intended; style guide would also prefer the kShardCount spelling.
#if defined(OS_ANDROID) || defined(OS_IOS)
const size_t ShardCount = 1;
#else
const size_t ShardCount = 64;
#endif
20 | |
// Starts disabled; the shard array is allocated lazily on first SetEnabled().
ShardedAllocationRegister::ShardedAllocationRegister() : enabled_(false) {}
22 | |
// Default destruction releases the shard array (held by a smart pointer).
ShardedAllocationRegister::~ShardedAllocationRegister() = default;
24 | |
// Enables the register. The shard array is created only on the first call;
// the enabled flag is then published with a release store, so a reader that
// observes enabled_ == 1 via an acquire load also observes the initialized
// allocation_registers_ (the reset must stay before the store).
void ShardedAllocationRegister::SetEnabled() {
  if (!allocation_registers_)
    allocation_registers_.reset(new RegisterAndLock[ShardCount]);
  base::subtle::Release_Store(&enabled_, 1);
}
30 | |
// Disables the register by clearing the flag with a release store. The shard
// array is deliberately not freed here, so outstanding references to shards
// stay valid after disabling.
void ShardedAllocationRegister::SetDisabled() {
  base::subtle::Release_Store(&enabled_, 0);
}
34 | |
35 bool ShardedAllocationRegister::Insert(const void* address, | |
36 size_t size, | |
37 const AllocationContext& context) { | |
38 AllocationRegister::AddressHasher hasher; | |
39 size_t index = hasher(address) % ShardCount; | |
40 RegisterAndLock& ral = allocation_registers_[index]; | |
41 AutoLock lock(ral.lock); | |
42 return ral.allocation_register.Insert(address, size, context); | |
43 } | |
44 | |
45 void ShardedAllocationRegister::Remove(const void* address) { | |
46 AllocationRegister::AddressHasher hasher; | |
47 size_t index = hasher(address) % ShardCount; | |
48 RegisterAndLock& ral = allocation_registers_[index]; | |
49 AutoLock lock(ral.lock); | |
50 return ral.allocation_register.Remove(address); | |
51 } | |
52 | |
53 void ShardedAllocationRegister::EstimateTraceMemoryOverhead( | |
54 TraceEventMemoryOverhead* overhead) const { | |
55 size_t allocated = 0; | |
56 size_t resident = 0; | |
57 for (size_t i = 0; i < ShardCount; ++i) { | |
58 RegisterAndLock& ral = allocation_registers_[i]; | |
59 AutoLock lock(ral.lock); | |
60 allocated += ral.allocation_register.EstimateAllocatedMemory(); | |
61 resident += ral.allocation_register.EstimateResidentMemory(); | |
62 } | |
63 | |
64 overhead->Add(TraceEventMemoryOverhead::kHeapProfilerAllocationRegister, | |
65 allocated, resident); | |
66 } | |
67 | |
68 ShardedAllocationRegister::OutputMetrics | |
69 ShardedAllocationRegister::UpdateAndReturnsMetrics(MetricsMap& map) const { | |
70 OutputMetrics output_metrics; | |
71 output_metrics.size = 0; | |
72 output_metrics.count = 0; | |
73 for (size_t i = 0; i < ShardCount; ++i) { | |
74 RegisterAndLock& ral = allocation_registers_[i]; | |
75 AutoLock lock(ral.lock); | |
76 for (const auto& alloc_size : ral.allocation_register) { | |
77 AllocationMetrics& metrics = map[alloc_size.context]; | |
78 metrics.size += alloc_size.size; | |
79 metrics.count++; | |
80 | |
81 output_metrics.size += alloc_size.size; | |
82 output_metrics.count++; | |
83 } | |
84 } | |
85 return output_metrics; | |
86 } | |
87 | |
// RegisterAndLock bundles one AllocationRegister with the Lock that guards
// it; defaulted special members defined out of line.
ShardedAllocationRegister::RegisterAndLock::RegisterAndLock() = default;
ShardedAllocationRegister::RegisterAndLock::~RegisterAndLock() = default;
90 | |
91 } // namespace trace_event | |
92 } // namespace base | |
OLD | NEW |