Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(215)

Side by Side Diff: base/trace_event/sharded_allocation_register.h

Issue 2890363003: Enable sharding of AllocationRegister on desktop. (Closed)
Patch Set: Clean up. Created 3 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 // Copyright 2017 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef BASE_TRACE_EVENT_SHARDED_ALLOCATION_REGISTER_H_
6 #define BASE_TRACE_EVENT_SHARDED_ALLOCATION_REGISTER_H_
7
8 #include <memory>
9 #include <unordered_map>
10 #include <vector>
11
12 #include "base/atomicops.h"
13 #include "base/base_export.h"
14 #include "base/macros.h"
15 #include "base/synchronization/lock.h"
16 #include "base/trace_event/heap_profiler_allocation_register.h"
17
18 namespace base {
19 namespace trace_event {
20
21 class TraceEventMemoryOverhead;
22
// This container holds allocations, and context for each allocation [in the
// form of a back trace].
// Allocations are distributed over a set of independent shards (see
// |allocation_registers_| below), each protected by its own lock, so that
// concurrent inserts/removes from different threads rarely contend.
// This container is thread-safe.
class BASE_EXPORT ShardedAllocationRegister {
 public:
  // Aggregated allocation statistics keyed by the context (back trace) the
  // allocations were made under.
  using MetricsMap = std::unordered_map<AllocationContext, AllocationMetrics>;

  // Aggregate totals across all live allocations in all shards.
  struct OutputMetrics {
    // Total size of allocated objects.
    size_t size;
    // Total count of allocated objects.
    size_t count;
  };

  ShardedAllocationRegister();

  // Turns tracking on/off. Insert()/Remove() are expected to be no-ops while
  // disabled. NOTE(review): presumably these use Release_Store on |enabled_|
  // to pair with the Acquire_Load in is_enabled() — confirm in the .cc file.
  void SetEnabled();
  void SetDisabled();
  // Lock-free check used on the allocation/deallocation fast path; see the
  // comment on |enabled_| for why this avoids taking a lock.
  bool is_enabled() const { return !!base::subtle::Acquire_Load(&enabled_); }

  ~ShardedAllocationRegister();

  // Inserts allocation details into the container. If the address was present
  // already, its details are updated. |address| must not be null.
  //
  // Returns true if an insert occurred. Inserts may fail because the table
  // is full.
  bool Insert(const void* address,
              size_t size,
              const AllocationContext& context);

  // Removes the address from the container if it is present. It is ok to call
  // this with a null pointer.
  void Remove(const void* address);

  // Estimates memory overhead including |sizeof(AllocationRegister)|.
  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const;

  // Updates |map| with all allocated objects and their statistics.
  // Returns aggregate statistics.
  OutputMetrics UpdateAndReturnsMetrics(MetricsMap& map) const;

 private:
  // One shard: an AllocationRegister plus the lock that guards it.
  struct RegisterAndLock {
    RegisterAndLock();
    ~RegisterAndLock();
    AllocationRegister allocation_register;
    Lock lock;
  };
  // Heap-allocated array of shards. The shard count is not visible in this
  // header; it is presumably fixed at construction — see the .cc file.
  std::unique_ptr<RegisterAndLock[]> allocation_registers_;

  // This member needs to be checked on every allocation and deallocation [fast
  // path] when heap profiling is enabled. Using a lock here causes significant
  // contention.
  base::subtle::Atomic32 enabled_;

  DISALLOW_COPY_AND_ASSIGN(ShardedAllocationRegister);
};
81
82 } // namespace trace_event
83 } // namespace base
84
85 #endif // BASE_TRACE_EVENT_SHARDED_ALLOCATION_REGISTER_H_
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698