Chromium Code Reviews

Unified Diff: base/trace_event/sharded_allocation_register.cc

Issue 2890363003: Enable sharding of AllocationRegister on desktop. (Closed)
Patch Set: comment from primiano. Created 3 years, 7 months ago
Index: base/trace_event/sharded_allocation_register.cc
diff --git a/base/trace_event/sharded_allocation_register.cc b/base/trace_event/sharded_allocation_register.cc
new file mode 100644
index 0000000000000000000000000000000000000000..80895211637802da59b943fc3ec37b38be951687
--- /dev/null
+++ b/base/trace_event/sharded_allocation_register.cc
@@ -0,0 +1,105 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/sharded_allocation_register.h"
+
+#include "base/trace_event/trace_event_memory_overhead.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace trace_event {
+
+// This number affects the bucket and capacity counts of AllocationRegister at
+// "base/trace_event/heap_profiler_allocation_register.h".
+#if defined(OS_ANDROID) || defined(OS_IOS)
+size_t ShardCount = 1;
+#else
+size_t ShardCount = 64;
+#endif
+
+size_t HashAddress(const void* address) {
Primiano Tucci (use gerrit) 2017/05/22 16:37:51 since this is copy-pasted from a header that is in
erikchen 2017/05/22 17:10:43 Done.
+ // The multiplicative hashing scheme from [Knuth 1998].
+ const uintptr_t key = reinterpret_cast<uintptr_t>(address);
+ const uintptr_t a = 131101;
+ const uintptr_t shift = 15;
+ const uintptr_t h = (key * a) >> shift;
+ return h;
+}
+
+ShardedAllocationRegister::ShardedAllocationRegister() : enabled_(false) {}
+
+ShardedAllocationRegister::~ShardedAllocationRegister() = default;
+
+void ShardedAllocationRegister::Initialize() {
+ allocation_registers_.resize(ShardCount);
+ for (std::unique_ptr<RegisterAndLock>& ral : allocation_registers_)
+ ral.reset(new RegisterAndLock);
Primiano Tucci (use gerrit) 2017/05/22 16:37:51 doesn't make a difference here (so I don't care ab
erikchen 2017/05/22 17:10:43 Done.
+}
+
+bool ShardedAllocationRegister::IsInitialized() const {
+ return !allocation_registers_.empty();
+}
+
+void ShardedAllocationRegister::SetEnabled(bool enabled) {
Primiano Tucci (use gerrit) 2017/05/22 16:37:50 I would remove this method and do this in initiali
erikchen 2017/05/22 17:10:43 Done. [SetEnabled and SetDisabled]
+ base::subtle::Release_Store(&enabled_, enabled ? 1 : 0);
+}
+
+bool ShardedAllocationRegister::IsEnabled() const {
+ return base::subtle::Acquire_Load(&enabled_);
Primiano Tucci (use gerrit) 2017/05/22 16:37:51 I'd make move this to the header file and turn int
erikchen 2017/05/22 17:10:43 Done.
+}
+
+bool ShardedAllocationRegister::Insert(const void* address,
+ size_t size,
+ const AllocationContext& context) {
+ size_t index = HashAddress(address) % ShardCount;
+ std::unique_ptr<RegisterAndLock>& ral = allocation_registers_[index];
+ AutoLock lock(ral->lock);
+ return ral->allocation_register.Insert(address, size, context);
+}
+
+void ShardedAllocationRegister::Remove(const void* address) {
+ size_t index = HashAddress(address) % ShardCount;
+ std::unique_ptr<RegisterAndLock>& ral = allocation_registers_[index];
+ AutoLock lock(ral->lock);
+ return ral->allocation_register.Remove(address);
+}
+
+void ShardedAllocationRegister::EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) const {
+ size_t allocated = 0;
+ size_t resident = 0;
+ for (const std::unique_ptr<RegisterAndLock>& ral : allocation_registers_) {
+ AutoLock lock(ral->lock);
+ allocated += ral->allocation_register.EstimateAllocatedMemory();
+ resident += ral->allocation_register.EstimateResidentMemory();
+ }
+
+ overhead->Add(TraceEventMemoryOverhead::kHeapProfilerAllocationRegister,
+ allocated, resident);
+}
+
+ShardedAllocationRegister::OutputMetrics
+ShardedAllocationRegister::UpdateAndReturnsMetrics(MetricsMap& map) const {
+ OutputMetrics output_metrics;
+ output_metrics.size = 0;
+ output_metrics.count = 0;
+ for (const std::unique_ptr<RegisterAndLock>& ral : allocation_registers_) {
+ AutoLock lock(ral->lock);
+ for (const auto& alloc_size : ral->allocation_register) {
+ AllocationMetrics& metrics = map[alloc_size.context];
+ metrics.size += alloc_size.size;
+ metrics.count++;
+
+ output_metrics.size += alloc_size.size;
+ output_metrics.count++;
+ }
+ }
+ return output_metrics;
+}
+
+ShardedAllocationRegister::RegisterAndLock::RegisterAndLock() = default;
+ShardedAllocationRegister::RegisterAndLock::~RegisterAndLock() = default;
+
+} // namespace trace_event
+} // namespace base
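
For readers skimming the diff, the core idea is that each shard pairs its own lock with its own AllocationRegister, and HashAddress() picks the shard, so allocator hooks running concurrently on different addresses rarely contend on the same lock. Below is a minimal standalone sketch of that sharding scheme in plain C++; std::mutex stands in for base::Lock and std::unordered_map for AllocationRegister, and the names ShardedMap and kShardCount are illustrative only, not part of the patch.

// A minimal standalone sketch of the sharding scheme above, assuming plain
// C++: std::mutex stands in for base::Lock and std::unordered_map for
// AllocationRegister. ShardedMap and kShardCount are illustrative names,
// not part of the patch.
#include <stddef.h>
#include <stdint.h>

#include <cstdio>
#include <mutex>
#include <unordered_map>
#include <vector>

namespace {

constexpr size_t kShardCount = 64;

// The same multiplicative hashing scheme from [Knuth 1998] as HashAddress()
// in the patch.
size_t HashAddress(const void* address) {
  const uintptr_t key = reinterpret_cast<uintptr_t>(address);
  const uintptr_t a = 131101;
  const uintptr_t shift = 15;
  return (key * a) >> shift;
}

// Each shard owns its lock and its table, so threads touching different
// addresses usually take different locks.
struct Shard {
  std::mutex lock;
  std::unordered_map<const void*, size_t> allocations;
};

class ShardedMap {
 public:
  ShardedMap() : shards_(kShardCount) {}

  void Insert(const void* address, size_t size) {
    Shard& shard = ShardFor(address);
    std::lock_guard<std::mutex> guard(shard.lock);
    shard.allocations[address] = size;
  }

  void Remove(const void* address) {
    Shard& shard = ShardFor(address);
    std::lock_guard<std::mutex> guard(shard.lock);
    shard.allocations.erase(address);
  }

  // Aggregation walks every shard and takes each lock in turn, mirroring
  // EstimateTraceMemoryOverhead() and UpdateAndReturnsMetrics() above.
  size_t TotalSize() {
    size_t total = 0;
    for (Shard& shard : shards_) {
      std::lock_guard<std::mutex> guard(shard.lock);
      for (const auto& entry : shard.allocations)
        total += entry.second;
    }
    return total;
  }

 private:
  Shard& ShardFor(const void* address) {
    return shards_[HashAddress(address) % kShardCount];
  }

  std::vector<Shard> shards_;
};

}  // namespace

int main() {
  ShardedMap map;
  int a = 0;
  int b = 0;
  map.Insert(&a, 16);
  map.Insert(&b, 32);
  map.Remove(&a);
  std::printf("tracked bytes: %zu\n", map.TotalSize());  // Prints 32.
  return 0;
}

The 64-vs-1 split in ShardCount presumably trades a larger register footprint on desktop for lower lock contention, while Android and iOS keep a single shard to limit overhead; the patch's comment notes the number also feeds into the bucket and capacity counts in heap_profiler_allocation_register.h.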
