Chromium Code Reviews| Index: base/trace_event/heap_profiler_event_writer.cc |
| diff --git a/base/trace_event/heap_profiler_event_writer.cc b/base/trace_event/heap_profiler_event_writer.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..08880e943c2b69911d8133fe0c9ec71d377eb9ff |
| --- /dev/null |
| +++ b/base/trace_event/heap_profiler_event_writer.cc |
| @@ -0,0 +1,133 @@ |
| +// Copyright 2017 The Chromium Authors. All rights reserved. |
|
Primiano Tucci (use gerrit)
2017/03/09 11:47:44
Stop living in the past :P (2015 -> 2017)
DmitrySkiba
2017/03/14 22:12:47
Done.
|
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "base/trace_event/heap_profiler_event_writer.h" |
| + |
#include <stdint.h>

#include <tuple>
#include <unordered_map>
| + |
| +#include "base/containers/hash_tables.h" |
|
Primiano Tucci (use gerrit)
2017/03/09 11:47:44
I think this is really just for legacy code; this should use std::unordered_map instead.
DmitrySkiba
2017/03/14 22:12:47
Done.
|
| +#include "base/hash.h" |
| +#include "base/memory/ptr_util.h" |
| +#include "base/numerics/safe_conversions.h" |
| +#include "base/trace_event/heap_profiler_allocation_register.h" |
| +#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" |
| +#include "base/trace_event/heap_profiler_string_deduplicator.h" |
| +#include "base/trace_event/heap_profiler_type_name_deduplicator.h" |
| +#include "base/trace_event/memory_dump_session_state.h" |
| +#include "base/trace_event/trace_event_argument.h" |
| + |
| +namespace base { |
| +namespace trace_event { |
| + |
| +namespace { |
| + |
| +struct AggregationKey { |
|
Primiano Tucci (use gerrit)
2017/03/09 11:47:44
I think you can just:
struct AggregationKey : publ
DmitrySkiba
2017/03/14 22:12:47
I started with that (using tuple), but I didn't li
|
| + int backtrace_id; |
| + int type_id; |
| + |
| + struct Hasher { |
| + size_t operator()(const AggregationKey& key) const { |
| + return base::HashInts(key.backtrace_id, key.type_id); |
| + } |
| + }; |
| + |
| + bool operator==(const AggregationKey& other) const { |
| + return backtrace_id == other.backtrace_id && type_id == other.type_id; |
| + } |
| +}; |
| + |
| +} // namespace |
| + |
| +std::unique_ptr<TracedValue> ExportHeapDump( |
| + const AllocationRegister& allocation_register, |
| + const MemoryDumpSessionState& session_state) { |
| + // Aggregate allocations by {backtrace_id, type_id} key. |
| + hash_map<AggregationKey, AllocationMetrics, AggregationKey::Hasher> |
|
Primiano Tucci (use gerrit)
2017/03/09 11:47:44
The comment on hash_map says // Use std::unordered
DmitrySkiba
2017/03/14 22:12:47
Done.
|
| + metrics_by_key; |
| + for (const auto& allocation : allocation_register) { |
| + int backtrace_id = session_state.stack_frame_deduplicator()->Insert( |
| + std::begin(allocation.context.backtrace.frames), |
| + std::begin(allocation.context.backtrace.frames) + |
| + allocation.context.backtrace.frame_count); |
| + |
| + int type_id = session_state.type_name_deduplicator()->Insert( |
| + allocation.context.type_name); |
| + |
| + AggregationKey key = {backtrace_id, type_id}; |
| + AllocationMetrics& metrics = metrics_by_key[key]; |
| + metrics.size += allocation.size; |
| + metrics.count += 1; |
| + } |
| + |
| + auto traced_value = MakeUnique<TracedValue>(); |
| + |
| + traced_value->BeginArray("nodes"); |
| + for (const auto& key_and_metrics : metrics_by_key) |
| + traced_value->AppendInteger(key_and_metrics.first.backtrace_id); |
| + traced_value->EndArray(); |
| + |
| + traced_value->BeginArray("types"); |
| + for (const auto& key_and_metrics : metrics_by_key) |
| + traced_value->AppendInteger(key_and_metrics.first.type_id); |
| + traced_value->EndArray(); |
| + |
| + traced_value->BeginArray("counts"); |
| + for (const auto& key_and_metrics : metrics_by_key) |
| + traced_value->AppendInteger( |
| + saturated_cast<int>(key_and_metrics.second.count)); |
|
Primiano Tucci (use gerrit)
2017/03/09 11:47:44
nice touch of class :)
|
| + traced_value->EndArray(); |
| + |
| + traced_value->BeginArray("sizes"); |
| + for (const auto& key_and_metrics : metrics_by_key) |
| + traced_value->AppendInteger( |
| + saturated_cast<int>(key_and_metrics.second.size)); |
| + traced_value->EndArray(); |
| + |
| + return traced_value; |
| +} |
| + |
| +std::unique_ptr<TracedValue> ExportHeapProfileEventData( |
| + const ExportedHeapDumpsMap& heap_dumps, |
| + const MemoryDumpSessionState& session_state) { |
| + auto traced_value = MakeUnique<TracedValue>(); |
| + |
| + // See brief description of the format in the header file. |
| + traced_value->SetInteger("version", 1); |
| + |
| + traced_value->BeginDictionary("allocators"); |
| + for (const auto& name_and_dump : heap_dumps) { |
| + traced_value->SetValueWithCopiedName(name_and_dump.first.c_str(), |
| + *name_and_dump.second); |
| + } |
| + traced_value->EndDictionary(); |
| + |
| + traced_value->BeginDictionary("maps"); |
| + |
| + if (auto* deduplicator = session_state.stack_frame_deduplicator()) { |
| + traced_value->BeginArray("nodes"); |
| + deduplicator->ExportIncrementally(&*traced_value); |
| + traced_value->EndArray(); |
| + } |
| + |
| + if (auto* deduplicator = session_state.type_name_deduplicator()) { |
| + traced_value->BeginArray("types"); |
| + deduplicator->ExportIncrementally(&*traced_value); |
| + traced_value->EndArray(); |
| + } |
| + |
| + if (auto* deduplicator = session_state.string_deduplicator()) { |
| + traced_value->BeginArray("strings"); |
| + deduplicator->ExportIncrementally(&*traced_value); |
| + traced_value->EndArray(); |
| + } |
| + |
| + traced_value->EndDictionary(); |
| + |
| + return traced_value; |
| +} |
| + |
| +} // namespace trace_event |
| +} // namespace base |