Index: base/trace_event/trace_log.cc |
diff --git a/base/trace_event/trace_log.cc b/base/trace_event/trace_log.cc |
index cc40ba98eb31f5c0b5e370bacbf8ca5b232a15dd..39a74491892c1561c787692fc61058dcbae3612a 100644 |
--- a/base/trace_event/trace_log.cc |
+++ b/base/trace_event/trace_log.cc |
@@ -16,6 +16,7 @@ |
#include "base/lazy_instance.h" |
#include "base/location.h" |
#include "base/macros.h" |
+#include "base/memory/ptr_util.h" |
#include "base/memory/ref_counted_memory.h" |
#include "base/memory/singleton.h" |
#include "base/process/process_metrics.h" |
@@ -102,6 +103,43 @@ const char* g_category_groups[MAX_CATEGORY_GROUPS] = { |
// The enabled flag is char instead of bool so that the API can be used from C. |
unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0}; |
+ |
+class TraceEventFilter { |
+ public: |
+ TraceEventFilter() {} |
+ virtual ~TraceEventFilter() {} |
+ virtual bool FilterTraceEvent(const TraceEvent& trace_event) const = 0; |
+ |
+ private: |
+ DISALLOW_COPY_AND_ASSIGN(TraceEventFilter); |
+}; |
+ |
+class EventNameFilter : public TraceEventFilter { |
+ public: |
+ EventNameFilter(const base::DictionaryValue* filter_args) { |
+ const base::ListValue* whitelist = nullptr; |
+ if (filter_args->GetList("event_name_whitelist", &whitelist)) { |
+ for (size_t i = 0; i < whitelist->GetSize(); ++i) { |
+ std::string event_name; |
+ if (!whitelist->GetString(i, &event_name)) |
+ continue; |
+ |
+ whitelist_.insert(event_name); |
+ } |
+ } |
+ } |
+ |
+ bool FilterTraceEvent(const TraceEvent& trace_event) const override { |
+ return ContainsKey(whitelist_, trace_event.name()); |
+ } |
+ |
+ private: |
+ std::unordered_set<std::string> whitelist_; |
+}; |
+ |
+base::LazyInstance<std::list<std::unique_ptr<TraceEventFilter>>>::Leaky |
+ g_category_group_filter[MAX_CATEGORY_GROUPS] = {LAZY_INSTANCE_INITIALIZER}; |
+ |
// Indexes here have to match the g_category_groups array indexes above. |
const int g_category_already_shutdown = 1; |
const int g_category_categories_exhausted = 2; |
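The TraceEventFilter interface introduced above is what per-category filters implement: return true from FilterTraceEvent() to keep an event. As a minimal sketch (the class name, prefix behavior, and constructor are invented here and are not part of this change), another filter could look like this:

  // Hypothetical example only: keeps events whose name starts with |prefix|.
  // Assumes "base/strings/string_util.h" for base::StartsWith.
  class EventNamePrefixFilter : public TraceEventFilter {
   public:
    explicit EventNamePrefixFilter(const std::string& prefix)
        : prefix_(prefix) {}

    bool FilterTraceEvent(const TraceEvent& trace_event) const override {
      return base::StartsWith(trace_event.name(), prefix_,
                              base::CompareCase::SENSITIVE);
    }

   private:
    const std::string prefix_;

    DISALLOW_COPY_AND_ASSIGN(EventNamePrefixFilter);
  };

Like EventNameFilter, such an instance would live in the per-category list held by g_category_group_filter and be consulted for every event in the categories it is attached to.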
@@ -179,6 +217,22 @@ void MakeHandle(uint32_t chunk_seq, |
handle->event_index = static_cast<uint16_t>(event_index); |
} |
+uintptr_t GetCategoryIndex(const unsigned char* category_group_enabled) { |
+ // Calculate the index of the category group by finding |
+ // category_group_enabled in g_category_group_enabled array. |
+ uintptr_t category_begin = |
+ reinterpret_cast<uintptr_t>(g_category_group_enabled); |
+ uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled); |
+ DCHECK(category_ptr >= category_begin && |
+ category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled + |
+ MAX_CATEGORY_GROUPS)) |
+ << "out of bounds category pointer"; |
+ uintptr_t category_index = |
+ (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]); |
+ |
+ return category_index; |
+} |
+ |
} // namespace |
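A minimal sketch of the invariant GetCategoryIndex() relies on: the enabled-flag pointer handed out for category i is &g_category_group_enabled[i], so the same index addresses the parallel g_category_groups and g_category_group_filter arrays.

  // Sketch only; index 3 is an arbitrary example.
  const unsigned char* enabled = &g_category_group_enabled[3];
  DCHECK_EQ(3u, GetCategoryIndex(enabled));
  const char* name = g_category_groups[3];  // Name for the same category.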
// A helper class that allows the lock to be acquired in the middle of the scope |
@@ -445,18 +499,13 @@ const unsigned char* TraceLog::GetCategoryGroupEnabled( |
const char* TraceLog::GetCategoryGroupName( |
const unsigned char* category_group_enabled) { |
- // Calculate the index of the category group by finding |
- // category_group_enabled in g_category_group_enabled array. |
- uintptr_t category_begin = |
- reinterpret_cast<uintptr_t>(g_category_group_enabled); |
- uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled); |
- DCHECK(category_ptr >= category_begin && |
- category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled + |
- MAX_CATEGORY_GROUPS)) |
- << "out of bounds category pointer"; |
- uintptr_t category_index = |
- (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]); |
- return g_category_groups[category_index]; |
+ return g_category_groups[GetCategoryIndex(category_group_enabled)]; |
+} |
+ |
+std::list<std::unique_ptr<TraceEventFilter>>* GetCategoryGroupFilter( |
+ const unsigned char* category_group_enabled) { |
+ return g_category_group_filter[GetCategoryIndex(category_group_enabled)] |
+ .Pointer(); |
} |
void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) { |
@@ -479,6 +528,28 @@ void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) { |
} |
#endif |
+  // Having a filter is an exceptional case, so avoid creating the |
+  // LazyInstance in the common case; only clear the filter list if it has |
+  // already been instantiated. |
+ if (!(g_category_group_filter[category_index] == nullptr)) |
+ g_category_group_filter[category_index].Get().clear(); |
+ |
+ for (const auto& event_filter : trace_config_.event_filters()) { |
+ if (event_filter.IsCategoryGroupEnabled(category_group)) { |
+ std::unique_ptr<TraceEventFilter> new_filter; |
+ |
+ if (event_filter.predicate_name() == "event_whitelist_predicate") { |
+ new_filter = |
+ WrapUnique(new EventNameFilter(event_filter.filter_args())); |
+ } |
+ |
+ if (new_filter) { |
+ g_category_group_filter[category_index].Get().push_back( |
+ std::move(new_filter)); |
+ enabled_flag |= ENABLED_FOR_FILTERING; |
+ } |
+ } |
+ } |
+ |
g_category_group_enabled[category_index] = enabled_flag; |
} |
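For reference, a minimal sketch of the filter_args shape that EventNameFilter consumes when the "event_whitelist_predicate" branch above matches: a dictionary holding an "event_name_whitelist" list of event names. How TraceConfig actually builds and owns that dictionary is outside this diff, so the construction below is illustrative only.

  // Illustrative only: equivalent to what event_filter.filter_args()
  // hands to EventNameFilter.
  auto whitelist = base::WrapUnique(new base::ListValue());
  whitelist->AppendString("MyWhitelistedEvent");
  whitelist->AppendString("AnotherWhitelistedEvent");

  base::DictionaryValue filter_args;
  filter_args.Set("event_name_whitelist", std::move(whitelist));

  EventNameFilter filter(&filter_args);
  // filter.FilterTraceEvent(event) now returns true only for the two
  // names above.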
@@ -1270,7 +1341,32 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp( |
#endif // OS_WIN |
std::string console_message; |
- if (*category_group_enabled & ENABLED_FOR_RECORDING) { |
+ std::unique_ptr<TraceEvent> filtered_trace_event; |
+ if (*category_group_enabled & ENABLED_FOR_FILTERING) { |
+ std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent); |
+ new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now, |
+ phase, category_group_enabled, name, scope, id, |
+ bind_id, num_args, arg_names, arg_types, |
+ arg_values, convertable_values, flags); |
+ |
+ auto filter_list = GetCategoryGroupFilter(category_group_enabled); |
+ DCHECK(!filter_list->empty()); |
+ |
+ bool should_add_event = false; |
+ for (const auto& trace_event_filter : *filter_list) { |
+      if (trace_event_filter->FilterTraceEvent(*new_trace_event)) |
+ should_add_event = true; |
+ } |
+ |
+ if (should_add_event) |
+ filtered_trace_event = std::move(new_trace_event); |
+ } |
+ |
+ // Add the trace event if we're either *just* recording (and not filtering) |
+  // or if one of our filters indicates the event should be added. |
+ if (((*category_group_enabled & ENABLED_FOR_RECORDING) && |
+ (*category_group_enabled & ENABLED_FOR_FILTERING) == 0) || |
+ filtered_trace_event) { |
OptionalAutoLock lock(&lock_); |
TraceEvent* trace_event = NULL; |
@@ -1282,21 +1378,14 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp( |
} |
if (trace_event) { |
- trace_event->Initialize(thread_id, |
- offset_event_timestamp, |
- thread_now, |
- phase, |
- category_group_enabled, |
- name, |
- scope, |
- id, |
- bind_id, |
- num_args, |
- arg_names, |
- arg_types, |
- arg_values, |
- convertable_values, |
- flags); |
+ if (filtered_trace_event) { |
+ trace_event->MoveFrom(std::move(filtered_trace_event)); |
+ } else { |
+ trace_event->Initialize(thread_id, offset_event_timestamp, thread_now, |
+ phase, category_group_enabled, name, scope, id, |
+ bind_id, num_args, arg_names, arg_types, |
+ arg_values, convertable_values, flags); |
+ } |
#if defined(OS_ANDROID) |
trace_event->SendToATrace(); |
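The condition guarding the recording block above can be read as a small predicate. A sketch of the equivalent logic, for clarity only (the helper name is hypothetical; |passed_filters| corresponds to |filtered_trace_event| being non-null):

  bool ShouldRecordEvent(unsigned char enabled_flags, bool passed_filters) {
    const bool recording = (enabled_flags & ENABLED_FOR_RECORDING) != 0;
    const bool filtering = (enabled_flags & ENABLED_FOR_FILTERING) != 0;
    // No filters for this category: record unconditionally.
    // Filters installed: record only if at least one of them accepted
    // the event.
    return (recording && !filtering) || (filtering && passed_filters);
  }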