| OLD | NEW |
| (Empty) |
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "gpu/common/gpu_trace_event.h" | |
| 6 | |
| 7 #include "base/format_macros.h" | |
| 8 #include "base/process_util.h" | |
| 9 #include "base/stringprintf.h" | |
| 10 #include "base/utf_string_conversions.h" | |
| 11 #include "base/time.h" | |
| 12 | |
| 13 #define USE_UNRELIABLE_NOW | |
| 14 | |
| 15 using namespace base; | |
| 16 | |
| 17 namespace gpu { | |
| 18 | |
| 19 // Controls the number of trace events we will buffer in-memory | |
| 20 // before throwing them away. | |
| 21 #define TRACE_EVENT_BUFFER_SIZE 500000 | |
| 22 #define TRACE_EVENT_BATCH_SIZE 1000 | |
| 23 | |
| 24 //////////////////////////////////////////////////////////////////////////////// | |
| 25 // | |
| 26 // TraceLog::Category | |
| 27 // | |
| 28 //////////////////////////////////////////////////////////////////////////////// | |
| 29 TraceCategory::TraceCategory(const char* name, bool enabled) | |
| 30 : name_(name) { | |
| 31 base::subtle::NoBarrier_Store(&enabled_, | |
| 32 static_cast<base::subtle::Atomic32>(enabled)); | |
| 33 } | |
| 34 | |
| 35 TraceCategory::~TraceCategory() { | |
| 36 base::subtle::NoBarrier_Store(&enabled_, | |
| 37 static_cast<base::subtle::Atomic32>(0)); | |
| 38 } | |
| 39 | |
| 40 //////////////////////////////////////////////////////////////////////////////// | |
| 41 // | |
| 42 // TraceEvent | |
| 43 // | |
| 44 //////////////////////////////////////////////////////////////////////////////// | |
| 45 | |
| 46 namespace { | |
| 47 const char* GetPhaseStr(TraceEventPhase phase) { | |
| 48 if (phase == GPU_TRACE_EVENT_PHASE_BEGIN) { | |
| 49 return "B"; | |
| 50 } else if (phase == GPU_TRACE_EVENT_PHASE_INSTANT) { | |
| 51 return "I"; | |
| 52 } else if (phase == GPU_TRACE_EVENT_PHASE_END) { | |
| 53 return "E"; | |
| 54 } else { | |
| 55 DCHECK(false); | |
| 56 return "?"; | |
| 57 } | |
| 58 } | |
| 59 } | |
| 60 | |
| 61 TraceEvent::TraceEvent() | |
| 62 : processId(0), | |
| 63 threadId(0), | |
| 64 phase(GPU_TRACE_EVENT_PHASE_BEGIN), | |
| 65 category(NULL), | |
| 66 name(NULL) { | |
| 67 memset(&argNames, 0, sizeof(argNames)); | |
| 68 } | |
| 69 | |
// Out-of-line destructor; nothing to release explicitly here.
TraceEvent::~TraceEvent() {
}
| 72 | |
| 73 | |
| 74 void TraceEvent::AppendAsJSON(std::string* out, | |
| 75 const std::vector<TraceEvent>& events, | |
| 76 size_t start, | |
| 77 size_t count) { | |
| 78 *out += "["; | |
| 79 for (size_t i = 0; i < count && start + i < events.size(); ++i) { | |
| 80 if (i > 0) | |
| 81 *out += ","; | |
| 82 events[i + start].AppendAsJSON(out); | |
| 83 } | |
| 84 *out += "]"; | |
| 85 } | |
| 86 | |
| 87 void TraceEvent::AppendAsJSON(std::string* out) const { | |
| 88 int nargs = 0; | |
| 89 for (int i = 0; i < TRACE_MAX_NUM_ARGS; ++i) { | |
| 90 if (argNames[i] == NULL) | |
| 91 break; | |
| 92 nargs += 1; | |
| 93 } | |
| 94 | |
| 95 const char* phaseStr = GetPhaseStr(phase); | |
| 96 int64 time_int64 = timestamp.ToInternalValue(); | |
| 97 long long unsigned int time_llui = | |
| 98 static_cast<long long unsigned int>(time_int64); | |
| 99 StringAppendF(out, | |
| 100 "{cat:'%s',pid:%i,tid:%i,ts:0x%llx,ph:'%s',name:'%s',args:{", | |
| 101 category->name(), | |
| 102 static_cast<int>(processId), | |
| 103 static_cast<int>(threadId), | |
| 104 time_llui, | |
| 105 phaseStr, | |
| 106 name); | |
| 107 for (int i = 0; i < nargs; ++i) { | |
| 108 if (i > 0) | |
| 109 *out += ","; | |
| 110 *out += argNames[i]; | |
| 111 *out += ":'"; | |
| 112 *out += argValues[i]; | |
| 113 *out += "'"; | |
| 114 } | |
| 115 *out += "}}"; | |
| 116 } | |
| 117 | |
| 118 //////////////////////////////////////////////////////////////////////////////// | |
| 119 // | |
| 120 // TraceLog | |
| 121 // | |
| 122 //////////////////////////////////////////////////////////////////////////////// | |
| 123 | |
// static
// Returns the process-wide TraceLog singleton. Uses
// StaticMemorySingletonTraits — presumably so the instance lives in static
// storage and stays queryable late in shutdown; confirm against
// base singleton documentation.
TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, StaticMemorySingletonTraits<TraceLog> >::get();
}
| 128 | |
| 129 TraceLog::TraceLog() | |
| 130 : enabled_(false) | |
| 131 { | |
| 132 logged_events_.reserve(1024); | |
| 133 } | |
| 134 | |
// Buffered-but-unflushed events are discarded along with logged_events_.
// The TraceCategory pointers in categories_ are not deleted here.
TraceLog::~TraceLog() {
}
| 137 | |
| 138 TraceCategory* TraceLog::GetCategory(const char* name) { | |
| 139 AutoLock lock(lock_); | |
| 140 // TODO(nduca): replace with a hash_map. | |
| 141 for (int i = static_cast<int>(categories_.size()) - 1; i >= 0; i--) { | |
| 142 if (strcmp(categories_[i]->name(), name) == 0) | |
| 143 return categories_[i]; | |
| 144 } | |
| 145 TraceCategory* category = new TraceCategory(name, enabled_); | |
| 146 categories_.push_back(category); | |
| 147 return category; | |
| 148 } | |
| 149 | |
| 150 void TraceLog::SetEnabled(bool enabled) { | |
| 151 AutoLock lock(lock_); | |
| 152 if (enabled == enabled_) | |
| 153 return; | |
| 154 if (enabled) { | |
| 155 // Enable all categories. | |
| 156 enabled_ = true; | |
| 157 for (size_t i = 0; i < categories_.size(); i++) { | |
| 158 base::subtle::NoBarrier_Store(&categories_[i]->enabled_, | |
| 159 static_cast<base::subtle::Atomic32>(1)); | |
| 160 } | |
| 161 } else { | |
| 162 // Disable all categories. | |
| 163 for (size_t i = 0; i < categories_.size(); i++) { | |
| 164 base::subtle::NoBarrier_Store(&categories_[i]->enabled_, | |
| 165 static_cast<base::subtle::Atomic32>(0)); | |
| 166 } | |
| 167 enabled_ = false; | |
| 168 FlushWithLockAlreadyHeld(); | |
| 169 } | |
| 170 } | |
| 171 | |
| 172 void TraceLog::SetOutputCallback(TraceLog::OutputCallback* cb) { | |
| 173 AutoLock lock(lock_); | |
| 174 if (enabled_) { | |
| 175 FlushWithLockAlreadyHeld(); | |
| 176 } | |
| 177 output_callback_.reset(cb); | |
| 178 } | |
| 179 | |
| 180 void TraceLog::AddRemotelyCollectedData(const std::string& json_events) { | |
| 181 AutoLock lock(lock_); | |
| 182 if (output_callback_.get()) | |
| 183 output_callback_->Run(json_events); | |
| 184 } | |
| 185 | |
// Public flush entry point: acquires lock_, then delegates to the
// lock-already-held worker.
void TraceLog::Flush() {
  AutoLock lock(lock_);
  FlushWithLockAlreadyHeld();
}
| 190 | |
| 191 void TraceLog::FlushWithLockAlreadyHeld() { | |
| 192 if (output_callback_.get() && logged_events_.size()) { | |
| 193 for (size_t i = 0; i < logged_events_.size(); i += TRACE_EVENT_BATCH_SIZE) { | |
| 194 std::string json_events; | |
| 195 TraceEvent::AppendAsJSON(&json_events, logged_events_, | |
| 196 i, TRACE_EVENT_BATCH_SIZE); | |
| 197 output_callback_->Run(json_events); | |
| 198 } | |
| 199 } | |
| 200 logged_events_.erase(logged_events_.begin(), logged_events_.end()); | |
| 201 } | |
| 202 | |
| 203 void TraceLog::AddTraceEvent(TraceEventPhase phase, | |
| 204 const char* file, int line, | |
| 205 TraceCategory* category, | |
| 206 const char* name, | |
| 207 const char* arg1name, const char* arg1val, | |
| 208 const char* arg2name, const char* arg2val) { | |
| 209 DCHECK(file && name); | |
| 210 #ifdef USE_UNRELIABLE_NOW | |
| 211 TimeTicks now = TimeTicks::HighResNow(); | |
| 212 #else | |
| 213 TimeTicks now = TimeTicks::Now(); | |
| 214 #endif | |
| 215 //static_cast<unsigned long>(base::GetCurrentProcId()), | |
| 216 AutoLock lock(lock_); | |
| 217 if (logged_events_.size() >= TRACE_EVENT_BUFFER_SIZE) | |
| 218 return; | |
| 219 logged_events_.push_back(TraceEvent()); | |
| 220 TraceEvent& event = logged_events_.back(); | |
| 221 event.processId = static_cast<unsigned long>(base::GetCurrentProcId()); | |
| 222 event.threadId = PlatformThread::CurrentId(); | |
| 223 event.timestamp = now; | |
| 224 event.phase = phase; | |
| 225 event.category = category; | |
| 226 event.name = name; | |
| 227 event.argNames[0] = arg1name; | |
| 228 event.argValues[0] = arg1name ? arg1val : ""; | |
| 229 event.argNames[1] = arg2name; | |
| 230 event.argValues[1] = arg2name ? arg2val : ""; | |
| 231 COMPILE_ASSERT(TRACE_MAX_NUM_ARGS == 2, TraceEvent_arc_count_out_of_sync); | |
| 232 } | |
| 233 | |
| 234 } // namespace gpu | |
| OLD | NEW |