Chromium Code Reviews

| OLD | NEW |
|---|---|
| (Empty) | |
| 1 // Copyright 2015 the V8 project authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #ifndef SRC_TRACING_TRACE_EVENT_H_ | |
| 6 #define SRC_TRACING_TRACE_EVENT_H_ | |
| 7 | |
| 8 // This header file defines implementation details of how the trace macros in | |
| 9 // trace_event_common.h collect and store trace events. Anything not | |
| 10 // implementation-specific should go in trace-event-common.h instead of here. | |
| 11 | |
| 12 #include "include/v8-tracing.h" | |
| 13 #include "src/tracing/trace-event-common.h" | |
| 14 | |
| 15 // By default, const char* argument values are assumed to have long-lived scope | |
| 16 // and will not be copied. Use this macro to force a const char* to be copied. | |
| 17 #define TRACE_STR_COPY(str) v8::internal::tracing::TraceStringWithCopy(str) | |
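For illustration, a minimal usage sketch of TRACE_STR_COPY, assuming the standard TRACE_EVENT1 macro from trace-event-common.h; the "v8" category, event name, and argument name are made up for the example:

```cpp
#include <string>
#include "src/tracing/trace-event.h"

void TraceCompile(const std::string& function_name) {
  // function_name.c_str() may point at a short-lived buffer, so ask the
  // tracing system to copy the characters instead of retaining the pointer.
  TRACE_EVENT1("v8", "Compile", "function",
               TRACE_STR_COPY(function_name.c_str()));
}
```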
| 18 | |
| 19 // By default, uint64 ID argument values are not mangled with the Process ID in | |
| 20 // TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling. | |
| 21 #define TRACE_ID_MANGLE(id) v8::internal::tracing::TraceID::ForceMangle(id) | |
| 22 | |
| 23 // By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC | |
| 24 // macros. Use this macro to prevent Process ID mangling. | |
| 25 #define TRACE_ID_DONT_MANGLE(id) v8::internal::tracing::TraceID::DontMangle(id) | |
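A short sketch of when forcing or suppressing mangling matters, assuming the TRACE_EVENT_ASYNC_BEGIN0 macro from trace-event-common.h; the category, names, and variables are illustrative:

```cpp
#include "src/tracing/trace-event.h"

void BeginAsyncWork(uint64_t request_id, const void* isolate_ptr) {
  // Integer IDs are not mangled by default; force mangling when the same
  // counter value could also be emitted by another process.
  TRACE_EVENT_ASYNC_BEGIN0("v8", "Request", TRACE_ID_MANGLE(request_id));

  // Pointer IDs are mangled by default; opt out when the value is already
  // unique across processes.
  TRACE_EVENT_ASYNC_BEGIN0("v8", "IsolateWork",
                           TRACE_ID_DONT_MANGLE(isolate_ptr));
}
```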
| 26 | |
| 27 // Sets the current sample state to the given category and name (both must be | |
| 28 // constant strings). These states are intended for a sampling profiler. | |
| 29 // Implementation note: we store category and name together because we don't | |
| 30 // want the inconsistency/expense of storing two pointers. | |
| 31 // |bucket_number| is [0..2] and is used to statically isolate samples in one | |
| 32 // thread from others. | |
| 33 #define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \ | |
| 34 name) \ | |
| 35 v8::internal::tracing::TraceEventSamplingStateScope<bucket_number>::Set( \ | |
| 36 category "\0" name) | |
| 37 | |
| 38 // Returns the current sampling state of the given bucket. | |
| 39 #define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \ | |
| 40 v8::internal::tracing::TraceEventSamplingStateScope<bucket_number>::Current() | |
| 41 | |
| 42 // Creates a scope for the sampling state of the given bucket. | |
| 43 // | |
| 44 // { // The sampling state is set within this scope. | |
| 45 // TRACE_EVENT_SAMPLING_STATE_SCOPE_FOR_BUCKET(0, "category", "name"); | |
| 46 // ...; | |
| 47 // } | |
| 48 #define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \ | |
| 49 name) \ | |
| 50 v8::internal::tracing::TraceEventSamplingStateScope<bucket_number> \ | |
| 51 traceEventSamplingScope(category "\0" name); | |
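A minimal sketch of the scoped form defined above, using bucket 0; the category and name strings are examples:

```cpp
void CollectGarbage() {
  // While this scope is alive, a sampling profiler reading bucket 0 sees the
  // combined "v8\0GC" state; the previous state is restored on scope exit.
  TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, "v8", "GC");
  // ... work attributed to this sampling state ...
}
```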
| 52 | |
| 53 | |
| 54 #define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \ | |
| 55 *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \ | |
| 56 (EventTracer::kEnabledForRecording_CategoryGroupEnabledFlags | \ | |
| 57 EventTracer::kEnabledForEventCallback_CategoryGroupEnabledFlags) | |
| 58 | |
| 59 // The following macro has no implementation, but it needs to exist since | |
| 60 // it gets called from scoped trace events. It cannot call UNIMPLEMENTED() | |
| 61 // since an empty implementation is a valid one. | |
| 62 #define INTERNAL_TRACE_MEMORY(category, name) | |
| 63 | |
| 64 //////////////////////////////////////////////////////////////////////////////// | |
| 65 // Implementation specific tracing API definitions. | |
| 66 | |
| 67 // Get a pointer to the enabled state of the given trace category. Only | |
| 68 // long-lived literal strings should be given as the category group. The | |
| 69 // returned pointer can be held permanently in a local static for example. If | |
| 70 // the unsigned char is non-zero, tracing is enabled. If tracing is enabled, | |
| 71 // TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled | |
| 72 // between the load of the tracing state and the call to | |
| 73 // TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out | |
| 74 // for best performance when tracing is disabled. | |
| 75 // const uint8_t* | |
| 76 // TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group) | |
| 77 #define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \ | |
| 78 EventTracer::GetInstance()->GetCategoryGroupEnabled | |
| 79 | |
| 80 // Get the number of times traces have been recorded. This is used to implement | |
| 81 // the TRACE_EVENT_IS_NEW_TRACE facility. | |
| 82 // unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED() | |
| 83 #define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED \ | |
| 84 EventTracer::GetInstance()->getNumTracesRecorded | |
| 85 | |
| 86 // Add a trace event to the platform tracing system. | |
| 87 // EventTracer::Handle TRACE_EVENT_API_ADD_TRACE_EVENT( | |
| 88 // char phase, | |
| 89 // const uint8_t* category_group_enabled, | |
| 90 // const char* name, | |
| 91 // uint64_t id, | |
| 92 // uint64_t context_id, | |
| 93 // uint64_t bind_id, | |
| 94 // int num_args, | |
| 95 // const char** arg_names, | |
| 96 // const uint8_t* arg_types, | |
| 97 // const uint64_t* arg_values, | |
| 98 // unsigned int flags) | |
| 99 #define TRACE_EVENT_API_ADD_TRACE_EVENT \ | |
| 100 EventTracer::GetInstance()->AddTraceEvent | |
| 101 | |
| 102 // Set the duration field of a COMPLETE trace event. | |
| 103 // void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION( | |
| 104 // const uint8_t* category_group_enabled, | |
| 105 // const char* name, | |
| 106 // EventTracer::Handle id) | |
| 107 #define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \ | |
| 108 EventTracer::GetInstance()->UpdateTraceEventDuration | |
| 109 | |
| 110 // Defines atomic operations used internally by the tracing system. | |
| 111 #define TRACE_EVENT_API_ATOMIC_WORD v8::base::AtomicWord | |
| 112 #define TRACE_EVENT_API_ATOMIC_LOAD(var) v8::base::NoBarrier_Load(&(var)) | |
| 113 #define TRACE_EVENT_API_ATOMIC_STORE(var, value) \ | |
| 114 v8::base::NoBarrier_Store(&(var), (value)) | |
| 115 | |
| 116 // The thread buckets for the sampling profiler. | |
| 117 extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3]; | |
| 118 | |
| 119 #define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \ | |
| 120 g_trace_state[thread_bucket] | |
| 121 | |
| 122 //////////////////////////////////////////////////////////////////////////////// | |
| 123 | |
| 124 // Implementation detail: trace event macros create temporary variables | |
| 125 // to keep instrumentation overhead low. These macros give each temporary | |
| 126 // variable a unique name based on the line number to prevent name collisions. | |
| 127 #define INTERNAL_TRACE_EVENT_UID3(a, b) trace_event_unique_##a##b | |
| 128 #define INTERNAL_TRACE_EVENT_UID2(a, b) INTERNAL_TRACE_EVENT_UID3(a, b) | |
| 129 #define INTERNAL_TRACE_EVENT_UID(name_prefix) \ | |
| 130 INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__) | |
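To make the indirection concrete, a sketch of the expansion (the line number 42 is just an example):

```cpp
// If "int INTERNAL_TRACE_EVENT_UID(counter) = 0;" appears on line 42:
//   INTERNAL_TRACE_EVENT_UID(counter)
//     -> INTERNAL_TRACE_EVENT_UID2(counter, __LINE__)
//     -> INTERNAL_TRACE_EVENT_UID3(counter, 42)   // __LINE__ expanded first
//     -> trace_event_unique_counter42
// The extra UID2 level exists so __LINE__ is expanded before ## pastes it.
```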
| 131 | |
| 132 // Implementation detail: internal macro to create static category. | |
| 133 // No barriers are needed, because this code is designed to operate safely | |
| 134 // even when the unsigned char* points to garbage data (which may be the case | |
| 135 // on processors without cache coherency). | |
| 136 #define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \ | |
| 137 category_group, atomic, category_group_enabled) \ | |
| 138 category_group_enabled = \ | |
| 139 reinterpret_cast<const uint8_t*>(TRACE_EVENT_API_ATOMIC_LOAD(atomic)); \ | |
| 140 if (!category_group_enabled) { \ | |
| 141 category_group_enabled = \ | |
| 142 TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \ | |
| 143 TRACE_EVENT_API_ATOMIC_STORE( \ | |
| 144 atomic, reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( \ | |
| 145 category_group_enabled)); \ | |
| 146 } | |
| 147 | |
| 148 #define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \ | |
| 149 static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \ | |
| 150 const uint8_t* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \ | |
| 151 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \ | |
| 152 category_group, INTERNAL_TRACE_EVENT_UID(atomic), \ | |
| 153 INTERNAL_TRACE_EVENT_UID(category_group_enabled)); | |
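Spelled out by hand for a single call site, the caching pattern generated by the two macros above looks roughly like this; the variable names are shortened, and the real expansion uses the line-number-based UID names and the caller's category string (here "v8" is an example):

```cpp
static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;  // one cache slot per call site
const uint8_t* category_group_enabled =
    reinterpret_cast<const uint8_t*>(TRACE_EVENT_API_ATOMIC_LOAD(atomic));
if (!category_group_enabled) {
  // First time through: look the category up once, then cache the pointer so
  // later hits cost only a single barrier-free atomic load.
  category_group_enabled = TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("v8");
  TRACE_EVENT_API_ATOMIC_STORE(
      atomic,
      reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(category_group_enabled));
}
```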
| 154 | |
| 155 // Implementation detail: internal macro to create static category and add | |
| 156 // event if the category is enabled. | |
| 157 #define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \ | |
| 158 do { \ | |
| 159 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \ | |
| 160 if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \ | |
| 161 v8::internal::tracing::AddTraceEvent( \ | |
| 162 phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ | |
| 163 v8::internal::tracing::kNoId, v8::internal::tracing::kNoId, flags, \ | |
| 164 ##__VA_ARGS__); \ | |
| 165 } \ | |
| 166 } while (0) | |
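For reference, a direct use of the macro above; in practice it is reached through the TRACE_EVENT_* wrappers in trace-event-common.h, and the phase and flag constants are assumed to come from that header. The category, event name, and argument are examples:

```cpp
void SampleHeapSize(uint64_t heap_size_bytes) {
  // Records an instant event in the "v8" category with one integer argument.
  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, "v8", "HeapSizeSampled",
                           TRACE_EVENT_FLAG_NONE, "size_bytes",
                           heap_size_bytes);
}
```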
| 167 | |
| 168 // Implementation detail: internal macro to create static category and add begin | |
| 169 // event if the category is enabled. Also adds the end event when the scope | |
| 170 // ends. | |
| 171 #define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \ | |
| 172 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \ | |
| 173 v8::internal::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \ | |
| 174 if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \ | |
| 175 EventTracer::Handle h = v8::internal::tracing::AddTraceEvent( \ | |
| 176 TRACE_EVENT_PHASE_COMPLETE, \ | |
| 177 INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ | |
| 178 v8::internal::tracing::kNoId, v8::internal::tracing::kNoId, \ | |
| 179 TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \ | |
| 180 INTERNAL_TRACE_EVENT_UID(tracer) \ | |
| 181 .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ | |
| 182 h); \ | |
| 183 } | |
| 184 | |
| 185 #define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, \ | |
| 186 bind_id, flow_flags, ...) \ | |
| 187 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \ | |
| 188 v8::internal::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \ | |
| 189 if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \ | |
| 190 unsigned int trace_event_flags = flow_flags; \ | |
| 191 v8::internal::tracing::TraceID trace_event_bind_id(bind_id, \ | |
| 192 &trace_event_flags); \ | |
| 193 EventTracer::Handle h = v8::internal::tracing::AddTraceEvent( \ | |
| 194 TRACE_EVENT_PHASE_COMPLETE, \ | |
| 195 INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ | |
| 196 v8::internal::tracing::kNoId, trace_event_bind_id.data(), \ | |
| 197 trace_event_flags, ##__VA_ARGS__); \ | |
| 198 INTERNAL_TRACE_EVENT_UID(tracer) \ | |
| 199 .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ | |
| 200 h); \ | |
| 201 } | |
| 202 | |
| 203 // Implementation detail: internal macro to create static category and add | |
| 204 // event if the category is enabled. | |
| 205 #define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \ | |
| 206 flags, ...) \ | |
| 207 do { \ | |
| 208 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \ | |
| 209 if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \ | |
| 210 unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \ | |
| 211 v8::internal::tracing::TraceID trace_event_trace_id(id, \ | |
| 212 &trace_event_flags); \ | |
| 213 v8::internal::tracing::AddTraceEvent( \ | |
| 214 phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ | |
| 215 trace_event_trace_id.data(), v8::internal::tracing::kNoId, \ | |
| 216 trace_event_flags, ##__VA_ARGS__); \ | |
| 217 } \ | |
| 218 } while (0) | |
| 219 | |
| 220 // Adds a trace event with a given timestamp. Not Implemented. | |
| 221 #define INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(phase, category_group, name, \ | |
| 222 timestamp, flags, ...) \ | |
| 223 UNIMPLEMENTED() | |
| 224 | |
| 225 // Adds a trace event with a given id and timestamp. Not Implemented. | |
| 226 #define INTERNAL_TRACE_EVENT_ADD_WITH_ID_AND_TIMESTAMP( \ | |
| 227 phase, category_group, name, id, timestamp, flags, ...) \ | |
| 228 UNIMPLEMENTED() | |
| 229 | |
| 230 // Adds a trace event with a given id, thread_id, and timestamp. Not | |
| 231 // Implemented. | |
| 232 #define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ | |
| 233 phase, category_group, name, id, thread_id, timestamp, flags, ...) \ | |
| 234 UNIMPLEMENTED() | |
| 235 | |
| 236 #ifndef INTERNAL_GET_TRACE_CONTEXT_ID | |
| 237 #define INTERNAL_GET_TRACE_CONTEXT_ID \ | |
| 238 (uint64_t) v8::internal::Isolate::UnsafeCurrent | |

jochen (gone - plz use gerrit), 2015/09/08 14:20:34:
don't use this method. Isolate* should be always
| 239 #endif | |
| 240 | |
| 241 namespace v8 { | |
| 242 namespace internal { | |
| 243 namespace tracing { | |
| 244 | |
| 245 // Specify these values when the corresponding argument of AddTraceEvent is not | |
| 246 // used. | |
| 247 const int kZeroNumArgs = 0; | |
| 248 const uint64_t kNoId = 0; | |
| 249 | |
| 250 // TraceID encapsulates an ID that can be either an integer or a pointer. | |
| 251 // Pointers are by default mangled with the Process ID so that they are | |
| 252 // unlikely to collide when the same pointer is used in different processes. | |
| 253 class TraceID { | |
| 254 public: | |
| 255 class DontMangle { | |
| 256 public: | |
| 257 explicit DontMangle(const void* id) | |
| 258 : data_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) {} | |
| 259 explicit DontMangle(uint64_t id) : data_(id) {} | |
| 260 explicit DontMangle(unsigned int id) : data_(id) {} | |
| 261 explicit DontMangle(uint16_t id) : data_(id) {} | |
| 262 explicit DontMangle(unsigned char id) : data_(id) {} | |
| 263 explicit DontMangle(int64_t id) : data_(static_cast<uint64_t>(id)) {} | |
| 264 explicit DontMangle(int id) : data_(static_cast<uint64_t>(id)) {} | |
| 265 explicit DontMangle(int16_t id) : data_(static_cast<uint64_t>(id)) {} | |
| 266 explicit DontMangle(signed char id) : data_(static_cast<uint64_t>(id)) {} | |
| 267 uint64_t data() const { return data_; } | |
| 268 | |
| 269 private: | |
| 270 uint64_t data_; | |
| 271 }; | |
| 272 | |
| 273 class ForceMangle { | |
| 274 public: | |
| 275 explicit ForceMangle(uint64_t id) : data_(id) {} | |
| 276 explicit ForceMangle(unsigned int id) : data_(id) {} | |
| 277 explicit ForceMangle(uint16_t id) : data_(id) {} | |
| 278 explicit ForceMangle(unsigned char id) : data_(id) {} | |
| 279 explicit ForceMangle(int64_t id) : data_(static_cast<uint64_t>(id)) {} | |
| 280 explicit ForceMangle(int id) : data_(static_cast<uint64_t>(id)) {} | |
| 281 explicit ForceMangle(int16_t id) : data_(static_cast<uint64_t>(id)) {} | |
| 282 explicit ForceMangle(signed char id) : data_(static_cast<uint64_t>(id)) {} | |
| 283 uint64_t data() const { return data_; } | |
| 284 | |
| 285 private: | |
| 286 uint64_t data_; | |
| 287 }; | |
| 288 | |
| 289 TraceID(const void* id, unsigned int* flags) | |
| 290 : data_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) { | |
| 291 *flags |= TRACE_EVENT_FLAG_MANGLE_ID; | |
| 292 } | |
| 293 TraceID(ForceMangle id, unsigned int* flags) : data_(id.data()) { | |
| 294 *flags |= TRACE_EVENT_FLAG_MANGLE_ID; | |
| 295 } | |
| 296 TraceID(DontMangle id, unsigned int* flags) : data_(id.data()) {} | |
| 297 TraceID(uint64_t id, unsigned int* flags) : data_(id) { (void)flags; } | |
| 298 TraceID(unsigned int id, unsigned int* flags) : data_(id) { (void)flags; } | |
| 299 TraceID(uint16_t id, unsigned int* flags) : data_(id) { (void)flags; } | |
| 300 TraceID(unsigned char id, unsigned int* flags) : data_(id) { (void)flags; } | |
| 301 TraceID(int64_t id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) { | |
| 302 (void)flags; | |
| 303 } | |
| 304 TraceID(int id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) { | |
| 305 (void)flags; | |
| 306 } | |
| 307 TraceID(int16_t id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) { | |
| 308 (void)flags; | |
| 309 } | |
| 310 TraceID(signed char id, unsigned int* flags) | |
| 311 : data_(static_cast<uint64_t>(id)) { | |
| 312 (void)flags; | |
| 313 } | |
| 314 | |
| 315 uint64_t data() const { return data_; } | |
| 316 | |
| 317 private: | |
| 318 uint64_t data_; | |
| 319 }; | |
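A small sketch of how the wrapper classes interact with the flags word; the function and local names are illustrative:

```cpp
void Example() {
  unsigned int flags = TRACE_EVENT_FLAG_NONE;

  // A raw pointer turns on TRACE_EVENT_FLAG_MANGLE_ID so the recorded ID is
  // namespaced by the process before it is written out.
  const void* ptr = nullptr;
  v8::internal::tracing::TraceID from_pointer(ptr, &flags);

  // A plain integer leaves |flags| untouched unless ForceMangle is requested.
  v8::internal::tracing::TraceID from_int(uint64_t{42}, &flags);
}
```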
| 320 | |
| 321 // Simple union to store various types as uint64_t. | |
| 322 union TraceValueUnion { | |
| 323 bool as_bool; | |
| 324 uint64_t as_uint; | |
| 325 int64_t as_int; | |
| 326 double as_double; | |
| 327 const void* as_pointer; | |
| 328 const char* as_string; | |
| 329 }; | |
| 330 | |
| 331 // Simple container for const char* that should be copied instead of retained. | |
| 332 class TraceStringWithCopy { | |
| 333 public: | |
| 334 explicit TraceStringWithCopy(const char* str) : str_(str) {} | |
| 335 operator const char*() const { return str_; } | |
| 336 | |
| 337 private: | |
| 338 const char* str_; | |
| 339 }; | |
| 340 | |
| 341 // Define SetTraceValue for each allowed type. It stores the type and | |
| 342 // value in the return arguments. This allows this API to avoid declaring any | |
| 343 // structures so that it is portable to third_party libraries. | |
| 344 #define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, union_member, \ | |
| 345 value_type_id) \ | |
| 346 static inline void SetTraceValue(actual_type arg, unsigned char* type, \ | |
| 347 uint64_t* value) { \ | |
| 348 TraceValueUnion type_value; \ | |
| 349 type_value.union_member = arg; \ | |
| 350 *type = value_type_id; \ | |
| 351 *value = type_value.as_uint; \ | |
| 352 } | |
| 353 // Simpler form for int types that can be safely cast. | |
| 354 #define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, value_type_id) \ | |
| 355 static inline void SetTraceValue(actual_type arg, unsigned char* type, \ | |
| 356 uint64_t* value) { \ | |
| 357 *type = value_type_id; \ | |
| 358 *value = static_cast<uint64_t>(arg); \ | |
| 359 } | |
| 360 | |
| 361 INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT) | |
| 362 INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT) | |
| 363 INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint16_t, TRACE_VALUE_TYPE_UINT) | |
| 364 INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT) | |
| 365 INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int64_t, TRACE_VALUE_TYPE_INT) | |
| 366 INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT) | |
| 367 INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int16_t, TRACE_VALUE_TYPE_INT) | |
| 368 INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT) | |
| 369 INTERNAL_DECLARE_SET_TRACE_VALUE(bool, as_bool, TRACE_VALUE_TYPE_BOOL) | |
| 370 INTERNAL_DECLARE_SET_TRACE_VALUE(double, as_double, TRACE_VALUE_TYPE_DOUBLE) | |
| 371 INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, as_pointer, | |
| 372 TRACE_VALUE_TYPE_POINTER) | |
| 373 INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, as_string, | |
| 374 TRACE_VALUE_TYPE_STRING) | |
| 375 INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string, | |
| 376 TRACE_VALUE_TYPE_COPY_STRING) | |
| 377 | |
| 378 #undef INTERNAL_DECLARE_SET_TRACE_VALUE | |
| 379 #undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT | |
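For example, the generated overloads are used like this (a standalone sketch; the value is arbitrary):

```cpp
void Example() {
  unsigned char type = 0;
  uint64_t value = 0;
  // Picks the double overload: |type| becomes TRACE_VALUE_TYPE_DOUBLE and
  // |value| holds the bit pattern of 2.5 via TraceValueUnion.
  v8::internal::tracing::SetTraceValue(2.5, &type, &value);
}
```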
| 380 | |
| 381 // These AddTraceEvent template | |
| 382 // functions are defined here instead of in the macro, because the arg_values | |
| 383 // could be temporary objects, such as std::string. In order to store | |
| 384 // pointers to the internal c_str and pass through to the tracing API, | |
| 385 // the arg_values must live throughout these procedures. | |
| 386 | |
| 387 static inline EventTracer::Handle AddTraceEvent( | |
| 388 char phase, const uint8_t* category_group_enabled, const char* name, | |
| 389 uint64_t id, uint64_t bind_id, unsigned int flags) { | |
| 390 return TRACE_EVENT_API_ADD_TRACE_EVENT( | |
| 391 phase, category_group_enabled, name, id, INTERNAL_GET_TRACE_CONTEXT_ID(), | |
| 392 bind_id, kZeroNumArgs, NULL, NULL, NULL, | |
| 393 flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID); | |
| 394 } | |
| 395 | |
| 396 template <class ARG1_TYPE> | |
| 397 static inline EventTracer::Handle AddTraceEvent( | |
| 398 char phase, const uint8_t* category_group_enabled, const char* name, | |
| 399 uint64_t id, uint64_t bind_id, unsigned int flags, const char* arg1_name, | |
| 400 const ARG1_TYPE& arg1_val) { | |
| 401 const int num_args = 1; | |
| 402 uint8_t arg_types[1]; | |
| 403 uint64_t arg_values[1]; | |
| 404 SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]); | |
| 405 return TRACE_EVENT_API_ADD_TRACE_EVENT( | |
| 406 phase, category_group_enabled, name, id, INTERNAL_GET_TRACE_CONTEXT_ID(), | |
| 407 bind_id, num_args, &arg1_name, arg_types, arg_values, | |
| 408 flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID); | |
| 409 } | |
| 410 | |
| 411 template <class ARG1_TYPE, class ARG2_TYPE> | |
| 412 static inline EventTracer::Handle AddTraceEvent( | |
| 413 char phase, const uint8_t* category_group_enabled, const char* name, | |
| 414 uint64_t id, uint64_t bind_id, unsigned int flags, const char* arg1_name, | |
| 415 const ARG1_TYPE& arg1_val, const char* arg2_name, | |
| 416 const ARG2_TYPE& arg2_val) { | |
| 417 const int num_args = 2; | |
| 418 const char* arg_names[2] = {arg1_name, arg2_name}; | |
| 419 unsigned char arg_types[2]; | |
| 420 uint64_t arg_values[2]; | |
| 421 SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]); | |
| 422 SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]); | |
| 423 return TRACE_EVENT_API_ADD_TRACE_EVENT( | |
| 424 phase, category_group_enabled, name, id, INTERNAL_GET_TRACE_CONTEXT_ID(), | |
| 425 bind_id, num_args, arg_names, arg_types, arg_values, | |
| 426 flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID); | |
| 427 } | |
| 428 | |
| 429 // Used by TRACE_EVENTx macros. Do not use directly. | |
| 430 class ScopedTracer { | |
| 431 public: | |
| 432 // Note: members of data_ intentionally left uninitialized. See Initialize. | |
| 433 ScopedTracer() : p_data_(NULL) {} | |
| 434 | |
| 435 ~ScopedTracer() { | |
| 436 if (p_data_ && *data_.category_group_enabled) | |
| 437 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION( | |
| 438 data_.category_group_enabled, data_.name, data_.event_handle); | |
| 439 } | |
| 440 | |
| 441 void Initialize(const uint8_t* category_group_enabled, const char* name, | |
| 442 EventTracer::Handle event_handle) { | |
| 443 data_.category_group_enabled = category_group_enabled; | |
| 444 data_.name = name; | |
| 445 data_.event_handle = event_handle; | |
| 446 p_data_ = &data_; | |
| 447 } | |
| 448 | |
| 449 private: | |
| 450 // This Data struct workaround is to avoid initializing all the members | |
| 451 // in Data during construction of this object, since this object is always | |
| 452 // constructed, even when tracing is disabled. If the members of Data were | |
| 453 // members of this class instead, compiler warnings occur about potential | |
| 454 // uninitialized accesses. | |
| 455 struct Data { | |
| 456 const uint8_t* category_group_enabled; | |
| 457 const char* name; | |
| 458 EventTracer::Handle event_handle; | |
| 459 }; | |
| 460 Data* p_data_; | |
| 461 Data data_; | |
| 462 }; | |
| 463 | |
| 464 // Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly. | |
| 465 class ScopedTraceBinaryEfficient { | |
| 466 public: | |
| 467 ScopedTraceBinaryEfficient(const char* category_group, const char* name); | |
| 468 ~ScopedTraceBinaryEfficient(); | |
| 469 | |
| 470 private: | |
| 471 const uint8_t* category_group_enabled_; | |
| 472 const char* name_; | |
| 473 EventTracer::Handle event_handle_; | |
| 474 }; | |
| 475 | |
| 476 // TraceEventSamplingStateScope records the current sampling state | |
| 477 // and sets a new sampling state. When the scope exits, it restores | |
| 478 // the previously recorded sampling state. | |
| 479 template <size_t BucketNumber> | |
| 480 class TraceEventSamplingStateScope { | |
| 481 public: | |
| 482 explicit TraceEventSamplingStateScope(const char* category_and_name) { | |
| 483 previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current(); | |
| 484 TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name); | |
| 485 } | |
| 486 | |
| 487 ~TraceEventSamplingStateScope() { | |
| 488 TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_); | |
| 489 } | |
| 490 | |
| 491 static inline const char* Current() { | |
| 492 return reinterpret_cast<const char*>( | |
| 493 TRACE_EVENT_API_ATOMIC_LOAD(g_trace_state[BucketNumber])); | |
| 494 } | |
| 495 | |
| 496 static inline void Set(const char* category_and_name) { | |
| 497 TRACE_EVENT_API_ATOMIC_STORE(g_trace_state[BucketNumber], | |
| 498 reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( | |
| 499 const_cast<char*>(category_and_name))); | |
| 500 } | |
| 501 | |
| 502 private: | |
| 503 const char* previous_state_; | |
| 504 }; | |
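A small usage sketch of the scope class itself, using bucket 0; the embedded "\0" matches how the macros above build the combined string, and the category and name are examples:

```cpp
using Bucket0 = v8::internal::tracing::TraceEventSamplingStateScope<0>;

void ParseScript() {
  Bucket0 outer("v8\0Parse");  // bucket 0 now reports "v8\0Parse"
  {
    Bucket0 inner("v8\0Lex");  // temporarily "v8\0Lex"
  }                            // destructor restores "v8\0Parse"
}                              // destructor restores whatever came before
```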
| 505 | |
| 506 } // namespace tracing | |
| 507 } // namespace internal | |
| 508 } // namespace v8 | |
| 509 | |
| 510 #endif // SRC_TRACING_TRACE_EVENT_H_ | |