OLD | NEW |
(Empty) | |
| 1 // Copyright 2015 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #ifndef SRC_TRACING_TRACE_EVENT_H_ |
| 6 #define SRC_TRACING_TRACE_EVENT_H_ |
| 7 |
| 8 #include "src/v8.h" |
| 9 #include "src/tracing/trace-event-common.h" |
| 10 // This header file defines implementation details of how the trace macros in |
| 11 // trace_event_common.h collect and store trace events. Anything not |
| 12 // implementation-specific should go in trace_event_common.h instead of here. |
| 13 |
| 14 |
| 15 // The pointer returned from GetCategoryGroupEnabled() points to a |
| 16 // value with zero or more of the following bits. Used in this file only. |
| 17 // The TRACE_EVENT macros should only use the value as a bool. |
| 18 // These values must be in sync with macro values in trace_log.h in |
| 19 // chromium. |
| 20 enum CategoryGroupEnabledFlags { |
| 21 // Category group enabled for the recording mode. |
| 22 kEnabledForRecording_CategoryGroupEnabledFlags = 1 << 0, |
| 23 // Category group enabled for the monitoring mode. |
| 24 kEnabledForMonitoring_CategoryGroupEnabledFlags = 1 << 1, |
| 25 // Category group enabled by SetEventCallbackEnabled(). |
| 26 kEnabledForEventCallback_CategoryGroupEnabledFlags = 1 << 2, |
| 27 // Category group enabled to export events to ETW. |
| 28 kEnabledForETWExport_CategoryGroupEnabledFlags = 1 << 3, |
| 29 }; |
| 30 |
| 31 // By default, const char* argument values are assumed to have long-lived scope |
| 32 // and will not be copied. Use this macro to force a const char* to be copied. |
| 33 #define TRACE_STR_COPY(str) v8::internal::tracing::TraceStringWithCopy(str) |
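For illustration, a minimal usage sketch of TRACE_STR_COPY, assuming the TRACE_EVENT1 macro from trace-event-common.h; the TraceCompile function and the category/file values are hypothetical:

    #include <string>

    void TraceCompile(const std::string& file_name) {
      // file_name's buffer dies with the caller's scope, so the tracer must
      // copy the value rather than retain the pointer.
      TRACE_EVENT1("v8", "TraceCompile", "file",
                   TRACE_STR_COPY(file_name.c_str()));
      // A string literal has static storage duration and could be passed
      // directly, without TRACE_STR_COPY.
    }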
| 34 |
| 35 // By default, uint64 ID argument values are not mangled with the Process ID in |
| 36 // TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling. |
| 37 #define TRACE_ID_MANGLE(id) v8::internal::tracing::TraceID::ForceMangle(id) |
| 38 |
| 39 // By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC |
| 40 // macros. Use this macro to prevent Process ID mangling. |
| 41 #define TRACE_ID_DONT_MANGLE(id) v8::internal::tracing::TraceID::DontMangle(id) |
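A hedged sketch of both mangling controls, assuming the TRACE_EVENT_ASYNC_BEGIN0 and TRACE_EVENT_ASYNC_END0 macros from trace-event-common.h; the request/sequence call sites are made up for illustration:

    #include <cstdint>

    void OnRequestStart(void* request) {
      // Pointer IDs are mangled with the process ID by default, so the same
      // heap address in two processes produces distinct async IDs.
      TRACE_EVENT_ASYNC_BEGIN0("v8", "Request", request);
    }

    void OnRequestEnd(void* request) {
      // The matching end event reuses the same (default-mangled) pointer ID.
      TRACE_EVENT_ASYNC_END0("v8", "Request", request);
    }

    void OnSequenceStart(uint64_t local_sequence_id) {
      // Integer IDs pass through unchanged unless wrapped in TRACE_ID_MANGLE;
      // conversely, TRACE_ID_DONT_MANGLE(ptr) suppresses mangling for an ID
      // that is already unique across processes.
      TRACE_EVENT_ASYNC_BEGIN0("v8", "Sequence",
                               TRACE_ID_MANGLE(local_sequence_id));
    }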
| 42 |
| 43 // Sets the current sample state to the given category and name (both must be |
| 44 // constant strings). These states are intended for a sampling profiler. |
| 45 // Implementation note: we store category and name together because we don't |
| 46 // want the inconsistency/expense of storing two pointers. |
| 47 // |thread_bucket| is [0..2] and is used to statically isolate samples in one |
| 48 // thread from others. |
| 49 #define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \ |
| 50 name) \ |
| 51 v8::internal::tracing::TraceEventSamplingStateScope<bucket_number>::Set( \ |
| 52 category "\0" name) |
| 53 |
| 54 // Returns the current sampling state of the given bucket. |
| 55 #define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \ |
| 56 v8::internal::tracing::TraceEventSamplingStateScope<bucket_number>::Current() |
| 57 |
| 58 // Creates a scope that sets the sampling state of the given bucket. |
| 59 // |
| 60 // { // The sampling state is set within this scope. |
| 61 //    TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, "category", "name"); |
| 62 // ...; |
| 63 // } |
| 64 #define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \ |
| 65 name) \ |
| 66 v8::internal::tracing::TraceEventSamplingStateScope<bucket_number> \ |
| 67 traceEventSamplingScope(category "\0" name); |
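A hedged usage sketch of the three bucket macros defined above; the bucket assignment and the function names are hypothetical:

    void RunGarbageCollection() {  // assume this thread owns bucket 0
      TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, "v8", "GC");
      // A sampling profiler reading bucket 0 now attributes samples to the
      // "v8\0GC" state; the previous state is restored when the scope ends.
    }

    const char* CurrentStateOfBucket0() {
      // Returns the "category\0name" pair most recently stored in bucket 0.
      return TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0);
    }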
| 68 |
| 69 |
| 70 #define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \ |
| 71 *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \ |
| 72 (kEnabledForRecording_CategoryGroupEnabledFlags | \ |
| 73 kEnabledForEventCallback_CategoryGroupEnabledFlags) |
| 74 |
| 75 // The following macro has no implementation, but it needs to exist since |
| 76 // it gets called from scoped trace events. It cannot call UNIMPLEMENTED() |
| 77 // since an empty implementation is a valid one. |
| 78 #define INTERNAL_TRACE_MEMORY(category, name) |
| 79 |
| 80 //////////////////////////////////////////////////////////////////////////////// |
| 81 // Implementation specific tracing API definitions. |
| 82 |
| 83 // Get a pointer to the enabled state of the given trace category. Only |
| 84 // long-lived literal strings should be given as the category group. The |
| 85 // returned pointer can be held permanently in a local static for example. If |
| 86 // the unsigned char is non-zero, tracing is enabled. If tracing is enabled, |
| 87 // TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled |
| 88 // between the load of the tracing state and the call to |
| 89 // TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out |
| 90 // for best performance when tracing is disabled. |
| 91 // const uint8_t* |
| 92 // TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group) |
| 93 #define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \ |
| 94 v8::internal::V8::GetCurrentPlatform()->GetCategoryGroupEnabled |
| 95 |
| 96 // Get the number of times traces have been recorded. This is used to implement |
| 97 // the TRACE_EVENT_IS_NEW_TRACE facility. |
| 98 // unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED() |
| 99 #define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED \ |
| 100 v8::internal::V8::GetCurrentPlatform()->GetNumTracesRecorded |
| 101 |
| 102 // Add a trace event to the platform tracing system. |
| 103 // uint64_t TRACE_EVENT_API_ADD_TRACE_EVENT( |
| 104 // char phase, |
| 105 // const uint8_t* category_group_enabled, |
| 106 // const char* name, |
| 107 // uint64_t id, |
| 108 // uint64_t context_id, |
| 109 // uint64_t bind_id, |
| 110 // int num_args, |
| 111 // const char** arg_names, |
| 112 // const uint8_t* arg_types, |
| 113 // const uint64_t* arg_values, |
| 114 // unsigned int flags) |
| 115 #define TRACE_EVENT_API_ADD_TRACE_EVENT \ |
| 116 v8::internal::V8::GetCurrentPlatform()->AddTraceEvent |
| 117 |
| 118 // Set the duration field of a COMPLETE trace event. |
| 119 // void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION( |
| 120 // const uint8_t* category_group_enabled, |
| 121 // const char* name, |
| 122 // uint64_t id) |
| 123 #define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \ |
| 124 v8::internal::V8::GetCurrentPlatform()->UpdateTraceEventDuration |
| 125 |
| 126 // Defines atomic operations used internally by the tracing system. |
| 127 #define TRACE_EVENT_API_ATOMIC_WORD v8::base::AtomicWord |
| 128 #define TRACE_EVENT_API_ATOMIC_LOAD(var) v8::base::NoBarrier_Load(&(var)) |
| 129 #define TRACE_EVENT_API_ATOMIC_STORE(var, value) \ |
| 130 v8::base::NoBarrier_Store(&(var), (value)) |
| 131 |
| 132 // The thread buckets for the sampling profiler. |
| 133 extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3]; |
| 134 |
| 135 #define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \ |
| 136 g_trace_state[thread_bucket] |
| 137 |
| 138 //////////////////////////////////////////////////////////////////////////////// |
| 139 |
| 140 // Implementation detail: trace event macros create temporary variables |
| 141 // to keep instrumentation overhead low. These macros give each temporary |
| 142 // variable a unique name based on the line number to prevent name collisions. |
| 143 #define INTERNAL_TRACE_EVENT_UID3(a, b) trace_event_unique_##a##b |
| 144 #define INTERNAL_TRACE_EVENT_UID2(a, b) INTERNAL_TRACE_EVENT_UID3(a, b) |
| 145 #define INTERNAL_TRACE_EVENT_UID(name_prefix) \ |
| 146 INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__) |
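A worked expansion may help here: the two-level indirection (UID2 feeding UID3) exists so that __LINE__ is substituted before the tokens are pasted. The line number 100 below is illustrative:

    // INTERNAL_TRACE_EVENT_UID(atomic) appearing on line 100 expands as:
    //   INTERNAL_TRACE_EVENT_UID2(atomic, 100)   // __LINE__ expands first
    //   INTERNAL_TRACE_EVENT_UID3(atomic, 100)
    //   trace_event_unique_atomic100             // ## pastes the tokens
    // so trace macros on different lines declare distinct variables.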
| 147 |
| 148 // Implementation detail: internal macro to create static category. |
| 149 // No barriers are needed, because this code is designed to operate safely |
| 150 // even when the unsigned char* points to garbage data (which may be the case |
| 151 // on processors without cache coherency). |
| 152 #define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \ |
| 153 category_group, atomic, category_group_enabled) \ |
| 154 category_group_enabled = \ |
| 155 reinterpret_cast<const uint8_t*>(TRACE_EVENT_API_ATOMIC_LOAD(atomic)); \ |
| 156 if (!category_group_enabled) { \ |
| 157 category_group_enabled = \ |
| 158 TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \ |
| 159 TRACE_EVENT_API_ATOMIC_STORE( \ |
| 160 atomic, reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( \ |
| 161 category_group_enabled)); \ |
| 162 } |
| 163 |
| 164 #define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \ |
| 165 static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \ |
| 166 const uint8_t* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \ |
| 167 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \ |
| 168 category_group, INTERNAL_TRACE_EVENT_UID(atomic), \ |
| 169 INTERNAL_TRACE_EVENT_UID(category_group_enabled)); |
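For illustration only, the same lazy-caching pattern re-expressed as standalone C++ with std::atomic in place of the V8 atomics; this is a sketch of the idea, not the header's own code, and GetCategoryGroupEnabled here stands in for the platform call:

    #include <atomic>
    #include <cstdint>

    const uint8_t* GetCategoryGroupEnabled(const char* category_group);

    const uint8_t* CategoryEnabledFlag(const char* category_group) {
      // The macro gives every call site its own cache slot by declaring a
      // function-local static named with __LINE__; one static stands in here.
      static std::atomic<const uint8_t*> cached{nullptr};
      const uint8_t* enabled = cached.load(std::memory_order_relaxed);
      if (!enabled) {
        // A racy double lookup is harmless: every thread ends up with the
        // same long-lived pointer returned by the backend.
        enabled = GetCategoryGroupEnabled(category_group);
        cached.store(enabled, std::memory_order_relaxed);
      }
      return enabled;
    }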
| 170 |
| 171 // Implementation detail: internal macro to create static category and add |
| 172 // event if the category is enabled. |
| 173 #define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \ |
| 174 do { \ |
| 175 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \ |
| 176 if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \ |
| 177 v8::internal::tracing::AddTraceEvent( \ |
| 178 phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ |
| 179 v8::internal::tracing::kNoId, v8::internal::tracing::kNoId, \ |
| 180 v8::internal::tracing::kNoId, flags, ##__VA_ARGS__); \ |
| 181 } \ |
| 182 } while (0) |
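Roughly, and only as a sketch, a call such as INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, "v8", "Tick", TRACE_EVENT_FLAG_NONE) appearing on line 42 would expand to something like the following; the phase and flag constants come from trace-event-common.h and the category lookup is elided:

    do {
      static TRACE_EVENT_API_ATOMIC_WORD trace_event_unique_atomic42 = 0;
      const uint8_t* trace_event_unique_category_group_enabled42;
      // ... lazy lookup as in INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO ...
      if (*trace_event_unique_category_group_enabled42 &
          (kEnabledForRecording_CategoryGroupEnabledFlags |
           kEnabledForEventCallback_CategoryGroupEnabledFlags)) {
        v8::internal::tracing::AddTraceEvent(
            TRACE_EVENT_PHASE_INSTANT,
            trace_event_unique_category_group_enabled42, "Tick",
            v8::internal::tracing::kNoId, v8::internal::tracing::kNoId,
            v8::internal::tracing::kNoId, TRACE_EVENT_FLAG_NONE);
      }
    } while (0)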
| 183 |
| 184 #define INTERNAL_TRACE_EVENT_ADD_WITH_CONTEXT_ID(phase, category_group, name, \ |
| 185 context_id, flags, ...) \ |
| 186 do { \ |
| 187 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \ |
| 188 if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \ |
| 189 v8::internal::tracing::AddTraceEvent( \ |
| 190 phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ |
| 191 v8::internal::tracing::kNoId, context_id, \ |
| 192 v8::internal::tracing::kNoId, flags, ##__VA_ARGS__); \ |
| 193 } \ |
| 194 } while (0) |
| 195 |
| 196 // Implementation detail: internal macro to create static category and add begin |
| 197 // event if the category is enabled. Also adds the end event when the scope |
| 198 // ends. |
| 199 #define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \ |
| 200 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \ |
| 201 v8::internal::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \ |
| 202 if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \ |
| 203 uint64_t h = v8::internal::tracing::AddTraceEvent( \ |
| 204 TRACE_EVENT_PHASE_COMPLETE, \ |
| 205 INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ |
| 206 v8::internal::tracing::kNoId, v8::internal::tracing::kNoId, \ |
| 207 v8::internal::tracing::kNoId, TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \ |
| 208 INTERNAL_TRACE_EVENT_UID(tracer) \ |
| 209 .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ |
| 210 h); \ |
| 211 } |
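A hedged usage sketch tying this macro to the ScopedTracer class defined later in this file, assuming TRACE_EVENT0 from trace-event-common.h routes here; the GC function and event name are hypothetical:

    void CollectGarbage() {
      TRACE_EVENT0("v8", "V8.GCScavenger");
      // If the category is enabled, a COMPLETE event was added above and its
      // handle stored in a ScopedTracer variable named via __LINE__.
      // ... do the actual work ...
    }  // ~ScopedTracer() fills in the event's duration here.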
| 212 |
| 213 #define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, \ |
| 214 bind_id, flow_flags, ...) \ |
| 215 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \ |
| 216 v8::internal::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \ |
| 217 if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \ |
| 218 unsigned int trace_event_flags = flow_flags; \ |
| 219 v8::internal::tracing::TraceID trace_event_bind_id(bind_id, \ |
| 220 &trace_event_flags); \ |
| 221 uint64_t h = v8::internal::tracing::AddTraceEvent( \ |
| 222 TRACE_EVENT_PHASE_COMPLETE, \ |
| 223 INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ |
| 224 v8::internal::tracing::kNoId, v8::internal::tracing::kNoId, \ |
| 225 trace_event_bind_id.data(), trace_event_flags, ##__VA_ARGS__); \ |
| 226 INTERNAL_TRACE_EVENT_UID(tracer) \ |
| 227 .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ |
| 228 h); \ |
| 229 } |
| 230 |
| 231 // Implementation detail: internal macro to create static category and add |
| 232 // event if the category is enabled. |
| 233 #define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \ |
| 234 flags, ...) \ |
| 235 do { \ |
| 236 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \ |
| 237 if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \ |
| 238 unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \ |
| 239 v8::internal::tracing::TraceID trace_event_trace_id(id, \ |
| 240 &trace_event_flags); \ |
| 241 v8::internal::tracing::AddTraceEvent( \ |
| 242 phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ |
| 243 trace_event_trace_id.data(), v8::internal::tracing::kNoId, \ |
| 244 v8::internal::tracing::kNoId, trace_event_flags, ##__VA_ARGS__); \ |
| 245 } \ |
| 246 } while (0) |
| 247 |
| 248 // Adds a trace event with a given timestamp. Not Implemented. |
| 249 #define INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(phase, category_group, name, \ |
| 250 timestamp, flags, ...) \ |
| 251 UNIMPLEMENTED() |
| 252 |
| 253 // Adds a trace event with a given id and timestamp. Not Implemented. |
| 254 #define INTERNAL_TRACE_EVENT_ADD_WITH_ID_AND_TIMESTAMP( \ |
| 255 phase, category_group, name, id, timestamp, flags, ...) \ |
| 256 UNIMPLEMENTED() |
| 257 |
| 258 // Adds a trace event with a given id, thread_id, and timestamp. Not |
| 259 // Implemented. |
| 260 #define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ |
| 261 phase, category_group, name, id, thread_id, timestamp, flags, ...) \ |
| 262 UNIMPLEMENTED() |
| 263 |
| 264 namespace v8 { |
| 265 namespace internal { |
| 266 namespace tracing { |
| 267 |
| 268 // Specify these values when the corresponding argument of AddTraceEvent is not |
| 269 // used. |
| 270 const int kZeroNumArgs = 0; |
| 271 const uint64_t kNoId = 0; |
| 272 |
| 273 // TraceID encapsulates an ID that can be either an integer or a pointer. |
| 274 // Pointers are by default mangled with the Process ID so that they are |
| 275 // unlikely to collide when the same pointer is used in different processes. |
| 276 class TraceID { |
| 277 public: |
| 278 class DontMangle { |
| 279 public: |
| 280 explicit DontMangle(const void* id) |
| 281 : data_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) {} |
| 282 explicit DontMangle(uint64_t id) : data_(id) {} |
| 283 explicit DontMangle(unsigned int id) : data_(id) {} |
| 284 explicit DontMangle(uint16_t id) : data_(id) {} |
| 285 explicit DontMangle(unsigned char id) : data_(id) {} |
| 286 explicit DontMangle(int64_t id) : data_(static_cast<uint64_t>(id)) {} |
| 287 explicit DontMangle(int id) : data_(static_cast<uint64_t>(id)) {} |
| 288 explicit DontMangle(int16_t id) : data_(static_cast<uint64_t>(id)) {} |
| 289 explicit DontMangle(signed char id) : data_(static_cast<uint64_t>(id)) {} |
| 290 uint64_t data() const { return data_; } |
| 291 |
| 292 private: |
| 293 uint64_t data_; |
| 294 }; |
| 295 |
| 296 class ForceMangle { |
| 297 public: |
| 298 explicit ForceMangle(uint64_t id) : data_(id) {} |
| 299 explicit ForceMangle(unsigned int id) : data_(id) {} |
| 300 explicit ForceMangle(uint16_t id) : data_(id) {} |
| 301 explicit ForceMangle(unsigned char id) : data_(id) {} |
| 302 explicit ForceMangle(int64_t id) : data_(static_cast<uint64_t>(id)) {} |
| 303 explicit ForceMangle(int id) : data_(static_cast<uint64_t>(id)) {} |
| 304 explicit ForceMangle(int16_t id) : data_(static_cast<uint64_t>(id)) {} |
| 305 explicit ForceMangle(signed char id) : data_(static_cast<uint64_t>(id)) {} |
| 306 uint64_t data() const { return data_; } |
| 307 |
| 308 private: |
| 309 uint64_t data_; |
| 310 }; |
| 311 |
| 312 TraceID(const void* id, unsigned int* flags) |
| 313 : data_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) { |
| 314 *flags |= TRACE_EVENT_FLAG_MANGLE_ID; |
| 315 } |
| 316 TraceID(ForceMangle id, unsigned int* flags) : data_(id.data()) { |
| 317 *flags |= TRACE_EVENT_FLAG_MANGLE_ID; |
| 318 } |
| 319 TraceID(DontMangle id, unsigned int* flags) : data_(id.data()) {} |
| 320 TraceID(uint64_t id, unsigned int* flags) : data_(id) { (void)flags; } |
| 321 TraceID(unsigned int id, unsigned int* flags) : data_(id) { (void)flags; } |
| 322 TraceID(uint16_t id, unsigned int* flags) : data_(id) { (void)flags; } |
| 323 TraceID(unsigned char id, unsigned int* flags) : data_(id) { (void)flags; } |
| 324 TraceID(int64_t id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) { |
| 325 (void)flags; |
| 326 } |
| 327 TraceID(int id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) { |
| 328 (void)flags; |
| 329 } |
| 330 TraceID(int16_t id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) { |
| 331 (void)flags; |
| 332 } |
| 333 TraceID(signed char id, unsigned int* flags) |
| 334 : data_(static_cast<uint64_t>(id)) { |
| 335 (void)flags; |
| 336 } |
| 337 |
| 338 uint64_t data() const { return data_; } |
| 339 |
| 340 private: |
| 341 uint64_t data_; |
| 342 }; |
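An illustrative fragment (hypothetical call site, assuming this header is included) showing how the TraceID constructors adjust the flags word:

    #include <cstdint>

    void TraceIdFlagExample(void* request) {
      unsigned int ptr_flags = TRACE_EVENT_FLAG_NONE;
      v8::internal::tracing::TraceID from_pointer(request, &ptr_flags);
      // ptr_flags now has TRACE_EVENT_FLAG_MANGLE_ID set: raw pointers are
      // process-local, so they are mangled by default.

      unsigned int int_flags = TRACE_EVENT_FLAG_NONE;
      v8::internal::tracing::TraceID from_int(uint64_t{42}, &int_flags);
      // int_flags is unchanged: integers are mangled only when wrapped in
      // TRACE_ID_MANGLE, i.e. TraceID::ForceMangle.
    }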
| 343 |
| 344 // Simple union to store various types as uint64_t. |
| 345 union TraceValueUnion { |
| 346 bool as_bool; |
| 347 uint64_t as_uint; |
| 348 int64_t as_int; |
| 349 double as_double; |
| 350 const void* as_pointer; |
| 351 const char* as_string; |
| 352 }; |
| 353 |
| 354 // Simple container for const char* that should be copied instead of retained. |
| 355 class TraceStringWithCopy { |
| 356 public: |
| 357 explicit TraceStringWithCopy(const char* str) : str_(str) {} |
| 358 operator const char*() const { return str_; } |
| 359 |
| 360 private: |
| 361 const char* str_; |
| 362 }; |
| 363 |
| 364 // Define SetTraceValue for each allowed type. It stores the type and |
| 365 // value in the return arguments. This allows this API to avoid declaring any |
| 366 // structures so that it is portable to third_party libraries. |
| 367 #define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, union_member, \ |
| 368 value_type_id) \ |
| 369 static inline void SetTraceValue(actual_type arg, unsigned char* type, \ |
| 370 uint64_t* value) { \ |
| 371 TraceValueUnion type_value; \ |
| 372 type_value.union_member = arg; \ |
| 373 *type = value_type_id; \ |
| 374 *value = type_value.as_uint; \ |
| 375 } |
| 376 // Simpler form for int types that can be safely cast. |
| 377 #define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, value_type_id) \ |
| 378 static inline void SetTraceValue(actual_type arg, unsigned char* type, \ |
| 379 uint64_t* value) { \ |
| 380 *type = value_type_id; \ |
| 381 *value = static_cast<uint64_t>(arg); \ |
| 382 } |
| 383 |
| 384 INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT) |
| 385 INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT) |
| 386 INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint16_t, TRACE_VALUE_TYPE_UINT) |
| 387 INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT) |
| 388 INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int64_t, TRACE_VALUE_TYPE_INT) |
| 389 INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT) |
| 390 INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int16_t, TRACE_VALUE_TYPE_INT) |
| 391 INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT) |
| 392 INTERNAL_DECLARE_SET_TRACE_VALUE(bool, as_bool, TRACE_VALUE_TYPE_BOOL) |
| 393 INTERNAL_DECLARE_SET_TRACE_VALUE(double, as_double, TRACE_VALUE_TYPE_DOUBLE) |
| 394 INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, as_pointer, |
| 395 TRACE_VALUE_TYPE_POINTER) |
| 396 INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, as_string, |
| 397 TRACE_VALUE_TYPE_STRING) |
| 398 INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string, |
| 399 TRACE_VALUE_TYPE_COPY_STRING) |
| 400 |
| 401 #undef INTERNAL_DECLARE_SET_TRACE_VALUE |
| 402 #undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT |
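A standalone illustration of the union packing that SetTraceValue performs; this is not the header's own code, and the numeric type tag is only a stand-in for TRACE_VALUE_TYPE_DOUBLE:

    #include <cstdint>
    #include <cstdio>

    union PackedTraceValue {  // mirrors TraceValueUnion above
      uint64_t as_uint;
      double as_double;
    };

    int main() {
      PackedTraceValue v;
      v.as_double = 3.5;
      // The double's raw bits travel through the tracing API as a uint64_t,
      // paired with a one-byte type tag telling the consumer how to decode it.
      unsigned char type = 4;  // stand-in for TRACE_VALUE_TYPE_DOUBLE
      uint64_t value = v.as_uint;
      std::printf("type=%d bits=0x%llx\n", static_cast<int>(type),
                  static_cast<unsigned long long>(value));
      return 0;
    }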
| 403 |
| 404 // These AddTraceEvent template |
| 405 // functions are defined here instead of in the macro because the arg_values |
| 406 // could be temporary objects, such as std::string. In order to store |
| 407 // pointers to the internal c_str and pass them through to the tracing API, |
| 408 // the arg_values must live throughout these procedures. |
| 409 |
| 410 static inline uint64_t AddTraceEvent(char phase, |
| 411 const uint8_t* category_group_enabled, |
| 412 const char* name, uint64_t id, |
| 413 uint64_t context_id, uint64_t bind_id, |
| 414 unsigned int flags) { |
| 415 return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name, |
| 416 id, context_id, bind_id, kZeroNumArgs, |
| 417 NULL, NULL, NULL, flags); |
| 418 } |
| 419 |
| 420 template <class ARG1_TYPE> |
| 421 static inline uint64_t AddTraceEvent(char phase, |
| 422 const uint8_t* category_group_enabled, |
| 423 const char* name, uint64_t id, |
| 424 uint64_t context_id, uint64_t bind_id, |
| 425 unsigned int flags, const char* arg1_name, |
| 426 const ARG1_TYPE& arg1_val) { |
| 427 const int num_args = 1; |
| 428 uint8_t arg_types[1]; |
| 429 uint64_t arg_values[1]; |
| 430 SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]); |
| 431 return TRACE_EVENT_API_ADD_TRACE_EVENT( |
| 432 phase, category_group_enabled, name, id, context_id, bind_id, num_args, |
| 433 &arg1_name, arg_types, arg_values, flags); |
| 434 } |
| 435 |
| 436 template <class ARG1_TYPE, class ARG2_TYPE> |
| 437 static inline uint64_t AddTraceEvent( |
| 438 char phase, const uint8_t* category_group_enabled, const char* name, |
| 439 uint64_t id, uint64_t context_id, uint64_t bind_id, unsigned int flags, |
| 440 const char* arg1_name, const ARG1_TYPE& arg1_val, const char* arg2_name, |
| 441 const ARG2_TYPE& arg2_val) { |
| 442 const int num_args = 2; |
| 443 const char* arg_names[2] = {arg1_name, arg2_name}; |
| 444 unsigned char arg_types[2]; |
| 445 uint64_t arg_values[2]; |
| 446 SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]); |
| 447 SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]); |
| 448 return TRACE_EVENT_API_ADD_TRACE_EVENT( |
| 449 phase, category_group_enabled, name, id, context_id, bind_id, num_args, |
| 450 arg_names, arg_types, arg_values, flags); |
| 451 } |
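A hedged sketch of a direct call to the two-argument overload; the macros normally generate this, the category pointer is assumed to have been obtained via TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED, and TRACE_EVENT_PHASE_BEGIN / TRACE_EVENT_FLAG_NONE come from trace-event-common.h:

    uint64_t EmitParseBegin(const uint8_t* category_group_enabled) {
      return v8::internal::tracing::AddTraceEvent(
          TRACE_EVENT_PHASE_BEGIN, category_group_enabled, "Parse",
          v8::internal::tracing::kNoId, v8::internal::tracing::kNoId,
          v8::internal::tracing::kNoId, TRACE_EVENT_FLAG_NONE,
          "bytes", 2048,  // packed by the int overload of SetTraceValue
          "lazy", true);  // packed by the bool overload
    }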
| 452 |
| 453 // Used by TRACE_EVENTx macros. Do not use directly. |
| 454 class ScopedTracer { |
| 455 public: |
| 456 // Note: members of data_ intentionally left uninitialized. See Initialize. |
| 457 ScopedTracer() : p_data_(NULL) {} |
| 458 |
| 459 ~ScopedTracer() { |
| 460 if (p_data_ && *data_.category_group_enabled) |
| 461 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION( |
| 462 data_.category_group_enabled, data_.name, data_.event_handle); |
| 463 } |
| 464 |
| 465 void Initialize(const uint8_t* category_group_enabled, const char* name, |
| 466 uint64_t event_handle) { |
| 467 data_.category_group_enabled = category_group_enabled; |
| 468 data_.name = name; |
| 469 data_.event_handle = event_handle; |
| 470 p_data_ = &data_; |
| 471 } |
| 472 |
| 473 private: |
| 474 // This Data struct workaround is to avoid initializing all the members |
| 475 // in Data during construction of this object, since this object is always |
| 476 // constructed, even when tracing is disabled. If the members of Data were |
| 477 // members of this class instead, compiler warnings occur about potential |
| 478 // uninitialized accesses. |
| 479 struct Data { |
| 480 const uint8_t* category_group_enabled; |
| 481 const char* name; |
| 482 uint64_t event_handle; |
| 483 }; |
| 484 Data* p_data_; |
| 485 Data data_; |
| 486 }; |
| 487 |
| 488 // Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly. |
| 489 class ScopedTraceBinaryEfficient { |
| 490 public: |
| 491 ScopedTraceBinaryEfficient(const char* category_group, const char* name); |
| 492 ~ScopedTraceBinaryEfficient(); |
| 493 |
| 494 private: |
| 495 const uint8_t* category_group_enabled_; |
| 496 const char* name_; |
| 497 uint64_t event_handle_; |
| 498 }; |
| 499 |
| 500 // TraceEventSamplingStateScope records the current sampling state |
| 501 // and sets a new sampling state. When the scope exits, it restores |
| 502 // the previously recorded sampling state. |
| 503 template <size_t BucketNumber> |
| 504 class TraceEventSamplingStateScope { |
| 505 public: |
| 506 explicit TraceEventSamplingStateScope(const char* category_and_name) { |
| 507 previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current(); |
| 508 TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name); |
| 509 } |
| 510 |
| 511 ~TraceEventSamplingStateScope() { |
| 512 TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_); |
| 513 } |
| 514 |
| 515 static inline const char* Current() { |
| 516 return reinterpret_cast<const char*>( |
| 517 TRACE_EVENT_API_ATOMIC_LOAD(g_trace_state[BucketNumber])); |
| 518 } |
| 519 |
| 520 static inline void Set(const char* category_and_name) { |
| 521 TRACE_EVENT_API_ATOMIC_STORE(g_trace_state[BucketNumber], |
| 522 reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( |
| 523 const_cast<char*>(category_and_name))); |
| 524 } |
| 525 |
| 526 private: |
| 527 const char* previous_state_; |
| 528 }; |
| 529 |
| 530 } // namespace tracing |
| 531 } // namespace internal |
| 532 } // namespace v8 |
| 533 |
| 534 #endif // SRC_TRACING_TRACE_EVENT_H_ |