| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/trace_event_impl.h" | 5 #include "base/trace_event/trace_event_impl.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <cmath> | 8 #include <cmath> |
| 9 | 9 |
| 10 #include "base/base_switches.h" | 10 #include "base/base_switches.h" |
| (...skipping 15 matching lines...) Expand all Loading... |
| 26 #include "base/strings/utf_string_conversions.h" | 26 #include "base/strings/utf_string_conversions.h" |
| 27 #include "base/synchronization/cancellation_flag.h" | 27 #include "base/synchronization/cancellation_flag.h" |
| 28 #include "base/synchronization/waitable_event.h" | 28 #include "base/synchronization/waitable_event.h" |
| 29 #include "base/sys_info.h" | 29 #include "base/sys_info.h" |
| 30 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" | 30 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" |
| 31 #include "base/thread_task_runner_handle.h" | 31 #include "base/thread_task_runner_handle.h" |
| 32 #include "base/threading/platform_thread.h" | 32 #include "base/threading/platform_thread.h" |
| 33 #include "base/threading/thread_id_name_manager.h" | 33 #include "base/threading/thread_id_name_manager.h" |
| 34 #include "base/threading/worker_pool.h" | 34 #include "base/threading/worker_pool.h" |
| 35 #include "base/time/time.h" | 35 #include "base/time/time.h" |
| 36 #include "base/trace_event/memory_dump_manager.h" |
| 37 #include "base/trace_event/memory_dump_provider.h" |
| 38 #include "base/trace_event/process_memory_dump.h" |
| 36 #include "base/trace_event/trace_event.h" | 39 #include "base/trace_event/trace_event.h" |
| 37 #include "base/trace_event/trace_event_synthetic_delay.h" | 40 #include "base/trace_event/trace_event_synthetic_delay.h" |
| 38 | 41 |
| 39 #if defined(OS_WIN) | 42 #if defined(OS_WIN) |
| 40 #include "base/trace_event/trace_event_etw_export_win.h" | 43 #include "base/trace_event/trace_event_etw_export_win.h" |
| 41 #include "base/trace_event/trace_event_win.h" | 44 #include "base/trace_event/trace_event_win.h" |
| 42 #endif | 45 #endif |
| 43 | 46 |
| 44 class DeleteTraceLogForTesting { | 47 class DeleteTraceLogForTesting { |
| 45 public: | 48 public: |
| (...skipping 161 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 207 queue_index = NextQueueIndex(queue_index)) { | 210 queue_index = NextQueueIndex(queue_index)) { |
| 208 size_t chunk_index = recyclable_chunks_queue_[queue_index]; | 211 size_t chunk_index = recyclable_chunks_queue_[queue_index]; |
| 209 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. | 212 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. |
| 210 continue; | 213 continue; |
| 211 TraceBufferChunk* chunk = chunks_[chunk_index]; | 214 TraceBufferChunk* chunk = chunks_[chunk_index]; |
| 212 cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL); | 215 cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL); |
| 213 } | 216 } |
| 214 return cloned_buffer.Pass(); | 217 return cloned_buffer.Pass(); |
| 215 } | 218 } |
| 216 | 219 |
| 220 void EstimateTraceMemoryOverhead( |
| 221 TraceEventMemoryOverhead* overhead) override { |
| 222 overhead->Add("TraceBufferRingBuffer", sizeof(*this)); |
| 223 for (size_t queue_index = queue_head_; queue_index != queue_tail_; |
| 224 queue_index = NextQueueIndex(queue_index)) { |
| 225 size_t chunk_index = recyclable_chunks_queue_[queue_index]; |
| 226 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. |
| 227 continue; |
| 228 chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead); |
| 229 } |
| 230 } |
| 231 |
| 217 private: | 232 private: |
| 218 class ClonedTraceBuffer : public TraceBuffer { | 233 class ClonedTraceBuffer : public TraceBuffer { |
| 219 public: | 234 public: |
| 220 ClonedTraceBuffer() : current_iteration_index_(0) {} | 235 ClonedTraceBuffer() : current_iteration_index_(0) {} |
| 221 | 236 |
| 222 // The only implemented method. | 237 // The only implemented method. |
| 223 const TraceBufferChunk* NextChunk() override { | 238 const TraceBufferChunk* NextChunk() override { |
| 224 return current_iteration_index_ < chunks_.size() ? | 239 return current_iteration_index_ < chunks_.size() ? |
| 225 chunks_[current_iteration_index_++] : NULL; | 240 chunks_[current_iteration_index_++] : NULL; |
| 226 } | 241 } |
| 227 | 242 |
| 228 scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override { | 243 scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override { |
| 229 NOTIMPLEMENTED(); | 244 NOTIMPLEMENTED(); |
| 230 return scoped_ptr<TraceBufferChunk>(); | 245 return scoped_ptr<TraceBufferChunk>(); |
| 231 } | 246 } |
| 232 void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override { | 247 void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override { |
| 233 NOTIMPLEMENTED(); | 248 NOTIMPLEMENTED(); |
| 234 } | 249 } |
| 235 bool IsFull() const override { return false; } | 250 bool IsFull() const override { return false; } |
| 236 size_t Size() const override { return 0; } | 251 size_t Size() const override { return 0; } |
| 237 size_t Capacity() const override { return 0; } | 252 size_t Capacity() const override { return 0; } |
| 238 TraceEvent* GetEventByHandle(TraceEventHandle handle) override { | 253 TraceEvent* GetEventByHandle(TraceEventHandle handle) override { |
| 239 return NULL; | 254 return NULL; |
| 240 } | 255 } |
| 241 scoped_ptr<TraceBuffer> CloneForIteration() const override { | 256 scoped_ptr<TraceBuffer> CloneForIteration() const override { |
| 242 NOTIMPLEMENTED(); | 257 NOTIMPLEMENTED(); |
| 243 return scoped_ptr<TraceBuffer>(); | 258 return scoped_ptr<TraceBuffer>(); |
| 244 } | 259 } |
| 260 void EstimateTraceMemoryOverhead( |
| 261 TraceEventMemoryOverhead* overhead) override { |
| 262 NOTIMPLEMENTED(); |
| 263 } |
| 245 | 264 |
| 246 size_t current_iteration_index_; | 265 size_t current_iteration_index_; |
| 247 ScopedVector<TraceBufferChunk> chunks_; | 266 ScopedVector<TraceBufferChunk> chunks_; |
| 248 }; | 267 }; |
| 249 | 268 |
| 250 bool QueueIsEmpty() const { | 269 bool QueueIsEmpty() const { |
| 251 return queue_head_ == queue_tail_; | 270 return queue_head_ == queue_tail_; |
| 252 } | 271 } |
| 253 | 272 |
| 254 size_t QueueSize() const { | 273 size_t QueueSize() const { |
| (...skipping 88 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 343 return chunk; | 362 return chunk; |
| 344 } | 363 } |
| 345 return NULL; | 364 return NULL; |
| 346 } | 365 } |
| 347 | 366 |
| 348 scoped_ptr<TraceBuffer> CloneForIteration() const override { | 367 scoped_ptr<TraceBuffer> CloneForIteration() const override { |
| 349 NOTIMPLEMENTED(); | 368 NOTIMPLEMENTED(); |
| 350 return scoped_ptr<TraceBuffer>(); | 369 return scoped_ptr<TraceBuffer>(); |
| 351 } | 370 } |
| 352 | 371 |
| 372 void EstimateTraceMemoryOverhead( |
| 373 TraceEventMemoryOverhead* overhead) override { |
| 374 const size_t chunks_ptr_vector_allocated_size = |
| 375 sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type); |
| 376 const size_t chunks_ptr_vector_resident_size = |
| 377 sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type); |
| 378 overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size, |
| 379 chunks_ptr_vector_resident_size); |
| 380 for (size_t i = 0; i < chunks_.size(); ++i) { |
| 381 TraceBufferChunk* chunk = chunks_[i]; |
| 382 // Skip the in-flight (nullptr) chunks. They will be accounted by the |
| 383 // per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump. |
| 384 if (chunk) |
| 385 chunk->EstimateTraceMemoryOverhead(overhead); |
| 386 } |
| 387 } |
| 388 |
| 353 private: | 389 private: |
| 354 size_t in_flight_chunk_count_; | 390 size_t in_flight_chunk_count_; |
| 355 size_t current_iteration_index_; | 391 size_t current_iteration_index_; |
| 356 size_t max_chunks_; | 392 size_t max_chunks_; |
| 357 ScopedVector<TraceBufferChunk> chunks_; | 393 ScopedVector<TraceBufferChunk> chunks_; |
| 358 | 394 |
| 359 DISALLOW_COPY_AND_ASSIGN(TraceBufferVector); | 395 DISALLOW_COPY_AND_ASSIGN(TraceBufferVector); |
| 360 }; | 396 }; |
| 361 | 397 |
| 362 template <typename T> | 398 template <typename T> |
| (...skipping 28 matching lines...) Expand all Loading... |
| 391 thread_local_boolean_->Set(false); | 427 thread_local_boolean_->Set(false); |
| 392 } | 428 } |
| 393 | 429 |
| 394 private: | 430 private: |
| 395 ThreadLocalBoolean* thread_local_boolean_; | 431 ThreadLocalBoolean* thread_local_boolean_; |
| 396 DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean); | 432 DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean); |
| 397 }; | 433 }; |
| 398 | 434 |
| 399 } // namespace | 435 } // namespace |
| 400 | 436 |
| 437 TraceBufferChunk::TraceBufferChunk(uint32 seq) : next_free_(0), seq_(seq) {} |
| 438 |
| 439 TraceBufferChunk::~TraceBufferChunk() {} |
| 440 |
| 401 void TraceBufferChunk::Reset(uint32 new_seq) { | 441 void TraceBufferChunk::Reset(uint32 new_seq) { |
| 402 for (size_t i = 0; i < next_free_; ++i) | 442 for (size_t i = 0; i < next_free_; ++i) |
| 403 chunk_[i].Reset(); | 443 chunk_[i].Reset(); |
| 404 next_free_ = 0; | 444 next_free_ = 0; |
| 405 seq_ = new_seq; | 445 seq_ = new_seq; |
| 446 cached_overhead_estimate_when_full_.reset(); |
| 406 } | 447 } |
| 407 | 448 |
| 408 TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) { | 449 TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) { |
| 409 DCHECK(!IsFull()); | 450 DCHECK(!IsFull()); |
| 410 *event_index = next_free_++; | 451 *event_index = next_free_++; |
| 411 return &chunk_[*event_index]; | 452 return &chunk_[*event_index]; |
| 412 } | 453 } |
| 413 | 454 |
| 414 scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const { | 455 scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const { |
| 415 scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_)); | 456 scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_)); |
| 416 cloned_chunk->next_free_ = next_free_; | 457 cloned_chunk->next_free_ = next_free_; |
| 417 for (size_t i = 0; i < next_free_; ++i) | 458 for (size_t i = 0; i < next_free_; ++i) |
| 418 cloned_chunk->chunk_[i].CopyFrom(chunk_[i]); | 459 cloned_chunk->chunk_[i].CopyFrom(chunk_[i]); |
| 419 return cloned_chunk.Pass(); | 460 return cloned_chunk.Pass(); |
| 420 } | 461 } |
| 421 | 462 |
| 463 void TraceBufferChunk::EstimateTraceMemoryOverhead( |
| 464 TraceEventMemoryOverhead* overhead) { |
| 465 if (cached_overhead_estimate_when_full_) { |
| 466 DCHECK(IsFull()); |
| 467 overhead->Update(*cached_overhead_estimate_when_full_); |
| 468 return; |
| 469 } |
| 470 |
| 471 // Cache the memory overhead estimate only if the chunk is full. |
| 472 TraceEventMemoryOverhead* estimate = overhead; |
| 473 if (IsFull()) { |
| 474 cached_overhead_estimate_when_full_.reset(new TraceEventMemoryOverhead); |
| 475 estimate = cached_overhead_estimate_when_full_.get(); |
| 476 } |
| 477 |
| 478 estimate->Add("TraceBufferChunk", sizeof(*this)); |
| 479 for (size_t i = 0; i < next_free_; ++i) |
| 480 chunk_[i].EstimateTraceMemoryOverhead(estimate); |
| 481 |
| 482 if (IsFull()) { |
| 483 estimate->AddSelf(); |
| 484 overhead->Update(*estimate); |
| 485 } |
| 486 } |
| 487 |
| 422 // A helper class that allows the lock to be acquired in the middle of the scope | 488 // A helper class that allows the lock to be acquired in the middle of the scope |
| 423 // and unlocks at the end of scope if locked. | 489 // and unlocks at the end of scope if locked. |
| 424 class TraceLog::OptionalAutoLock { | 490 class TraceLog::OptionalAutoLock { |
| 425 public: | 491 public: |
| 426 explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {} | 492 explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {} |
| 427 | 493 |
| 428 ~OptionalAutoLock() { | 494 ~OptionalAutoLock() { |
| 429 if (locked_) | 495 if (locked_) |
| 430 lock_->Release(); | 496 lock_->Release(); |
| 431 } | 497 } |
| (...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 522 ThreadTicks thread_timestamp, | 588 ThreadTicks thread_timestamp, |
| 523 char phase, | 589 char phase, |
| 524 const unsigned char* category_group_enabled, | 590 const unsigned char* category_group_enabled, |
| 525 const char* name, | 591 const char* name, |
| 526 unsigned long long id, | 592 unsigned long long id, |
| 527 int num_args, | 593 int num_args, |
| 528 const char** arg_names, | 594 const char** arg_names, |
| 529 const unsigned char* arg_types, | 595 const unsigned char* arg_types, |
| 530 const unsigned long long* arg_values, | 596 const unsigned long long* arg_values, |
| 531 const scoped_refptr<ConvertableToTraceFormat>* convertable_values, | 597 const scoped_refptr<ConvertableToTraceFormat>* convertable_values, |
| 532 unsigned char flags) { | 598 unsigned int flags) { |
| 533 timestamp_ = timestamp; | 599 timestamp_ = timestamp; |
| 534 thread_timestamp_ = thread_timestamp; | 600 thread_timestamp_ = thread_timestamp; |
| 535 duration_ = TimeDelta::FromInternalValue(-1); | 601 duration_ = TimeDelta::FromInternalValue(-1); |
| 536 id_ = id; | 602 id_ = id; |
| 537 category_group_enabled_ = category_group_enabled; | 603 category_group_enabled_ = category_group_enabled; |
| 538 name_ = name; | 604 name_ = name; |
| 539 thread_id_ = thread_id; | 605 thread_id_ = thread_id; |
| 540 phase_ = phase; | 606 phase_ = phase; |
| 541 flags_ = flags; | 607 flags_ = flags; |
| 542 | 608 |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 603 } | 669 } |
| 604 } | 670 } |
| 605 | 671 |
| 606 void TraceEvent::Reset() { | 672 void TraceEvent::Reset() { |
| 607 // Only reset fields that won't be initialized in Initialize(), or that may | 673 // Only reset fields that won't be initialized in Initialize(), or that may |
| 608 // hold references to other objects. | 674 // hold references to other objects. |
| 609 duration_ = TimeDelta::FromInternalValue(-1); | 675 duration_ = TimeDelta::FromInternalValue(-1); |
| 610 parameter_copy_storage_ = NULL; | 676 parameter_copy_storage_ = NULL; |
| 611 for (int i = 0; i < kTraceMaxNumArgs; ++i) | 677 for (int i = 0; i < kTraceMaxNumArgs; ++i) |
| 612 convertable_values_[i] = NULL; | 678 convertable_values_[i] = NULL; |
| 679 cached_memory_overhead_estimate_.reset(); |
| 613 } | 680 } |
| 614 | 681 |
| 615 void TraceEvent::UpdateDuration(const TraceTicks& now, | 682 void TraceEvent::UpdateDuration(const TraceTicks& now, |
| 616 const ThreadTicks& thread_now) { | 683 const ThreadTicks& thread_now) { |
| 617 DCHECK_EQ(duration_.ToInternalValue(), -1); | 684 DCHECK_EQ(duration_.ToInternalValue(), -1); |
| 618 duration_ = now - timestamp_; | 685 duration_ = now - timestamp_; |
| 619 thread_duration_ = thread_now - thread_timestamp_; | 686 thread_duration_ = thread_now - thread_timestamp_; |
| 620 } | 687 } |
| 621 | 688 |
| 689 void TraceEvent::EstimateTraceMemoryOverhead( |
| 690 TraceEventMemoryOverhead* overhead) { |
| 691 if (!cached_memory_overhead_estimate_) { |
| 692 cached_memory_overhead_estimate_.reset(new TraceEventMemoryOverhead); |
| 693 cached_memory_overhead_estimate_->Add("TraceEvent", sizeof(*this)); |
| 694 // TODO(primiano): parameter_copy_storage_ is refcounted and, in theory, |
| 695 // could be shared by several events and we might overcount. In practice |
| 696 // this is unlikely but it's worth checking. |
| 697 if (parameter_copy_storage_) { |
| 698 cached_memory_overhead_estimate_->AddRefCountedString( |
| 699 *parameter_copy_storage_.get()); |
| 700 } |
| 701 for (size_t i = 0; i < kTraceMaxNumArgs; ++i) { |
| 702 if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE) { |
| 703 convertable_values_[i]->EstimateTraceMemoryOverhead( |
| 704 cached_memory_overhead_estimate_.get()); |
| 705 } |
| 706 } |
| 707 cached_memory_overhead_estimate_->AddSelf(); |
| 708 } |
| 709 overhead->Update(*cached_memory_overhead_estimate_); |
| 710 } |
| 711 |
| 622 // static | 712 // static |
| 623 void TraceEvent::AppendValueAsJSON(unsigned char type, | 713 void TraceEvent::AppendValueAsJSON(unsigned char type, |
| 624 TraceEvent::TraceValue value, | 714 TraceEvent::TraceValue value, |
| 625 std::string* out) { | 715 std::string* out) { |
| 626 switch (type) { | 716 switch (type) { |
| 627 case TRACE_VALUE_TYPE_BOOL: | 717 case TRACE_VALUE_TYPE_BOOL: |
| 628 *out += value.as_bool ? "true" : "false"; | 718 *out += value.as_bool ? "true" : "false"; |
| 629 break; | 719 break; |
| 630 case TRACE_VALUE_TYPE_UINT: | 720 case TRACE_VALUE_TYPE_UINT: |
| 631 StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint)); | 721 StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint)); |
| (...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 690 const ArgumentFilterPredicate& argument_filter_predicate) const { | 780 const ArgumentFilterPredicate& argument_filter_predicate) const { |
| 691 int64 time_int64 = timestamp_.ToInternalValue(); | 781 int64 time_int64 = timestamp_.ToInternalValue(); |
| 692 int process_id = TraceLog::GetInstance()->process_id(); | 782 int process_id = TraceLog::GetInstance()->process_id(); |
| 693 const char* category_group_name = | 783 const char* category_group_name = |
| 694 TraceLog::GetCategoryGroupName(category_group_enabled_); | 784 TraceLog::GetCategoryGroupName(category_group_enabled_); |
| 695 | 785 |
| 696 // Category group checked at category creation time. | 786 // Category group checked at category creation time. |
| 697 DCHECK(!strchr(name_, '"')); | 787 DCHECK(!strchr(name_, '"')); |
| 698 StringAppendF(out, "{\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 | 788 StringAppendF(out, "{\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 |
| 699 "," | 789 "," |
| 700 "\"ph\":\"%c\",\"cat\":\"%s\",\"name\":\"%s\",\"args\":{", | 790 "\"ph\":\"%c\",\"cat\":\"%s\",\"name\":\"%s\",\"args\":", |
| 701 process_id, thread_id_, time_int64, phase_, category_group_name, | 791 process_id, thread_id_, time_int64, phase_, category_group_name, |
| 702 name_); | 792 name_); |
| 703 | 793 |
| 704 // Output argument names and values, stop at first NULL argument name. | 794 // Output argument names and values, stop at first NULL argument name. |
| 705 if (arg_names_[0]) { | 795 bool strip_args = arg_names_[0] && !argument_filter_predicate.is_null() && |
| 706 bool allow_args = argument_filter_predicate.is_null() || | 796 !argument_filter_predicate.Run(category_group_name, name_); |
| 707 argument_filter_predicate.Run(category_group_name, name_); | |
| 708 | 797 |
| 709 if (allow_args) { | 798 if (strip_args) { |
| 710 for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) { | 799 *out += "\"__stripped__\""; |
| 711 if (i > 0) | 800 } else { |
| 712 *out += ","; | 801 *out += "{"; |
| 713 *out += "\""; | |
| 714 *out += arg_names_[i]; | |
| 715 *out += "\":"; | |
| 716 | 802 |
| 717 if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE) | 803 for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) { |
| 718 convertable_values_[i]->AppendAsTraceFormat(out); | 804 if (i > 0) |
| 719 else | 805 *out += ","; |
| 720 AppendValueAsJSON(arg_types_[i], arg_values_[i], out); | 806 *out += "\""; |
| 721 } | 807 *out += arg_names_[i]; |
| 722 } else { | 808 *out += "\":"; |
| 723 *out += "\"stripped\":1"; | 809 |
| 810 if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE) |
| 811 convertable_values_[i]->AppendAsTraceFormat(out); |
| 812 else |
| 813 AppendValueAsJSON(arg_types_[i], arg_values_[i], out); |
| 724 } | 814 } |
| 815 |
| 816 *out += "}"; |
| 725 } | 817 } |
| 726 | 818 |
| 727 *out += "}"; | |
| 728 | |
| 729 if (phase_ == TRACE_EVENT_PHASE_COMPLETE) { | 819 if (phase_ == TRACE_EVENT_PHASE_COMPLETE) { |
| 730 int64 duration = duration_.ToInternalValue(); | 820 int64 duration = duration_.ToInternalValue(); |
| 731 if (duration != -1) | 821 if (duration != -1) |
| 732 StringAppendF(out, ",\"dur\":%" PRId64, duration); | 822 StringAppendF(out, ",\"dur\":%" PRId64, duration); |
| 733 if (!thread_timestamp_.is_null()) { | 823 if (!thread_timestamp_.is_null()) { |
| 734 int64 thread_duration = thread_duration_.ToInternalValue(); | 824 int64 thread_duration = thread_duration_.ToInternalValue(); |
| 735 if (thread_duration != -1) | 825 if (thread_duration != -1) |
| 736 StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration); | 826 StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration); |
| 737 } | 827 } |
| 738 } | 828 } |
| 739 | 829 |
| 740 // Output tts if thread_timestamp is valid. | 830 // Output tts if thread_timestamp is valid. |
| 741 if (!thread_timestamp_.is_null()) { | 831 if (!thread_timestamp_.is_null()) { |
| 742 int64 thread_time_int64 = thread_timestamp_.ToInternalValue(); | 832 int64 thread_time_int64 = thread_timestamp_.ToInternalValue(); |
| 743 StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64); | 833 StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64); |
| 744 } | 834 } |
| 745 | 835 |
| 746 // Output async tts marker field if flag is set. | 836 // Output async tts marker field if flag is set. |
| 747 if (flags_ & TRACE_EVENT_FLAG_ASYNC_TTS) { | 837 if (flags_ & TRACE_EVENT_FLAG_ASYNC_TTS) { |
| 748 StringAppendF(out, ", \"use_async_tts\":1"); | 838 StringAppendF(out, ", \"use_async_tts\":1"); |
| 749 } | 839 } |
| 750 | 840 |
| 751 // If id_ is set, print it out as a hex string so we don't loose any | 841 // If id_ is set, print it out as a hex string so we don't loose any |
| 752 // bits (it might be a 64-bit pointer). | 842 // bits (it might be a 64-bit pointer). |
| 753 if (flags_ & TRACE_EVENT_FLAG_HAS_ID) | 843 if (flags_ & TRACE_EVENT_FLAG_HAS_ID) |
| 754 StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_)); | 844 StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_)); |
| 755 | 845 |
| 846 if (flags_ & TRACE_EVENT_FLAG_BIND_TO_ENCLOSING) |
| 847 StringAppendF(out, ",\"bp\":\"e\""); |
| 848 |
| 756 // Instant events also output their scope. | 849 // Instant events also output their scope. |
| 757 if (phase_ == TRACE_EVENT_PHASE_INSTANT) { | 850 if (phase_ == TRACE_EVENT_PHASE_INSTANT) { |
| 758 char scope = '?'; | 851 char scope = '?'; |
| 759 switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) { | 852 switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) { |
| 760 case TRACE_EVENT_SCOPE_GLOBAL: | 853 case TRACE_EVENT_SCOPE_GLOBAL: |
| 761 scope = TRACE_EVENT_SCOPE_NAME_GLOBAL; | 854 scope = TRACE_EVENT_SCOPE_NAME_GLOBAL; |
| 762 break; | 855 break; |
| 763 | 856 |
| 764 case TRACE_EVENT_SCOPE_PROCESS: | 857 case TRACE_EVENT_SCOPE_PROCESS: |
| 765 scope = TRACE_EVENT_SCOPE_NAME_PROCESS; | 858 scope = TRACE_EVENT_SCOPE_NAME_PROCESS; |
| (...skipping 211 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 977 TraceBucketData::~TraceBucketData() { | 1070 TraceBucketData::~TraceBucketData() { |
| 978 } | 1071 } |
| 979 | 1072 |
| 980 //////////////////////////////////////////////////////////////////////////////// | 1073 //////////////////////////////////////////////////////////////////////////////// |
| 981 // | 1074 // |
| 982 // TraceLog | 1075 // TraceLog |
| 983 // | 1076 // |
| 984 //////////////////////////////////////////////////////////////////////////////// | 1077 //////////////////////////////////////////////////////////////////////////////// |
| 985 | 1078 |
| 986 class TraceLog::ThreadLocalEventBuffer | 1079 class TraceLog::ThreadLocalEventBuffer |
| 987 : public MessageLoop::DestructionObserver { | 1080 : public MessageLoop::DestructionObserver, |
| 1081 public MemoryDumpProvider { |
| 988 public: | 1082 public: |
| 989 ThreadLocalEventBuffer(TraceLog* trace_log); | 1083 ThreadLocalEventBuffer(TraceLog* trace_log); |
| 990 ~ThreadLocalEventBuffer() override; | 1084 ~ThreadLocalEventBuffer() override; |
| 991 | 1085 |
| 992 TraceEvent* AddTraceEvent(TraceEventHandle* handle); | 1086 TraceEvent* AddTraceEvent(TraceEventHandle* handle); |
| 993 | 1087 |
| 994 void ReportOverhead(const TraceTicks& event_timestamp, | 1088 void ReportOverhead(const TraceTicks& event_timestamp, |
| 995 const ThreadTicks& event_thread_timestamp); | 1089 const ThreadTicks& event_thread_timestamp); |
| 996 | 1090 |
| 997 TraceEvent* GetEventByHandle(TraceEventHandle handle) { | 1091 TraceEvent* GetEventByHandle(TraceEventHandle handle) { |
| 998 if (!chunk_ || handle.chunk_seq != chunk_->seq() || | 1092 if (!chunk_ || handle.chunk_seq != chunk_->seq() || |
| 999 handle.chunk_index != chunk_index_) | 1093 handle.chunk_index != chunk_index_) |
| 1000 return NULL; | 1094 return NULL; |
| 1001 | 1095 |
| 1002 return chunk_->GetEventAt(handle.event_index); | 1096 return chunk_->GetEventAt(handle.event_index); |
| 1003 } | 1097 } |
| 1004 | 1098 |
| 1005 int generation() const { return generation_; } | 1099 int generation() const { return generation_; } |
| 1006 | 1100 |
| 1007 private: | 1101 private: |
| 1008 // MessageLoop::DestructionObserver | 1102 // MessageLoop::DestructionObserver |
| 1009 void WillDestroyCurrentMessageLoop() override; | 1103 void WillDestroyCurrentMessageLoop() override; |
| 1010 | 1104 |
| 1105 // MemoryDumpProvider implementation. |
| 1106 bool OnMemoryDump(ProcessMemoryDump* pmd) override; |
| 1107 |
| 1011 void FlushWhileLocked(); | 1108 void FlushWhileLocked(); |
| 1012 | 1109 |
| 1013 void CheckThisIsCurrentBuffer() const { | 1110 void CheckThisIsCurrentBuffer() const { |
| 1014 DCHECK(trace_log_->thread_local_event_buffer_.Get() == this); | 1111 DCHECK(trace_log_->thread_local_event_buffer_.Get() == this); |
| 1015 } | 1112 } |
| 1016 | 1113 |
| 1017 // Since TraceLog is a leaky singleton, trace_log_ will always be valid | 1114 // Since TraceLog is a leaky singleton, trace_log_ will always be valid |
| 1018 // as long as the thread exists. | 1115 // as long as the thread exists. |
| 1019 TraceLog* trace_log_; | 1116 TraceLog* trace_log_; |
| 1020 scoped_ptr<TraceBufferChunk> chunk_; | 1117 scoped_ptr<TraceBufferChunk> chunk_; |
| 1021 size_t chunk_index_; | 1118 size_t chunk_index_; |
| 1022 int event_count_; | 1119 int event_count_; |
| 1023 TimeDelta overhead_; | 1120 TimeDelta overhead_; |
| 1024 int generation_; | 1121 int generation_; |
| 1025 | 1122 |
| 1026 DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer); | 1123 DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer); |
| 1027 }; | 1124 }; |
| 1028 | 1125 |
| 1029 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log) | 1126 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log) |
| 1030 : trace_log_(trace_log), | 1127 : trace_log_(trace_log), |
| 1031 chunk_index_(0), | 1128 chunk_index_(0), |
| 1032 event_count_(0), | 1129 event_count_(0), |
| 1033 generation_(trace_log->generation()) { | 1130 generation_(trace_log->generation()) { |
| 1034 // ThreadLocalEventBuffer is created only if the thread has a message loop, so | 1131 // ThreadLocalEventBuffer is created only if the thread has a message loop, so |
| 1035 // the following message_loop won't be NULL. | 1132 // the following message_loop won't be NULL. |
| 1036 MessageLoop* message_loop = MessageLoop::current(); | 1133 MessageLoop* message_loop = MessageLoop::current(); |
| 1037 message_loop->AddDestructionObserver(this); | 1134 message_loop->AddDestructionObserver(this); |
| 1038 | 1135 |
| 1136 // This is to report the local memory usage when memory-infra is enabled. |
| 1137 MemoryDumpManager::GetInstance()->RegisterDumpProvider( |
| 1138 this, ThreadTaskRunnerHandle::Get()); |
| 1139 |
| 1039 AutoLock lock(trace_log->lock_); | 1140 AutoLock lock(trace_log->lock_); |
| 1040 trace_log->thread_message_loops_.insert(message_loop); | 1141 trace_log->thread_message_loops_.insert(message_loop); |
| 1041 } | 1142 } |
| 1042 | 1143 |
| 1043 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() { | 1144 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() { |
| 1044 CheckThisIsCurrentBuffer(); | 1145 CheckThisIsCurrentBuffer(); |
| 1045 MessageLoop::current()->RemoveDestructionObserver(this); | 1146 MessageLoop::current()->RemoveDestructionObserver(this); |
| 1147 MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this); |
| 1046 | 1148 |
| 1047 // Zero event_count_ happens in either of the following cases: | 1149 // Zero event_count_ happens in either of the following cases: |
| 1048 // - no event generated for the thread; | 1150 // - no event generated for the thread; |
| 1049 // - the thread has no message loop; | 1151 // - the thread has no message loop; |
| 1050 // - trace_event_overhead is disabled. | 1152 // - trace_event_overhead is disabled. |
| 1051 if (event_count_) { | 1153 if (event_count_) { |
| 1052 InitializeMetadataEvent(AddTraceEvent(NULL), | 1154 InitializeMetadataEvent(AddTraceEvent(NULL), |
| 1053 static_cast<int>(base::PlatformThread::CurrentId()), | 1155 static_cast<int>(base::PlatformThread::CurrentId()), |
| 1054 "overhead", "average_overhead", | 1156 "overhead", "average_overhead", |
| 1055 overhead_.InMillisecondsF() / event_count_); | 1157 overhead_.InMillisecondsF() / event_count_); |
| (...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1112 trace_event->UpdateDuration(now, thread_now); | 1214 trace_event->UpdateDuration(now, thread_now); |
| 1113 } | 1215 } |
| 1114 } | 1216 } |
| 1115 overhead_ += overhead; | 1217 overhead_ += overhead; |
| 1116 } | 1218 } |
| 1117 | 1219 |
| 1118 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() { | 1220 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() { |
| 1119 delete this; | 1221 delete this; |
| 1120 } | 1222 } |
| 1121 | 1223 |
| 1224 bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(ProcessMemoryDump* pmd) { |
| 1225 if (!chunk_) |
| 1226 return true; |
| 1227 std::string dump_base_name = StringPrintf( |
| 1228 "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId())); |
| 1229 TraceEventMemoryOverhead overhead; |
| 1230 chunk_->EstimateTraceMemoryOverhead(&overhead); |
| 1231 overhead.DumpInto(dump_base_name.c_str(), pmd); |
| 1232 return true; |
| 1233 } |
| 1234 |
| 1122 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() { | 1235 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() { |
| 1123 if (!chunk_) | 1236 if (!chunk_) |
| 1124 return; | 1237 return; |
| 1125 | 1238 |
| 1126 trace_log_->lock_.AssertAcquired(); | 1239 trace_log_->lock_.AssertAcquired(); |
| 1127 if (trace_log_->CheckGeneration(generation_)) { | 1240 if (trace_log_->CheckGeneration(generation_)) { |
| 1128 // Return the chunk to the buffer only if the generation matches. | 1241 // Return the chunk to the buffer only if the generation matches. |
| 1129 trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass()); | 1242 trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass()); |
| 1130 } | 1243 } |
| 1131 // Otherwise this method may be called from the destructor, or TraceLog will | 1244 // Otherwise this method may be called from the destructor, or TraceLog will |
| (...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1188 filter.append(kEchoToConsoleCategoryFilter); | 1301 filter.append(kEchoToConsoleCategoryFilter); |
| 1189 } | 1302 } |
| 1190 | 1303 |
| 1191 LOG(ERROR) << "Start " << switches::kTraceToConsole | 1304 LOG(ERROR) << "Start " << switches::kTraceToConsole |
| 1192 << " with CategoryFilter '" << filter << "'."; | 1305 << " with CategoryFilter '" << filter << "'."; |
| 1193 SetEnabled(TraceConfig(filter, ECHO_TO_CONSOLE), RECORDING_MODE); | 1306 SetEnabled(TraceConfig(filter, ECHO_TO_CONSOLE), RECORDING_MODE); |
| 1194 } | 1307 } |
| 1195 #endif | 1308 #endif |
| 1196 | 1309 |
| 1197 logged_events_.reset(CreateTraceBuffer()); | 1310 logged_events_.reset(CreateTraceBuffer()); |
| 1311 |
| 1312 MemoryDumpManager::GetInstance()->RegisterDumpProvider(this); |
| 1198 } | 1313 } |
| 1199 | 1314 |
TraceLog::~TraceLog() {
  // NOTE(review): the constructor registers |this| as a MemoryDumpProvider
  // but nothing here unregisters it. TraceLog is presumably a
  // process-lifetime singleton so the dtor never actually runs -- confirm if
  // that assumption ever changes.
}
| 1202 | 1317 |
// Lazily creates (or refreshes) the calling thread's ThreadLocalEventBuffer.
// No-op for threads that cannot safely use one (see comment below).
void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
  // A ThreadLocalEventBuffer needs the message loop
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop or the message loop may be blocked, the
  // trace events will be added into the main buffer directly.
  if (thread_blocks_message_loop_.Get() || !MessageLoop::current())
    return;
  auto thread_local_event_buffer = thread_local_event_buffer_.Get();
  // A buffer left over from an earlier tracing session (stale generation)
  // must be deleted so a fresh one is created below; deleting it flushes its
  // remaining events.
  if (thread_local_event_buffer &&
      !CheckGeneration(thread_local_event_buffer->generation())) {
    delete thread_local_event_buffer;
    thread_local_event_buffer = NULL;
  }
  if (!thread_local_event_buffer) {
    thread_local_event_buffer = new ThreadLocalEventBuffer(this);
    thread_local_event_buffer_.Set(thread_local_event_buffer);
  }
}
| 1337 |
// MemoryDumpProvider implementation: accounts for the TraceLog object itself
// plus its main event buffer. Always returns true (success).
bool TraceLog::OnMemoryDump(ProcessMemoryDump* pmd) {
  TraceEventMemoryOverhead overhead;
  overhead.Add("TraceLog", sizeof(*this));
  {
    // |logged_events_| is mutated by other threads; estimate under |lock_|,
    // but keep the lock scope as small as possible.
    AutoLock lock(lock_);
    if (logged_events_)
      logged_events_->EstimateTraceMemoryOverhead(&overhead);
  }
  overhead.AddSelf();
  overhead.DumpInto("tracing/main_trace_log", pmd);
  return true;
}
| 1350 |
| 1203 const unsigned char* TraceLog::GetCategoryGroupEnabled( | 1351 const unsigned char* TraceLog::GetCategoryGroupEnabled( |
| 1204 const char* category_group) { | 1352 const char* category_group) { |
| 1205 TraceLog* tracelog = GetInstance(); | 1353 TraceLog* tracelog = GetInstance(); |
| 1206 if (!tracelog) { | 1354 if (!tracelog) { |
| 1207 DCHECK(!g_category_group_enabled[g_category_already_shutdown]); | 1355 DCHECK(!g_category_group_enabled[g_category_already_shutdown]); |
| 1208 return &g_category_group_enabled[g_category_already_shutdown]; | 1356 return &g_category_group_enabled[g_category_already_shutdown]; |
| 1209 } | 1357 } |
| 1210 return tracelog->GetCategoryGroupEnabledInternal(category_group); | 1358 return tracelog->GetCategoryGroupEnabledInternal(category_group); |
| 1211 } | 1359 } |
| 1212 | 1360 |
| (...skipping 19 matching lines...) Expand all Loading... |
| 1232 if (mode_ == RECORDING_MODE && | 1380 if (mode_ == RECORDING_MODE && |
| 1233 trace_config_.IsCategoryGroupEnabled(category_group)) | 1381 trace_config_.IsCategoryGroupEnabled(category_group)) |
| 1234 enabled_flag |= ENABLED_FOR_RECORDING; | 1382 enabled_flag |= ENABLED_FOR_RECORDING; |
| 1235 else if (mode_ == MONITORING_MODE && | 1383 else if (mode_ == MONITORING_MODE && |
| 1236 trace_config_.IsCategoryGroupEnabled(category_group)) | 1384 trace_config_.IsCategoryGroupEnabled(category_group)) |
| 1237 enabled_flag |= ENABLED_FOR_MONITORING; | 1385 enabled_flag |= ENABLED_FOR_MONITORING; |
| 1238 if (event_callback_ && | 1386 if (event_callback_ && |
| 1239 event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) | 1387 event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) |
| 1240 enabled_flag |= ENABLED_FOR_EVENT_CALLBACK; | 1388 enabled_flag |= ENABLED_FOR_EVENT_CALLBACK; |
| 1241 #if defined(OS_WIN) | 1389 #if defined(OS_WIN) |
| 1242 if (base::trace_event::TraceEventETWExport::isETWExportEnabled()) | 1390 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( |
| 1391 category_group)) { |
| 1243 enabled_flag |= ENABLED_FOR_ETW_EXPORT; | 1392 enabled_flag |= ENABLED_FOR_ETW_EXPORT; |
| 1393 } |
| 1244 #endif | 1394 #endif |
| 1245 | 1395 |
| 1246 g_category_group_enabled[category_index] = enabled_flag; | 1396 g_category_group_enabled[category_index] = enabled_flag; |
| 1247 } | 1397 } |
| 1248 | 1398 |
| 1249 void TraceLog::UpdateCategoryGroupEnabledFlags() { | 1399 void TraceLog::UpdateCategoryGroupEnabledFlags() { |
| 1250 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); | 1400 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); |
| 1251 for (size_t i = 0; i < category_index; i++) | 1401 for (size_t i = 0; i < category_index; i++) |
| 1252 UpdateCategoryGroupEnabledFlag(i); | 1402 UpdateCategoryGroupEnabledFlag(i); |
| 1253 } | 1403 } |
| (...skipping 366 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1620 // flush_task_runner_; | 1770 // flush_task_runner_; |
| 1621 // 2. If thread_message_loops_ is not empty, thread A posts task to each message | 1771 // 2. If thread_message_loops_ is not empty, thread A posts task to each message |
| 1622 // loop to flush the thread local buffers; otherwise finish the flush; | 1772 // loop to flush the thread local buffers; otherwise finish the flush; |
| 1623 // 3. FlushCurrentThread() deletes the thread local event buffer: | 1773 // 3. FlushCurrentThread() deletes the thread local event buffer: |
| 1624 // - The last batch of events of the thread are flushed into the main buffer; | 1774 // - The last batch of events of the thread are flushed into the main buffer; |
| 1625 // - The message loop will be removed from thread_message_loops_; | 1775 // - The message loop will be removed from thread_message_loops_; |
| 1626 // If this is the last message loop, finish the flush; | 1776 // If this is the last message loop, finish the flush; |
// 4. If any thread hasn't finished its flush in time, finish the flush.
// Public flush entry point: delivers the buffered events to |cb| via the
// shared implementation, keeping (not discarding) the events.
void TraceLog::Flush(const TraceLog::OutputCallback& cb,
                     bool use_worker_thread) {
  FlushInternal(cb, use_worker_thread, false);
}
| 1782 |
// Stops tracing and flushes synchronously while discarding all buffered
// events. |cb| is still invoked (with empty output, see FinishFlush) so the
// caller learns the cancellation completed.
void TraceLog::CancelTracing(const OutputCallback& cb) {
  SetDisabled();
  FlushInternal(cb, false, true);
}
| 1787 |
| 1788 void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb, |
| 1789 bool use_worker_thread, |
| 1790 bool discard_events) { |
| 1630 use_worker_thread_ = use_worker_thread; | 1791 use_worker_thread_ = use_worker_thread; |
| 1631 if (IsEnabled()) { | 1792 if (IsEnabled()) { |
| 1632 // Can't flush when tracing is enabled because otherwise PostTask would | 1793 // Can't flush when tracing is enabled because otherwise PostTask would |
| 1633 // - generate more trace events; | 1794 // - generate more trace events; |
| 1634 // - deschedule the calling thread on some platforms causing inaccurate | 1795 // - deschedule the calling thread on some platforms causing inaccurate |
| 1635 // timing of the trace events. | 1796 // timing of the trace events. |
| 1636 scoped_refptr<RefCountedString> empty_result = new RefCountedString; | 1797 scoped_refptr<RefCountedString> empty_result = new RefCountedString; |
| 1637 if (!cb.is_null()) | 1798 if (!cb.is_null()) |
| 1638 cb.Run(empty_result, false); | 1799 cb.Run(empty_result, false); |
| 1639 LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled"; | 1800 LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled"; |
| (...skipping 23 matching lines...) Expand all Loading... |
| 1663 thread_message_loops_.begin(); | 1824 thread_message_loops_.begin(); |
| 1664 it != thread_message_loops_.end(); ++it) { | 1825 it != thread_message_loops_.end(); ++it) { |
| 1665 thread_message_loop_task_runners.push_back((*it)->task_runner()); | 1826 thread_message_loop_task_runners.push_back((*it)->task_runner()); |
| 1666 } | 1827 } |
| 1667 } | 1828 } |
| 1668 } | 1829 } |
| 1669 | 1830 |
| 1670 if (thread_message_loop_task_runners.size()) { | 1831 if (thread_message_loop_task_runners.size()) { |
| 1671 for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) { | 1832 for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) { |
| 1672 thread_message_loop_task_runners[i]->PostTask( | 1833 thread_message_loop_task_runners[i]->PostTask( |
| 1673 FROM_HERE, | 1834 FROM_HERE, Bind(&TraceLog::FlushCurrentThread, Unretained(this), |
| 1674 Bind(&TraceLog::FlushCurrentThread, Unretained(this), generation)); | 1835 generation, discard_events)); |
| 1675 } | 1836 } |
| 1676 flush_task_runner_->PostDelayedTask( | 1837 flush_task_runner_->PostDelayedTask( |
| 1677 FROM_HERE, | 1838 FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation, |
| 1678 Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation), | 1839 discard_events), |
| 1679 TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs)); | 1840 TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs)); |
| 1680 return; | 1841 return; |
| 1681 } | 1842 } |
| 1682 | 1843 |
| 1683 FinishFlush(generation); | 1844 FinishFlush(generation, discard_events); |
| 1684 } | 1845 } |
| 1685 | 1846 |
// Serializes all events in |logged_events| as JSON and streams them to
// |flush_output_callback| in batches of roughly kTraceEventBufferSizeInBytes,
// applying |argument_filter_predicate| to event arguments.
// Usually it runs on a different thread.
void TraceLog::ConvertTraceEventsToTraceFormat(
    scoped_ptr<TraceBuffer> logged_events,
    const OutputCallback& flush_output_callback,
    const TraceEvent::ArgumentFilterPredicate& argument_filter_predicate) {
  if (flush_output_callback.is_null())
    return;

  // The callback need to be called at least once even if there is no events
  // to let the caller know the completion of flush.
  scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
  while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
    for (size_t j = 0; j < chunk->size(); ++j) {
      size_t size = json_events_str_ptr->size();
      if (size > kTraceEventBufferSizeInBytes) {
        // Batch is full: ship it. |true| = more data follows.
        flush_output_callback.Run(json_events_str_ptr, true);
        json_events_str_ptr = new RefCountedString();
      } else if (size) {
        // Separator between consecutive events within a batch.
        json_events_str_ptr->data().append(",\n");
      }
      chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
                                         argument_filter_predicate);
    }
  }
  // Final batch (possibly empty); |false| signals completion to the caller.
  flush_output_callback.Run(json_events_str_ptr, false);
}
| 1716 | 1873 |
// Final stage of a flush: swaps in a fresh trace buffer, releases the flush
// state, then either discards the collected events (|discard_events|) or
// converts them to JSON and hands them to the saved output callback -- on a
// worker thread when requested and available.
void TraceLog::FinishFlush(int generation, bool discard_events) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;
  TraceEvent::ArgumentFilterPredicate argument_filter_predicate;

  // A stale generation means a newer flush superseded this one.
  if (!CheckGeneration(generation))
    return;

  {
    AutoLock lock(lock_);

    // Take ownership of the collected events and start a fresh buffer so
    // conversion below can run without holding |lock_|.
    previous_logged_events.swap(logged_events_);
    UseNextTraceBuffer();
    thread_message_loops_.clear();

    // Clearing |flush_task_runner_| marks the flush as no longer in progress.
    flush_task_runner_ = NULL;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();

    if (trace_options() & kInternalEnableArgumentFilter) {
      CHECK(!argument_filter_predicate_.is_null());
      argument_filter_predicate = argument_filter_predicate_;
    }
  }

  if (discard_events) {
    // CancelTracing path: drop the events but still notify the callback
    // (with an empty result) so the caller knows the flush completed.
    if (!flush_output_callback.is_null()) {
      scoped_refptr<RefCountedString> empty_result = new RefCountedString;
      flush_output_callback.Run(empty_result, false);
    }
    return;
  }

  // Prefer a worker-pool task for the (potentially slow) JSON conversion;
  // fall back to doing it inline if posting fails or wasn't requested.
  if (use_worker_thread_ &&
      WorkerPool::PostTask(
          FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
                          Passed(&previous_logged_events),
                          flush_output_callback, argument_filter_predicate),
          true)) {
    return;
  }

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback,
                                  argument_filter_predicate);
}
| 1755 | 1920 |
// Run in each thread holding a local event buffer.
// Flushes this thread's local buffer into the main buffer; the last thread to
// finish posts FinishFlush to the flush task runner.
void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // This is late. The corresponding flush has finished.
      return;
    }
  }

  // This will flush the thread local buffer. Done outside |lock_| since the
  // buffer's teardown re-acquires it (see FlushWhileLocked's caller).
  delete thread_local_event_buffer_.Get();

  // Re-check under the lock: if other threads still have buffers to flush
  // (|thread_message_loops_| non-empty), the last one will finish the flush.
  AutoLock lock(lock_);
  if (!CheckGeneration(generation) || !flush_task_runner_ ||
      thread_message_loops_.size())
    return;

  flush_task_runner_->PostTask(
      FROM_HERE, Bind(&TraceLog::FinishFlush, Unretained(this), generation,
                      discard_events));
}
| 1777 | 1943 |
// Timeout handler for a flush: if some threads never flushed their local
// buffers in time, log which ones and force-finish the flush without them.
void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // Flush has finished before timeout.
      return;
    }

    LOG(WARNING) <<
        "The following threads haven't finished flush in time. "
        "If this happens stably for some thread, please call "
        "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
        "the thread to avoid its trace events from being lost.";
    for (hash_set<MessageLoop*>::const_iterator it =
         thread_message_loops_.begin();
         it != thread_message_loops_.end(); ++it) {
      LOG(WARNING) << "Thread: " << (*it)->thread_name();
    }
  }
  // Must be called outside |lock_|: FinishFlush acquires it itself.
  FinishFlush(generation, discard_events);
}
| 1799 | 1965 |
| 1800 void TraceLog::FlushButLeaveBufferIntact( | 1966 void TraceLog::FlushButLeaveBufferIntact( |
| 1801 const TraceLog::OutputCallback& flush_output_callback) { | 1967 const TraceLog::OutputCallback& flush_output_callback) { |
| 1802 scoped_ptr<TraceBuffer> previous_logged_events; | 1968 scoped_ptr<TraceBuffer> previous_logged_events; |
| 1803 TraceEvent::ArgumentFilterPredicate argument_filter_predicate; | 1969 TraceEvent::ArgumentFilterPredicate argument_filter_predicate; |
| 1804 { | 1970 { |
| 1805 AutoLock lock(lock_); | 1971 AutoLock lock(lock_); |
| 1806 AddMetadataEventsWhileLocked(); | 1972 AddMetadataEventsWhileLocked(); |
| 1807 if (thread_shared_chunk_) { | 1973 if (thread_shared_chunk_) { |
| (...skipping 24 matching lines...) Expand all Loading... |
// Convenience overload: stamps the event with the calling thread's id and the
// current trace clock, then forwards to the full implementation.
TraceEventHandle TraceLog::AddTraceEvent(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TraceTicks now = base::TraceTicks::Now();
  return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
                                               name, id, thread_id, now,
                                               num_args, arg_names,
                                               arg_types, arg_values,
                                               convertable_values, flags);
}
| 1851 | 2017 |
| 1852 TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp( | 2018 TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp( |
| 1853 char phase, | 2019 char phase, |
| 1854 const unsigned char* category_group_enabled, | 2020 const unsigned char* category_group_enabled, |
| 1855 const char* name, | 2021 const char* name, |
| 1856 unsigned long long id, | 2022 unsigned long long id, |
| 1857 int thread_id, | 2023 int thread_id, |
| 1858 const TraceTicks& timestamp, | 2024 const TraceTicks& timestamp, |
| 1859 int num_args, | 2025 int num_args, |
| 1860 const char** arg_names, | 2026 const char** arg_names, |
| 1861 const unsigned char* arg_types, | 2027 const unsigned char* arg_types, |
| 1862 const unsigned long long* arg_values, | 2028 const unsigned long long* arg_values, |
| 1863 const scoped_refptr<ConvertableToTraceFormat>* convertable_values, | 2029 const scoped_refptr<ConvertableToTraceFormat>* convertable_values, |
| 1864 unsigned char flags) { | 2030 unsigned int flags) { |
| 1865 TraceEventHandle handle = { 0, 0, 0 }; | 2031 TraceEventHandle handle = { 0, 0, 0 }; |
| 1866 if (!*category_group_enabled) | 2032 if (!*category_group_enabled) |
| 1867 return handle; | 2033 return handle; |
| 1868 | 2034 |
| 1869 // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when | 2035 // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when |
| 1870 // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) -> | 2036 // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) -> |
| 1871 // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ... | 2037 // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ... |
| 1872 if (thread_is_in_trace_event_.Get()) | 2038 if (thread_is_in_trace_event_.Get()) |
| 1873 return handle; | 2039 return handle; |
| 1874 | 2040 |
| 1875 AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_); | 2041 AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_); |
| 1876 | 2042 |
| 1877 DCHECK(name); | 2043 DCHECK(name); |
| 1878 DCHECK(!timestamp.is_null()); | 2044 DCHECK(!timestamp.is_null()); |
| 1879 | 2045 |
| 1880 if (flags & TRACE_EVENT_FLAG_MANGLE_ID) | 2046 if (flags & TRACE_EVENT_FLAG_MANGLE_ID) |
| 1881 id = MangleEventId(id); | 2047 id = MangleEventId(id); |
| 1882 | 2048 |
| 1883 TraceTicks offset_event_timestamp = OffsetTimestamp(timestamp); | 2049 TraceTicks offset_event_timestamp = OffsetTimestamp(timestamp); |
| 1884 TraceTicks now = flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP ? | 2050 TraceTicks now = flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP ? |
| 1885 OffsetNow() : offset_event_timestamp; | 2051 OffsetNow() : offset_event_timestamp; |
| 1886 ThreadTicks thread_now = ThreadNow(); | 2052 ThreadTicks thread_now = ThreadNow(); |
| 1887 | 2053 |
| 1888 ThreadLocalEventBuffer* thread_local_event_buffer = NULL; | 2054 // |thread_local_event_buffer_| can be null if the current thread doesn't have |
| 1889 // A ThreadLocalEventBuffer needs the message loop | 2055 // a message loop or the message loop is blocked. |
| 1890 // - to know when the thread exits; | 2056 InitializeThreadLocalEventBufferIfSupported(); |
| 1891 // - to handle the final flush. | 2057 auto thread_local_event_buffer = thread_local_event_buffer_.Get(); |
| 1892 // For a thread without a message loop or the message loop may be blocked, the | |
| 1893 // trace events will be added into the main buffer directly. | |
| 1894 if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) { | |
| 1895 thread_local_event_buffer = thread_local_event_buffer_.Get(); | |
| 1896 if (thread_local_event_buffer && | |
| 1897 !CheckGeneration(thread_local_event_buffer->generation())) { | |
| 1898 delete thread_local_event_buffer; | |
| 1899 thread_local_event_buffer = NULL; | |
| 1900 } | |
| 1901 if (!thread_local_event_buffer) { | |
| 1902 thread_local_event_buffer = new ThreadLocalEventBuffer(this); | |
| 1903 thread_local_event_buffer_.Set(thread_local_event_buffer); | |
| 1904 } | |
| 1905 } | |
| 1906 | 2058 |
| 1907 // Check and update the current thread name only if the event is for the | 2059 // Check and update the current thread name only if the event is for the |
| 1908 // current thread to avoid locks in most cases. | 2060 // current thread to avoid locks in most cases. |
| 1909 if (thread_id == static_cast<int>(PlatformThread::CurrentId())) { | 2061 if (thread_id == static_cast<int>(PlatformThread::CurrentId())) { |
| 1910 const char* new_name = ThreadIdNameManager::GetInstance()-> | 2062 const char* new_name = ThreadIdNameManager::GetInstance()-> |
| 1911 GetName(thread_id); | 2063 GetName(thread_id); |
| 1912 // Check if the thread name has been set or changed since the previous | 2064 // Check if the thread name has been set or changed since the previous |
| 1913 // call (if any), but don't bother if the new name is empty. Note this will | 2065 // call (if any), but don't bother if the new name is empty. Note this will |
| 1914 // not detect a thread name change within the same char* buffer address: we | 2066 // not detect a thread name change within the same char* buffer address: we |
| 1915 // favor common case performance over corner case correctness. | 2067 // favor common case performance over corner case correctness. |
| 1916 if (new_name != g_current_thread_name.Get().Get() && | 2068 if (new_name != g_current_thread_name.Get().Get() && |
| 1917 new_name && *new_name) { | 2069 new_name && *new_name) { |
| 1918 g_current_thread_name.Get().Set(new_name); | 2070 g_current_thread_name.Get().Set(new_name); |
| 1919 | 2071 |
| 1920 AutoLock thread_info_lock(thread_info_lock_); | 2072 AutoLock thread_info_lock(thread_info_lock_); |
| 1921 | 2073 |
| 1922 hash_map<int, std::string>::iterator existing_name = | 2074 hash_map<int, std::string>::iterator existing_name = |
| 1923 thread_names_.find(thread_id); | 2075 thread_names_.find(thread_id); |
| 1924 if (existing_name == thread_names_.end()) { | 2076 if (existing_name == thread_names_.end()) { |
| 1925 // This is a new thread id, and a new name. | 2077 // This is a new thread id, and a new name. |
| 1926 thread_names_[thread_id] = new_name; | 2078 thread_names_[thread_id] = new_name; |
| 1927 } else { | 2079 } else { |
| 1928 // This is a thread id that we've seen before, but potentially with a | 2080 // This is a thread id that we've seen before, but potentially with a |
| 1929 // new name. | 2081 // new name. |
| 1930 std::vector<StringPiece> existing_names; | 2082 std::vector<StringPiece> existing_names = base::SplitStringPiece( |
| 1931 Tokenize(existing_name->second, ",", &existing_names); | 2083 existing_name->second, ",", base::KEEP_WHITESPACE, |
| 2084 base::SPLIT_WANT_NONEMPTY); |
| 1932 bool found = std::find(existing_names.begin(), | 2085 bool found = std::find(existing_names.begin(), |
| 1933 existing_names.end(), | 2086 existing_names.end(), |
| 1934 new_name) != existing_names.end(); | 2087 new_name) != existing_names.end(); |
| 1935 if (!found) { | 2088 if (!found) { |
| 1936 if (existing_names.size()) | 2089 if (existing_names.size()) |
| 1937 existing_name->second.push_back(','); | 2090 existing_name->second.push_back(','); |
| 1938 existing_name->second.append(new_name); | 2091 existing_name->second.append(new_name); |
| 1939 } | 2092 } |
| 1940 } | 2093 } |
| 1941 } | 2094 } |
| (...skipping 245 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2187 } | 2340 } |
| 2188 | 2341 |
| 2189 if (process_labels_.size() > 0) { | 2342 if (process_labels_.size() > 0) { |
| 2190 std::vector<std::string> labels; | 2343 std::vector<std::string> labels; |
| 2191 for(base::hash_map<int, std::string>::iterator it = process_labels_.begin(); | 2344 for(base::hash_map<int, std::string>::iterator it = process_labels_.begin(); |
| 2192 it != process_labels_.end(); | 2345 it != process_labels_.end(); |
| 2193 it++) { | 2346 it++) { |
| 2194 labels.push_back(it->second); | 2347 labels.push_back(it->second); |
| 2195 } | 2348 } |
| 2196 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), | 2349 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), |
| 2197 current_thread_id, | 2350 current_thread_id, "process_labels", "labels", |
| 2198 "process_labels", "labels", | 2351 base::JoinString(labels, ",")); |
| 2199 JoinString(labels, ',')); | |
| 2200 } | 2352 } |
| 2201 | 2353 |
| 2202 // Thread sort indices. | 2354 // Thread sort indices. |
| 2203 for(hash_map<int, int>::iterator it = thread_sort_indices_.begin(); | 2355 for(hash_map<int, int>::iterator it = thread_sort_indices_.begin(); |
| 2204 it != thread_sort_indices_.end(); | 2356 it != thread_sort_indices_.end(); |
| 2205 it++) { | 2357 it++) { |
| 2206 if (it->second == 0) | 2358 if (it->second == 0) |
| 2207 continue; | 2359 continue; |
| 2208 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), | 2360 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), |
| 2209 it->first, | 2361 it->first, |
| (...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2327 } | 2479 } |
| 2328 | 2480 |
// Marks the calling thread as one whose message loop may block, so its trace
// events bypass the thread-local buffer from now on (see
// InitializeThreadLocalEventBufferIfSupported), and flushes any buffer the
// thread already accumulated.
void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop_.Set(true);
  if (thread_local_event_buffer_.Get()) {
    // This will flush the thread local buffer.
    delete thread_local_event_buffer_.Get();
  }
}
| 2336 | 2488 |
// Default memory-overhead estimate for convertable arguments: only the base
// object size is known here; subclasses that carry payloads should override
// this with a more accurate accounting.
void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this));
}
| 2337 } // namespace trace_event | 2494 } // namespace trace_event |
| 2338 } // namespace base | 2495 } // namespace base |
| 2339 | 2496 |
| 2340 namespace trace_event_internal { | 2497 namespace trace_event_internal { |
| 2341 | 2498 |
| 2342 ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient( | 2499 ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient( |
| 2343 const char* category_group, const char* name) { | 2500 const char* category_group, const char* name) { |
| 2344 // The single atom works because for now the category_group can only be "gpu". | 2501 // The single atom works because for now the category_group can only be "gpu". |
| 2345 DCHECK_EQ(strcmp(category_group, "gpu"), 0); | 2502 DCHECK_EQ(strcmp(category_group, "gpu"), 0); |
| 2346 static TRACE_EVENT_API_ATOMIC_WORD atomic = 0; | 2503 static TRACE_EVENT_API_ATOMIC_WORD atomic = 0; |
| (...skipping 12 matching lines...) Expand all Loading... |
| 2359 } | 2516 } |
| 2360 | 2517 |
ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  // Only record the end-of-scope duration while the category is still
  // enabled; skipping the call is cheap and avoids touching a stale handle.
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
                                                name_, event_handle_);
  }
}
| 2367 | 2524 |
| 2368 } // namespace trace_event_internal | 2525 } // namespace trace_event_internal |
| OLD | NEW |