Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1823)

Side by Side Diff: base/debug/activity_tracker.cc

Issue 2235273002: Refactor embedded structures to top-level scope. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: removed new ResourceData class (for later CL) Created 4 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « base/debug/activity_tracker.h ('k') | base/debug/activity_tracker_unittest.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/activity_tracker.h" 5 #include "base/debug/activity_tracker.h"
6 6
7 #include "base/debug/stack_trace.h" 7 #include "base/debug/stack_trace.h"
8 #include "base/files/file.h" 8 #include "base/files/file.h"
9 #include "base/files/file_path.h" 9 #include "base/files/file_path.h"
10 #include "base/files/memory_mapped_file.h" 10 #include "base/files/memory_mapped_file.h"
(...skipping 15 matching lines...) Expand all
26 26
27 // A number that identifies the memory as having been initialized. It's 27 // A number that identifies the memory as having been initialized. It's
28 // arbitrary but happens to be the first 8 bytes of SHA1(ThreadActivityTracker). 28 // arbitrary but happens to be the first 8 bytes of SHA1(ThreadActivityTracker).
29 // A version number is added on so that major structure changes won't try to 29 // A version number is added on so that major structure changes won't try to
30 // read an older version (since the cookie won't match). 30 // read an older version (since the cookie won't match).
31 const uint64_t kHeaderCookie = 0xC0029B240D4A3092ULL + 1; // v1 31 const uint64_t kHeaderCookie = 0xC0029B240D4A3092ULL + 1; // v1
32 32
33 // The minimum depth a stack should support. 33 // The minimum depth a stack should support.
34 const int kMinStackDepth = 2; 34 const int kMinStackDepth = 2;
35 35
36 union ThreadRef {
37 int64_t as_id;
38 #if defined(OS_WIN)
39 // On Windows, the handle itself is often a pseudo-handle with a common
40 // value meaning "this thread" and so the thread-id is used. The former
41 // can be converted to a thread-id with a system call.
42 PlatformThreadId as_tid;
43 #elif defined(OS_POSIX)
44 // On Posix, the handle is always a unique identifier so no conversion
45 // needs to be done. However, its value is officially opaque so there
46 // is no one correct way to convert it to a numerical identifier.
47 PlatformThreadHandle::Handle as_handle;
48 #endif
49 };
50
36 } // namespace 51 } // namespace
37 52
38 53
54 // It doesn't matter what is contained in this (though it will be all zeros)
55 // as only the address of it is important.
56 const ActivityData kNullActivityData = {};
57
58 ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) {
59 ThreadRef thread_ref;
60 thread_ref.as_id = 0; // Zero the union in case other is smaller.
61 #if defined(OS_WIN)
62 thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
63 #elif defined(OS_POSIX)
64 thread_ref.as_handle = handle.platform_handle();
65 #endif
66 return ForThread(thread_ref.as_id);
67 }
68
69 // static
70 void Activity::FillFrom(Activity* activity,
71 const void* origin,
72 Type type,
73 const ActivityData& data) {
74 activity->time_internal = base::TimeTicks::Now().ToInternalValue();
75 activity->origin_address = reinterpret_cast<uintptr_t>(origin);
76 activity->activity_type = type;
77 activity->data = data;
78
79 #if defined(SYZYASAN)
80 // Create a stacktrace from the current location and get the addresses.
81 StackTrace stack_trace;
82 size_t stack_depth;
83 const void* const* stack_addrs = stack_trace.Addresses(&stack_depth);
84 // Copy the stack addresses, ignoring the first one (here).
85 size_t i;
86 for (i = 1; i < stack_depth && i < kActivityCallStackSize; ++i) {
87 activity->call_stack[i - 1] = reinterpret_cast<uintptr_t>(stack_addrs[i]);
88 }
89 activity->call_stack[i - 1] = 0;
90 #else
91 activity->call_stack[0] = 0;
92 #endif
93 }
94
95 ActivitySnapshot::ActivitySnapshot() {}
96 ActivitySnapshot::~ActivitySnapshot() {}
97
98
39 // This information is kept for every thread that is tracked. It is filled 99 // This information is kept for every thread that is tracked. It is filled
40 // the very first time the thread is seen. All fields must be of exact sizes 100 // the very first time the thread is seen. All fields must be of exact sizes
41 // so there is no issue moving between 32 and 64-bit builds. 101 // so there is no issue moving between 32 and 64-bit builds.
42 struct ThreadActivityTracker::Header { 102 struct ThreadActivityTracker::Header {
43 // This unique number indicates a valid initialization of the memory. 103 // This unique number indicates a valid initialization of the memory.
44 uint64_t cookie; 104 uint64_t cookie;
45 105
46 // The process-id and thread-id to which this data belongs. These identifiers 106 // The process-id and thread-id (thread_ref.as_id) to which this data belongs.
47 // are not guaranteed to mean anything but are unique, in combination, among 107 // These identifiers are not guaranteed to mean anything but are unique, in
48 // all active trackers. It would be nice to always have the process_id be a 108 // combination, among all active trackers. It would be nice to always have
49 // 64-bit value but the necessity of having it atomic (for the memory barriers 109 // the process_id be a 64-bit value but the necessity of having it atomic
50 // it provides) limits it to the natural word size of the machine. 110 // (for the memory barriers it provides) limits it to the natural word size
111 // of the machine.
51 #ifdef ARCH_CPU_64_BITS 112 #ifdef ARCH_CPU_64_BITS
52 std::atomic<int64_t> process_id; 113 std::atomic<int64_t> process_id;
53 #else 114 #else
54 std::atomic<int32_t> process_id; 115 std::atomic<int32_t> process_id;
55 int32_t process_id_padding; 116 int32_t process_id_padding;
56 #endif 117 #endif
57 118 ThreadRef thread_ref;
58 union {
59 int64_t as_id;
60 #if defined(OS_WIN)
61 // On Windows, the handle itself is often a pseudo-handle with a common
62 // value meaning "this thread" and so the thread-id is used. The former
63 // can be converted to a thread-id with a system call.
64 PlatformThreadId as_tid;
65 #elif defined(OS_POSIX)
66 // On Posix, the handle is always a unique identifier so no conversion
67 // needs to be done. However, its value is officially opaque so there
68 // is no one correct way to convert it to a numerical identifier.
69 PlatformThreadHandle::Handle as_handle;
70 #endif
71 } thread_ref;
72 119
73 // The start-time and start-ticks when the data was created. Each activity 120 // The start-time and start-ticks when the data was created. Each activity
74 // record has a |time_internal| value that can be converted to a "wall time" 121 // record has a |time_internal| value that can be converted to a "wall time"
75 // with these two values. 122 // with these two values.
76 int64_t start_time; 123 int64_t start_time;
77 int64_t start_ticks; 124 int64_t start_ticks;
78 125
79 // The number of Activity slots in the data. 126 // The number of Activity slots in the data.
80 uint32_t stack_slots; 127 uint32_t stack_slots;
81 128
(...skipping 12 matching lines...) Expand all
94 // is not the current implementation so no parallel snapshots allowed). 141 // is not the current implementation so no parallel snapshots allowed).
95 std::atomic<uint32_t> stack_unchanged; 142 std::atomic<uint32_t> stack_unchanged;
96 143
97 // The name of the thread (up to a maximum length). Dynamic-length names 144 // The name of the thread (up to a maximum length). Dynamic-length names
98 // are not practical since the memory has to come from the same persistent 145 // are not practical since the memory has to come from the same persistent
99 // allocator that holds this structure and to which this object has no 146 // allocator that holds this structure and to which this object has no
100 // reference. 147 // reference.
101 char thread_name[32]; 148 char thread_name[32];
102 }; 149 };
103 150
104 // It doesn't matter what is contained in this (though it will be all zeros)
105 // as only the address of it is important.
106 const ThreadActivityTracker::ActivityData
107 ThreadActivityTracker::kNullActivityData = {};
108
109 ThreadActivityTracker::ActivityData
110 ThreadActivityTracker::ActivityData::ForThread(
111 const PlatformThreadHandle& handle) {
112 // Header already has a conversion union; reuse that.
113 ThreadActivityTracker::Header header;
114 header.thread_ref.as_id = 0; // Zero the union in case other is smaller.
115 #if defined(OS_WIN)
116 header.thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
117 #elif defined(OS_POSIX)
118 header.thread_ref.as_handle = handle.platform_handle();
119 #endif
120 return ForThread(header.thread_ref.as_id);
121 }
122
123 ThreadActivityTracker::ActivitySnapshot::ActivitySnapshot() {}
124 ThreadActivityTracker::ActivitySnapshot::~ActivitySnapshot() {}
125
126
127 ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size) 151 ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
128 : header_(static_cast<Header*>(base)), 152 : header_(static_cast<Header*>(base)),
129 stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) + 153 stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
130 sizeof(Header))), 154 sizeof(Header))),
131 stack_slots_( 155 stack_slots_(
132 static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) { 156 static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) {
133 DCHECK(thread_checker_.CalledOnValidThread()); 157 DCHECK(thread_checker_.CalledOnValidThread());
134 158
135 // Verify the parameters but fail gracefully if they're not valid so that 159 // Verify the parameters but fail gracefully if they're not valid so that
136 // production code based on external inputs will not crash. IsValid() will 160 // production code based on external inputs will not crash. IsValid() will
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
194 } else { 218 } else {
195 // This is a file with existing data. Perform basic consistency checks. 219 // This is a file with existing data. Perform basic consistency checks.
196 valid_ = true; 220 valid_ = true;
197 valid_ = IsValid(); 221 valid_ = IsValid();
198 } 222 }
199 } 223 }
200 224
201 ThreadActivityTracker::~ThreadActivityTracker() {} 225 ThreadActivityTracker::~ThreadActivityTracker() {}
202 226
203 void ThreadActivityTracker::PushActivity(const void* origin, 227 void ThreadActivityTracker::PushActivity(const void* origin,
204 ActivityType type, 228 Activity::Type type,
205 const ActivityData& data) { 229 const ActivityData& data) {
206 // A thread-checker creates a lock to check the thread-id which means 230 // A thread-checker creates a lock to check the thread-id which means
207 // re-entry into this code if lock acquisitions are being tracked. 231 // re-entry into this code if lock acquisitions are being tracked.
208 DCHECK(type == ACT_LOCK_ACQUIRE || thread_checker_.CalledOnValidThread()); 232 DCHECK(type == Activity::ACT_LOCK_ACQUIRE ||
233 thread_checker_.CalledOnValidThread());
209 234
210 // Get the current depth of the stack. No access to other memory guarded 235 // Get the current depth of the stack. No access to other memory guarded
211 // by this variable is done here so a "relaxed" load is acceptable. 236 // by this variable is done here so a "relaxed" load is acceptable.
212 uint32_t depth = header_->current_depth.load(std::memory_order_relaxed); 237 uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);
213 238
214 // Handle the case where the stack depth has exceeded the storage capacity. 239 // Handle the case where the stack depth has exceeded the storage capacity.
215 // Extra entries will be lost leaving only the base of the stack. 240 // Extra entries will be lost leaving only the base of the stack.
216 if (depth >= stack_slots_) { 241 if (depth >= stack_slots_) {
217 // Since no other threads modify the data, no compare/exchange is needed. 242 // Since no other threads modify the data, no compare/exchange is needed.
218 // Since no other memory is being modified, a "relaxed" store is acceptable. 243 // Since no other memory is being modified, a "relaxed" store is acceptable.
219 header_->current_depth.store(depth + 1, std::memory_order_relaxed); 244 header_->current_depth.store(depth + 1, std::memory_order_relaxed);
220 return; 245 return;
221 } 246 }
222 247
223 // Get a pointer to the next activity and load it. No atomicity is required 248 // Get a pointer to the next activity and load it. No atomicity is required
224 // here because the memory is known only to this thread. It will be made 249 // here because the memory is known only to this thread. It will be made
225 // known to other threads once the depth is incremented. 250 // known to other threads once the depth is incremented.
226 Activity* activity = &stack_[depth]; 251 Activity::FillFrom(&stack_[depth], origin, type, data);
227 activity->time_internal = base::TimeTicks::Now().ToInternalValue();
228 activity->origin_address = reinterpret_cast<uintptr_t>(origin);
229 activity->activity_type = type;
230 activity->data = data;
231
232 #if defined(SYZYASAN)
233 // Create a stacktrace from the current location and get the addresses.
234 StackTrace stack_trace;
235 size_t stack_depth;
236 const void* const* stack_addrs = stack_trace.Addresses(&stack_depth);
237 // Copy the stack addresses, ignoring the first one (here).
238 size_t i;
239 for (i = 1; i < stack_depth && i < kActivityCallStackSize; ++i) {
240 activity->call_stack[i - 1] = reinterpret_cast<uintptr_t>(stack_addrs[i]);
241 }
242 activity->call_stack[i - 1] = 0;
243 #else
244 // Since the memory was initially zero and nothing ever overwrites it in
245 // this "else" case, there is no need to write even the null terminator.
246 //activity->call_stack[0] = 0;
247 #endif
248 252
249 // Save the incremented depth. Because this guards |activity| memory filled 253 // Save the incremented depth. Because this guards |activity| memory filled
250 // above that may be read by another thread once the recorded depth changes, 254 // above that may be read by another thread once the recorded depth changes,
251 // a "release" store is required. 255 // a "release" store is required.
252 header_->current_depth.store(depth + 1, std::memory_order_release); 256 header_->current_depth.store(depth + 1, std::memory_order_release);
253 } 257 }
254 258
255 void ThreadActivityTracker::ChangeActivity(ActivityType type, 259 void ThreadActivityTracker::ChangeActivity(Activity::Type type,
256 const ActivityData& data) { 260 const ActivityData& data) {
257 DCHECK(thread_checker_.CalledOnValidThread()); 261 DCHECK(thread_checker_.CalledOnValidThread());
258 DCHECK(type != ACT_NULL || &data != &kNullActivityData); 262 DCHECK(type != Activity::ACT_NULL || &data != &kNullActivityData);
259 263
260 // Get the current depth of the stack and acquire the data held there. 264 // Get the current depth of the stack and acquire the data held there.
261 uint32_t depth = header_->current_depth.load(std::memory_order_acquire); 265 uint32_t depth = header_->current_depth.load(std::memory_order_acquire);
262 DCHECK_LT(0U, depth); 266 DCHECK_LT(0U, depth);
263 267
264 // Update the information if it is being recorded (i.e. within slot limit). 268 // Update the information if it is being recorded (i.e. within slot limit).
265 if (depth <= stack_slots_) { 269 if (depth <= stack_slots_) {
266 Activity* activity = &stack_[depth - 1]; 270 Activity* activity = &stack_[depth - 1];
267 271
268 if (type != ACT_NULL) { 272 if (type != Activity::ACT_NULL) {
269 DCHECK_EQ(activity->activity_type & ACT_CATEGORY_MASK, 273 DCHECK_EQ(activity->activity_type & Activity::ACT_CATEGORY_MASK,
270 type & ACT_CATEGORY_MASK); 274 type & Activity::ACT_CATEGORY_MASK);
271 activity->activity_type = type; 275 activity->activity_type = type;
272 } 276 }
273 277
274 if (&data != &kNullActivityData) 278 if (&data != &kNullActivityData)
275 activity->data = data; 279 activity->data = data;
276 } 280 }
277 } 281 }
278 282
279 void ThreadActivityTracker::PopActivity() { 283 void ThreadActivityTracker::PopActivity() {
280 // Do an atomic decrement of the depth. No changes to stack entries guarded 284 // Do an atomic decrement of the depth. No changes to stack entries guarded
281 // by this variable are done here so a "relaxed" operation is acceptable. 285 // by this variable are done here so a "relaxed" operation is acceptable.
282 // |depth| will receive the value BEFORE it was modified. 286 // |depth| will receive the value BEFORE it was modified.
283 uint32_t depth = 287 uint32_t depth =
284 header_->current_depth.fetch_sub(1, std::memory_order_relaxed); 288 header_->current_depth.fetch_sub(1, std::memory_order_relaxed);
285 289
286 // Validate that everything is running correctly. 290 // Validate that everything is running correctly.
287 DCHECK_LT(0U, depth); 291 DCHECK_LT(0U, depth);
288 292
289 // A thread-checker creates a lock to check the thread-id which means 293 // A thread-checker creates a lock to check the thread-id which means
290 // re-entry into this code if lock acquisitions are being tracked. 294 // re-entry into this code if lock acquisitions are being tracked.
291 DCHECK(stack_[depth - 1].activity_type == ACT_LOCK_ACQUIRE || 295 DCHECK(stack_[depth - 1].activity_type == Activity::ACT_LOCK_ACQUIRE ||
292 thread_checker_.CalledOnValidThread()); 296 thread_checker_.CalledOnValidThread());
293 297
294 // The stack has shrunk meaning that some other thread trying to copy the 298 // The stack has shrunk meaning that some other thread trying to copy the
295 // contents for reporting purposes could get bad data. That thread would 299 // contents for reporting purposes could get bad data. That thread would
296 // have written a non-zero value into |stack_unchanged|; clearing it here 300 // have written a non-zero value into |stack_unchanged|; clearing it here
297 // will let that thread detect that something did change. This needs to 301 // will let that thread detect that something did change. This needs to
298 // happen after the atomic |depth| operation above so a "release" store 302 // happen after the atomic |depth| operation above so a "release" store
299 // is required. 303 // is required.
300 header_->stack_unchanged.store(0, std::memory_order_release); 304 header_->stack_unchanged.store(0, std::memory_order_release);
301 } 305 }
(...skipping 391 matching lines...) Expand 10 before | Expand all | Expand 10 after
693 // the data won't be persisted. 697 // the data won't be persisted.
694 delete[] reinterpret_cast<char*>(mem_base); 698 delete[] reinterpret_cast<char*>(mem_base);
695 } 699 }
696 } 700 }
697 701
698 // static 702 // static
699 void GlobalActivityTracker::OnTLSDestroy(void* value) { 703 void GlobalActivityTracker::OnTLSDestroy(void* value) {
700 delete reinterpret_cast<ManagedActivityTracker*>(value); 704 delete reinterpret_cast<ManagedActivityTracker*>(value);
701 } 705 }
702 706
703
704 ScopedActivity::ScopedActivity(const tracked_objects::Location& location, 707 ScopedActivity::ScopedActivity(const tracked_objects::Location& location,
705 uint8_t action, 708 uint8_t action,
706 uint32_t id, 709 uint32_t id,
707 int32_t info) 710 int32_t info)
708 : GlobalActivityTracker::ScopedThreadActivity( 711 : GlobalActivityTracker::ScopedThreadActivity(
709 location.program_counter(), 712 location.program_counter(),
710 static_cast<ThreadActivityTracker::ActivityType>( 713 static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
711 ThreadActivityTracker::ACT_GENERIC | action), 714 ActivityData::ForGeneric(id, info),
712 ThreadActivityTracker::ActivityData::ForGeneric(id, info),
713 /*lock_allowed=*/true), 715 /*lock_allowed=*/true),
714 id_(id) { 716 id_(id) {
715 // The action must not affect the category bits of the activity type. 717 // The action must not affect the category bits of the activity type.
716 DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK); 718 DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
717 } 719 }
718 720
719 void ScopedActivity::ChangeAction(uint8_t action) { 721 void ScopedActivity::ChangeAction(uint8_t action) {
720 DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK); 722 DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
721 ChangeTypeAndData(static_cast<ThreadActivityTracker::ActivityType>( 723 ChangeTypeAndData(static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
722 ThreadActivityTracker::ACT_GENERIC | action), 724 kNullActivityData);
723 ThreadActivityTracker::kNullActivityData);
724 } 725 }
725 726
726 void ScopedActivity::ChangeInfo(int32_t info) { 727 void ScopedActivity::ChangeInfo(int32_t info) {
727 ChangeTypeAndData(ThreadActivityTracker::ACT_NULL, 728 ChangeTypeAndData(Activity::ACT_NULL, ActivityData::ForGeneric(id_, info));
728 ThreadActivityTracker::ActivityData::ForGeneric(id_, info));
729 } 729 }
730 730
731 void ScopedActivity::ChangeActionAndInfo(uint8_t action, int32_t info) { 731 void ScopedActivity::ChangeActionAndInfo(uint8_t action, int32_t info) {
732 DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK); 732 DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
733 ChangeTypeAndData(static_cast<ThreadActivityTracker::ActivityType>( 733 ChangeTypeAndData(static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
734 ThreadActivityTracker::ACT_GENERIC | action), 734 ActivityData::ForGeneric(id_, info));
735 ThreadActivityTracker::ActivityData::ForGeneric(id_, info));
736 } 735 }
737 736
738 ScopedTaskRunActivity::ScopedTaskRunActivity(const base::PendingTask& task) 737 ScopedTaskRunActivity::ScopedTaskRunActivity(const base::PendingTask& task)
739 : GlobalActivityTracker::ScopedThreadActivity( 738 : GlobalActivityTracker::ScopedThreadActivity(
740 task.posted_from.program_counter(), 739 task.posted_from.program_counter(),
741 ThreadActivityTracker::ACT_TASK_RUN, 740 Activity::ACT_TASK_RUN,
742 ThreadActivityTracker::ActivityData::ForTask(task.sequence_num), 741 ActivityData::ForTask(task.sequence_num),
743 /*lock_allowed=*/true) {} 742 /*lock_allowed=*/true) {}
744 743
745 ScopedLockAcquireActivity::ScopedLockAcquireActivity( 744 ScopedLockAcquireActivity::ScopedLockAcquireActivity(
746 const base::internal::LockImpl* lock) 745 const base::internal::LockImpl* lock)
747 : GlobalActivityTracker::ScopedThreadActivity( 746 : GlobalActivityTracker::ScopedThreadActivity(
748 nullptr, 747 nullptr,
749 ThreadActivityTracker::ACT_LOCK_ACQUIRE, 748 Activity::ACT_LOCK_ACQUIRE,
750 ThreadActivityTracker::ActivityData::ForLock(lock), 749 ActivityData::ForLock(lock),
751 /*lock_allowed=*/false) {} 750 /*lock_allowed=*/false) {}
752 751
753 ScopedEventWaitActivity::ScopedEventWaitActivity( 752 ScopedEventWaitActivity::ScopedEventWaitActivity(
754 const base::WaitableEvent* event) 753 const base::WaitableEvent* event)
755 : GlobalActivityTracker::ScopedThreadActivity( 754 : GlobalActivityTracker::ScopedThreadActivity(
756 nullptr, 755 nullptr,
757 ThreadActivityTracker::ACT_EVENT_WAIT, 756 Activity::ACT_EVENT_WAIT,
758 ThreadActivityTracker::ActivityData::ForEvent(event), 757 ActivityData::ForEvent(event),
759 /*lock_allowed=*/true) {} 758 /*lock_allowed=*/true) {}
760 759
761 ScopedThreadJoinActivity::ScopedThreadJoinActivity( 760 ScopedThreadJoinActivity::ScopedThreadJoinActivity(
762 const base::PlatformThreadHandle* thread) 761 const base::PlatformThreadHandle* thread)
763 : GlobalActivityTracker::ScopedThreadActivity( 762 : GlobalActivityTracker::ScopedThreadActivity(
764 nullptr, 763 nullptr,
765 ThreadActivityTracker::ACT_THREAD_JOIN, 764 Activity::ACT_THREAD_JOIN,
766 ThreadActivityTracker::ActivityData::ForThread(*thread), 765 ActivityData::ForThread(*thread),
767 /*lock_allowed=*/true) {} 766 /*lock_allowed=*/true) {}
768 767
769 #if !defined(OS_NACL) && !defined(OS_IOS) 768 #if !defined(OS_NACL) && !defined(OS_IOS)
770 ScopedProcessWaitActivity::ScopedProcessWaitActivity( 769 ScopedProcessWaitActivity::ScopedProcessWaitActivity(
771 const base::Process* process) 770 const base::Process* process)
772 : GlobalActivityTracker::ScopedThreadActivity( 771 : GlobalActivityTracker::ScopedThreadActivity(
773 nullptr, 772 nullptr,
774 ThreadActivityTracker::ACT_PROCESS_WAIT, 773 Activity::ACT_PROCESS_WAIT,
775 ThreadActivityTracker::ActivityData::ForProcess(process->Pid()), 774 ActivityData::ForProcess(process->Pid()),
776 /*lock_allowed=*/true) {} 775 /*lock_allowed=*/true) {}
777 #endif 776 #endif
778 777
779 } // namespace debug 778 } // namespace debug
780 } // namespace base 779 } // namespace base
OLDNEW
« no previous file with comments | « base/debug/activity_tracker.h ('k') | base/debug/activity_tracker_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698