OLD | NEW |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
6 | 6 |
7 #include <math.h> | 7 #include <math.h> |
8 | 8 |
9 #include "base/format_macros.h" | 9 #include "base/format_macros.h" |
10 #include "base/message_loop.h" | 10 #include "base/message_loop.h" |
11 #include "base/string_util.h" | 11 #include "base/string_util.h" |
12 #include "base/stringprintf.h" | 12 #include "base/stringprintf.h" |
13 #include "base/threading/thread_restrictions.h" | 13 #include "base/threading/thread_restrictions.h" |
14 | 14 |
15 using base::TimeDelta; | 15 using base::TimeDelta; |
16 | 16 |
17 namespace tracked_objects { | 17 namespace tracked_objects { |
18 | 18 |
19 // A TLS slot to the TrackRegistry for the current thread. | 19 |
| 20 #if defined(TRACK_ALL_TASK_OBJECTS) |
| 21 static const bool kTrackAllTaskObjects = true; |
| 22 #else |
| 23 static const bool kTrackAllTaskObjects = false; |
| 24 #endif |
| 25 |
| 26 // Can we count on thread termination to call for thread cleanup? If not, then |
| 27 // we can't risk putting references to ThreadData in TLS, as it will leak on |
| 28 // worker thread termination. |
| 29 static const bool kWorkerThreadCleanupSupported = true; |
| 30 |
| 31 // A TLS slot which points to the ThreadData instance for the current thread. We |
| 32 // do a fake initialization here (zeroing out data), and then the real in-place |
| 33 // construction happens when we call tls_index_.Initialize(). |
20 // static | 34 // static |
21 base::ThreadLocalStorage::Slot ThreadData::tls_index_(base::LINKER_INITIALIZED); | 35 base::ThreadLocalStorage::Slot ThreadData::tls_index_(base::LINKER_INITIALIZED); |
22 | 36 |
23 // A global state variable to prevent repeated initialization during tests. | 37 // A global state variable to prevent repeated initialization during tests. |
24 // static | 38 // static |
25 AutoTracking::State AutoTracking::state_ = AutoTracking::kNeverBeenRun; | 39 AutoTracking::State AutoTracking::state_ = AutoTracking::kNeverBeenRun; |
26 | 40 |
27 // A lock-protected counter to assign sequence numbers to threads. | 41 // A lock-protected counter to assign sequence numbers to threads. |
28 // static | 42 // static |
29 int ThreadData::thread_number_counter = 0; | 43 int ThreadData::thread_number_counter_ = 0; |
30 | 44 |
31 //------------------------------------------------------------------------------ | 45 //------------------------------------------------------------------------------ |
32 // Death data tallies durations when a death takes place. | 46 // Death data tallies durations when a death takes place. |
33 | 47 |
34 void DeathData::RecordDeath(const TimeDelta& queue_duration, | 48 void DeathData::RecordDeath(const TimeDelta& queue_duration, |
35 const TimeDelta& run_duration) { | 49 const TimeDelta& run_duration) { |
36 ++count_; | 50 ++count_; |
37 queue_duration_ += queue_duration; | 51 queue_duration_ += queue_duration; |
38 run_duration_ += run_duration; | 52 run_duration_ += run_duration; |
39 } | 53 } |
40 | 54 |
41 int DeathData::AverageMsRunDuration() const { | 55 int DeathData::AverageMsRunDuration() const { |
| 56 if (run_duration_ == base::TimeDelta()) |
| 57 return 0; |
42 return static_cast<int>(run_duration_.InMilliseconds() / count_); | 58 return static_cast<int>(run_duration_.InMilliseconds() / count_); |
43 } | 59 } |
44 | 60 |
45 int DeathData::AverageMsQueueDuration() const { | 61 int DeathData::AverageMsQueueDuration() const { |
| 62 if (queue_duration_ == base::TimeDelta()) |
| 63 return 0; |
46 return static_cast<int>(queue_duration_.InMilliseconds() / count_); | 64 return static_cast<int>(queue_duration_.InMilliseconds() / count_); |
47 } | 65 } |
48 | 66 |
49 void DeathData::AddDeathData(const DeathData& other) { | 67 void DeathData::AddDeathData(const DeathData& other) { |
50 count_ += other.count_; | 68 count_ += other.count_; |
51 queue_duration_ += other.queue_duration_; | 69 queue_duration_ += other.queue_duration_; |
52 run_duration_ += other.run_duration_; | 70 run_duration_ += other.run_duration_; |
53 } | 71 } |
54 | 72 |
55 void DeathData::Write(std::string* output) const { | 73 void DeathData::WriteHTML(std::string* output) const { |
56 if (!count_) | 74 if (!count_) |
57 return; | 75 return; |
58 base::StringAppendF(output, "%s:%d, ", | 76 base::StringAppendF(output, "%s:%d, ", |
59 (count_ == 1) ? "Life" : "Lives", count_); | 77 (count_ == 1) ? "Life" : "Lives", count_); |
60 base::StringAppendF(output, "Run:%"PRId64"ms(%dms/life) ", | 78 base::StringAppendF(output, "Run:%"PRId64"ms(%dms/life) ", |
61 run_duration_.InMilliseconds(), | 79 run_duration_.InMilliseconds(), |
62 AverageMsRunDuration()); | 80 AverageMsRunDuration()); |
63 base::StringAppendF(output, "Queue:%"PRId64"ms(%dms/life) ", | 81 base::StringAppendF(output, "Queue:%"PRId64"ms(%dms/life) ", |
64 queue_duration_.InMilliseconds(), | 82 queue_duration_.InMilliseconds(), |
65 AverageMsQueueDuration()); | 83 AverageMsQueueDuration()); |
66 } | 84 } |
67 | 85 |
| 86 base::DictionaryValue* DeathData::ToValue() const { |
| 87 base::DictionaryValue* dictionary = new base::DictionaryValue; |
| 88 dictionary->Set("count", base::Value::CreateIntegerValue(count_)); |
| 89 dictionary->Set("run_ms", |
| 90 base::Value::CreateIntegerValue(run_duration_.InMilliseconds())); |
| 91 dictionary->Set("queue_ms", |
| 92 base::Value::CreateIntegerValue(queue_duration_.InMilliseconds())); |
| 93 return dictionary; |
| 94 } |
| 95 |
68 void DeathData::Clear() { | 96 void DeathData::Clear() { |
69 count_ = 0; | 97 count_ = 0; |
70 queue_duration_ = TimeDelta(); | 98 queue_duration_ = TimeDelta(); |
71 run_duration_ = TimeDelta(); | 99 run_duration_ = TimeDelta(); |
72 } | 100 } |
73 | 101 |
74 //------------------------------------------------------------------------------ | 102 //------------------------------------------------------------------------------ |
75 BirthOnThread::BirthOnThread(const Location& location) | 103 BirthOnThread::BirthOnThread(const Location& location, |
| 104 const ThreadData& current) |
76 : location_(location), | 105 : location_(location), |
77 birth_thread_(ThreadData::Get()) { } | 106 birth_thread_(&current) {} |
78 | 107 |
79 //------------------------------------------------------------------------------ | 108 //------------------------------------------------------------------------------ |
80 Births::Births(const Location& location) | 109 Births::Births(const Location& location, const ThreadData& current) |
81 : BirthOnThread(location), | 110 : BirthOnThread(location, current), |
82 birth_count_(1) { } | 111 birth_count_(1) { } |
83 | 112 |
84 //------------------------------------------------------------------------------ | 113 //------------------------------------------------------------------------------ |
85 // ThreadData maintains the central data for all births and death. | 114 // ThreadData maintains the central data for all births and deaths. |
86 | 115 |
87 // static | 116 // static |
88 ThreadData* ThreadData::first_ = NULL; | 117 ThreadData* ThreadData::all_thread_data_list_head_ = NULL; |
| 118 |
| 119 // static |
| 120 ThreadData::ThreadDataPool* ThreadData::unregistered_thread_data_pool_ = NULL; |
| 121 |
89 // static | 122 // static |
90 base::Lock ThreadData::list_lock_; | 123 base::Lock ThreadData::list_lock_; |
91 | 124 |
92 // static | 125 // static |
93 ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED; | 126 ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED; |
94 | 127 |
95 ThreadData::ThreadData(const std::string& suggested_name) : next_(NULL) { | 128 ThreadData::ThreadData(const std::string& suggested_name) |
| 129 : next_(NULL), |
| 130 is_a_worker_thread_(false) { |
96 DCHECK_GE(suggested_name.size(), 0u); | 131 DCHECK_GE(suggested_name.size(), 0u); |
97 thread_name_ = suggested_name; | 132 thread_name_ = suggested_name; |
| 133 PushToHeadOfList(); |
98 } | 134 } |
99 | 135 |
100 ThreadData::ThreadData() : next_(NULL) { | 136 ThreadData::ThreadData() : next_(NULL), is_a_worker_thread_(true) { |
101 int thread_number; | 137 int thread_number; |
102 { | 138 { |
103 base::AutoLock lock(list_lock_); | 139 base::AutoLock lock(list_lock_); |
104 thread_number = ++thread_number_counter; | 140 thread_number = ++thread_number_counter_; |
105 } | 141 } |
106 base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number); | 142 base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number); |
| 143 PushToHeadOfList(); |
107 } | 144 } |
108 | 145 |
109 ThreadData::~ThreadData() {} | 146 ThreadData::~ThreadData() {} |
110 | 147 |
| 148 void ThreadData::PushToHeadOfList() { |
| 149 DCHECK(!next_); |
| 150 base::AutoLock lock(list_lock_); |
| 151 next_ = all_thread_data_list_head_; |
| 152 all_thread_data_list_head_ = this; |
| 153 } |
| 154 |
111 // static | 155 // static |
112 void ThreadData::InitializeThreadContext(const std::string& suggested_name) { | 156 void ThreadData::InitializeThreadContext(const std::string& suggested_name) { |
113 if (!tls_index_.initialized()) | 157 if (!tls_index_.initialized()) |
114 return; // For unittests only. | 158 return; // For unittests only. |
115 RegisterCurrentContext(new ThreadData(suggested_name)); | 159 DCHECK_EQ(tls_index_.Get(), reinterpret_cast<void*>(NULL)); |
| 160 ThreadData* current_thread_data = new ThreadData(suggested_name); |
| 161 tls_index_.Set(current_thread_data); |
116 } | 162 } |
117 | 163 |
118 // static | 164 // static |
119 ThreadData* ThreadData::Get() { | 165 ThreadData* ThreadData::Get() { |
120 if (!tls_index_.initialized()) | 166 if (!tls_index_.initialized()) |
121 return NULL; // For unittests only. | 167 return NULL; // For unittests only. |
122 ThreadData* registered = static_cast<ThreadData*>(tls_index_.Get()); | 168 ThreadData* registered = reinterpret_cast<ThreadData*>(tls_index_.Get()); |
123 if (!registered) { | 169 if (registered) |
124 // We have to create a new registry entry for this ThreadData. | 170 return registered; |
125 // TODO(jar): Host all unnamed (Worker) threads in *one* ThreadData instance, | 171 |
126 // (with locking protection on that instance) or else recycle and re-use | 172 // We must be a worker thread, since we didn't pre-register. |
127 // worker thread ThreadData when the worker thread terminates. | 173 ThreadData* worker_thread_data = NULL; |
128 registered = RegisterCurrentContext(new ThreadData()); | 174 { |
| 175 base::AutoLock lock(list_lock_); |
| 176 if (!unregistered_thread_data_pool_->empty()) { |
| 177 worker_thread_data = |
| 178 const_cast<ThreadData*>(unregistered_thread_data_pool_->top()); |
| 179 unregistered_thread_data_pool_->pop(); |
| 180 } |
129 } | 181 } |
130 return registered; | 182 |
| 183 // If we can't find a previously used instance, then we have to create one. |
| 184 if (!worker_thread_data) |
| 185 worker_thread_data = new ThreadData(); |
| 186 |
| 187 tls_index_.Set(worker_thread_data); |
| 188 return worker_thread_data; |
131 } | 189 } |
132 | 190 |
133 // static | 191 // static |
134 ThreadData* ThreadData::RegisterCurrentContext(ThreadData* unregistered) { | 192 void ThreadData::OnThreadTermination(void* thread_data) { |
135 DCHECK_EQ(tls_index_.Get(), static_cast<void*>(0)); | 193 if (!kTrackAllTaskObjects) |
136 bool too_late_to_register = false; | 194 return; // Not compiled in. |
137 { | 195 DCHECK(tls_index_.initialized()); |
138 base::AutoLock lock(list_lock_); | 196 if (!thread_data) |
139 // Use lock to ensure we have the most recent status. | 197 return; |
140 if (!IsActive()) { | 198 reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup(); |
141 too_late_to_register = true; | 199 DCHECK_EQ(tls_index_.Get(), reinterpret_cast<ThreadData*>(NULL)); |
142 } else { | 200 } |
143 // Use list_lock_ to insert as new head of list. | 201 |
144 unregistered->next_ = first_; | 202 void ThreadData::OnThreadTerminationCleanup() const { |
145 first_ = unregistered; | 203 tls_index_.Set(NULL); |
146 } | 204 if (!is_a_worker_thread_) |
147 } // Release lock. | 205 return; |
148 if (too_late_to_register) { | 206 base::AutoLock lock(list_lock_); |
149 delete unregistered; | 207 unregistered_thread_data_pool_->push(this); |
150 unregistered = NULL; | |
151 } else { | |
152 tls_index_.Set(unregistered); | |
153 } | |
154 return unregistered; | |
155 } | 208 } |
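
The Get()/OnThreadTerminationCleanup() pair above recycles worker-thread ThreadData through a shared pool instead of leaking it when a worker thread exits. A minimal standalone sketch of that recycle-or-create pattern follows; Context, g_pool and g_mutex are generic illustrative names standing in for the ThreadData/TLS machinery, not code from this CL.

#include <cstddef>
#include <mutex>
#include <stack>

struct Context {};  // Stands in for per-thread bookkeeping (e.g. ThreadData).

static std::stack<Context*> g_pool;  // Retired worker-thread contexts.
static std::mutex g_mutex;           // Guards g_pool.

// Called lazily from a worker thread that has no context yet.
Context* AcquireContext() {
  Context* context = NULL;
  {
    std::lock_guard<std::mutex> lock(g_mutex);
    if (!g_pool.empty()) {  // Reuse a retired instance when possible.
      context = g_pool.top();
      g_pool.pop();
    }
  }
  if (!context)
    context = new Context;  // Otherwise create a fresh one.
  return context;           // Caller stashes this in thread-local storage.
}

// Called from the TLS destructor when a worker thread terminates.
void ReleaseContext(Context* context) {
  std::lock_guard<std::mutex> lock(g_mutex);
  g_pool.push(context);  // Keep the instance for the next worker thread.
}
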
156 | 209 |
157 // static | 210 // static |
158 void ThreadData::WriteHTML(const std::string& query, std::string* output) { | 211 void ThreadData::WriteHTML(const std::string& query, std::string* output) { |
159 if (!ThreadData::IsActive()) | 212 if (!ThreadData::IsActive()) |
160 return; // Not yet initialized. | 213 return; // Not yet initialized. |
161 | 214 |
162 DataCollector collected_data; // Gather data. | 215 DataCollector collected_data; // Gather data. |
163 collected_data.AddListOfLivingObjects(); // Add births that are still alive. | 216 collected_data.AddListOfLivingObjects(); // Add births that are still alive. |
164 | 217 |
(...skipping 54 matching lines...)
219 "If you wish to monitor Renderer events, be sure to run in --single-process" | 272 "If you wish to monitor Renderer events, be sure to run in --single-process" |
220 " mode."; | 273 " mode."; |
221 output->append(help_string); | 274 output->append(help_string); |
222 } | 275 } |
223 | 276 |
224 // static | 277 // static |
225 void ThreadData::WriteHTMLTotalAndSubtotals( | 278 void ThreadData::WriteHTMLTotalAndSubtotals( |
226 const DataCollector::Collection& match_array, | 279 const DataCollector::Collection& match_array, |
227 const Comparator& comparator, | 280 const Comparator& comparator, |
228 std::string* output) { | 281 std::string* output) { |
229 if (!match_array.size()) { | 282 if (match_array.empty()) { |
230 output->append("There were no tracked matches."); | 283 output->append("There were no tracked matches."); |
231 } else { | 284 return; |
232 // Aggregate during printing | 285 } |
233 Aggregation totals; | 286 // Aggregate during printing |
234 for (size_t i = 0; i < match_array.size(); ++i) { | 287 Aggregation totals; |
235 totals.AddDeathSnapshot(match_array[i]); | 288 for (size_t i = 0; i < match_array.size(); ++i) { |
| 289 totals.AddDeathSnapshot(match_array[i]); |
| 290 } |
| 291 output->append("Aggregate Stats: "); |
| 292 totals.WriteHTML(output); |
| 293 output->append("<hr><hr>"); |
| 294 |
| 295 Aggregation subtotals; |
| 296 for (size_t i = 0; i < match_array.size(); ++i) { |
| 297 if (0 == i || !comparator.Equivalent(match_array[i - 1], |
| 298 match_array[i])) { |
| 299 // Print group's defining characteristics. |
| 300 comparator.WriteSortGrouping(match_array[i], output); |
| 301 output->append("<br><br>"); |
236 } | 302 } |
237 output->append("Aggregate Stats: "); | 303 comparator.WriteSnapshotHTML(match_array[i], output); |
238 totals.Write(output); | 304 output->append("<br>"); |
239 output->append("<hr><hr>"); | 305 subtotals.AddDeathSnapshot(match_array[i]); |
240 | 306 if (i + 1 >= match_array.size() || |
241 Aggregation subtotals; | 307 !comparator.Equivalent(match_array[i], |
242 for (size_t i = 0; i < match_array.size(); ++i) { | 308 match_array[i + 1])) { |
243 if (0 == i || !comparator.Equivalent(match_array[i - 1], | 309 // Print aggregate stats for the group. |
244 match_array[i])) { | |
245 // Print group's defining characteristics. | |
246 comparator.WriteSortGrouping(match_array[i], output); | |
247 output->append("<br><br>"); | |
248 } | |
249 comparator.WriteSnapshot(match_array[i], output); | |
250 output->append("<br>"); | 310 output->append("<br>"); |
251 subtotals.AddDeathSnapshot(match_array[i]); | 311 subtotals.WriteHTML(output); |
252 if (i + 1 >= match_array.size() || | 312 output->append("<br><hr><br>"); |
253 !comparator.Equivalent(match_array[i], | 313 subtotals.Clear(); |
254 match_array[i + 1])) { | |
255 // Print aggregate stats for the group. | |
256 output->append("<br>"); | |
257 subtotals.Write(output); | |
258 output->append("<br><hr><br>"); | |
259 subtotals.Clear(); | |
260 } | |
261 } | 314 } |
262 } | 315 } |
263 } | 316 } |
264 | 317 |
| 318 // static |
| 319 base::Value* ThreadData::ToValue(int process_type) { |
| 320 DataCollector collected_data; // Gather data. |
| 321 collected_data.AddListOfLivingObjects(); // Add births that are still alive. |
| 322 base::ListValue* list = collected_data.ToValue(); |
| 323 base::DictionaryValue* dictionary = new base::DictionaryValue(); |
| 324 dictionary->Set("list", list); |
| 325 dictionary->SetInteger("process", process_type); |
| 326 return dictionary; |
| 327 } |
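
ThreadData::ToValue() (together with DataCollector::ToValue(), Snapshot::ToValue() and DeathData::ToValue() elsewhere in this CL) builds a base::Value tree for programmatic consumers. Purely as a hedged illustration: the helper name, the JSONWriter call and every concrete value in the comment below are assumptions, not part of this change.

#include <string>

#include "base/json/json_writer.h"
#include "base/memory/scoped_ptr.h"
#include "base/tracked_objects.h"
#include "base/values.h"

// Hypothetical helper: serialize the current tracking data to JSON.
std::string TrackedObjectsAsJson(int process_type) {
  scoped_ptr<base::Value> value(
      tracked_objects::ThreadData::ToValue(process_type));
  std::string json;
  base::JSONWriter::Write(value.get(), true /* pretty_print */, &json);
  // Roughly (all values invented):
  // { "process": 1,
  //   "list": [ { "birth_thread": "CrBrowserMain",
  //               "death_thread": "WorkerThread-1",
  //               "location": { ... },
  //               "death_data": { "count": 3, "run_ms": 120, "queue_ms": 45 } } ] }
  return json;
}
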
| 328 |
265 Births* ThreadData::TallyABirth(const Location& location) { | 329 Births* ThreadData::TallyABirth(const Location& location) { |
266 BirthMap::iterator it = birth_map_.find(location); | 330 BirthMap::iterator it = birth_map_.find(location); |
267 if (it != birth_map_.end()) { | 331 if (it != birth_map_.end()) { |
268 it->second->RecordBirth(); | 332 it->second->RecordBirth(); |
269 return it->second; | 333 return it->second; |
270 } | 334 } |
271 | 335 |
272 Births* tracker = new Births(location); | 336 Births* tracker = new Births(location, *this); |
273 // Lock since the map may get relocated now, and other threads sometimes | 337 // Lock since the map may get relocated now, and other threads sometimes |
274 // snapshot it (but they lock before copying it). | 338 // snapshot it (but they lock before copying it). |
275 base::AutoLock lock(lock_); | 339 base::AutoLock lock(lock_); |
276 birth_map_[location] = tracker; | 340 birth_map_[location] = tracker; |
277 return tracker; | 341 return tracker; |
278 } | 342 } |
279 | 343 |
280 void ThreadData::TallyADeath(const Births& the_birth, | 344 void ThreadData::TallyADeath(const Births& birth, |
281 const TimeDelta& queue_duration, | 345 const TimeDelta& queue_duration, |
282 const TimeDelta& run_duration) { | 346 const TimeDelta& run_duration) { |
283 DeathMap::iterator it = death_map_.find(&the_birth); | 347 DeathMap::iterator it = death_map_.find(&birth); |
| 348 DeathData* death_data; |
284 if (it != death_map_.end()) { | 349 if (it != death_map_.end()) { |
285 it->second.RecordDeath(queue_duration, run_duration); | 350 death_data = &it->second; |
286 return; | 351 } else { |
287 } | 352 base::AutoLock lock(lock_); // Lock since the map may get relocated now. |
288 | 353 death_data = &death_map_[&birth]; |
289 base::AutoLock lock(lock_); // Lock since the map may get relocated now. | 354 } // Release lock ASAP. |
290 death_map_[&the_birth].RecordDeath(queue_duration, run_duration); | 355 death_data->RecordDeath(queue_duration, run_duration); |
291 } | 356 } |
292 | 357 |
293 // static | 358 // static |
294 Births* ThreadData::TallyABirthIfActive(const Location& location) { | 359 Births* ThreadData::TallyABirthIfActive(const Location& location) { |
295 #if !defined(TRACK_ALL_TASK_OBJECTS) | 360 if (!kTrackAllTaskObjects) |
296 return NULL; // Not compiled in. | 361 return NULL; // Not compiled in. |
297 #else | 362 |
298 if (!IsActive()) | 363 if (!IsActive()) |
299 return NULL; | 364 return NULL; |
300 ThreadData* current_thread_data = Get(); | 365 ThreadData* current_thread_data = Get(); |
301 if (!current_thread_data) | 366 if (!current_thread_data) |
302 return NULL; | 367 return NULL; |
303 return current_thread_data->TallyABirth(location); | 368 return current_thread_data->TallyABirth(location); |
304 #endif | |
305 } | 369 } |
306 | 370 |
307 // static | 371 // static |
308 void ThreadData::TallyADeathIfActive(const Births* the_birth, | 372 void ThreadData::TallyADeathIfActive(const Births* birth, |
309 const base::TimeTicks& time_posted, | 373 const base::TimeTicks& time_posted, |
310 const base::TimeTicks& delayed_start_time, | 374 const base::TimeTicks& delayed_start_time, |
311 const base::TimeTicks& start_of_run) { | 375 const base::TimeTicks& start_of_run, |
312 #if !defined(TRACK_ALL_TASK_OBJECTS) | 376 const base::TimeTicks& end_of_run) { |
313 return; // Not compiled in. | 377 if (!kTrackAllTaskObjects) |
314 #else | 378 return; // Not compiled in. |
315 if (!IsActive() || !the_birth) | 379 |
| 380 if (!IsActive() || !birth) |
316 return; | 381 return; |
| 382 |
317 ThreadData* current_thread_data = Get(); | 383 ThreadData* current_thread_data = Get(); |
318 if (!current_thread_data) | 384 if (!current_thread_data) |
319 return; | 385 return; |
320 | 386 |
321 // To avoid conflating our stats with the delay duration in a PostDelayedTask, | 387 // To avoid conflating our stats with the delay duration in a PostDelayedTask, |
322 // we identify such tasks, and replace their post_time with the time they | 388 // we identify such tasks, and replace their post_time with the time they |
323 // were scheduled (requested?) to emerge from the delayed task queue. This | 389 // were scheduled (requested?) to emerge from the delayed task queue. This |
324 // means that queueing delay for such tasks will show how long they went | 390 // means that queueing delay for such tasks will show how long they went |
325 // unserviced, after they *could* be serviced. This is the same stat as we | 391 // unserviced, after they *could* be serviced. This is the same stat as we |
326 // have for non-delayed tasks, and we consistently call it queueing delay. | 392 // have for non-delayed tasks, and we consistently call it queueing delay. |
327 base::TimeTicks effective_post_time = | 393 base::TimeTicks effective_post_time = |
328 (delayed_start_time.is_null()) ? time_posted : delayed_start_time; | 394 (delayed_start_time.is_null()) ? time_posted : delayed_start_time; |
329 base::TimeDelta queue_duration = start_of_run - effective_post_time; | 395 base::TimeDelta queue_duration = start_of_run - effective_post_time; |
330 base::TimeDelta run_duration = Now() - start_of_run; | 396 base::TimeDelta run_duration = end_of_run - start_of_run; |
331 current_thread_data->TallyADeath(*the_birth, queue_duration, run_duration); | 397 current_thread_data->TallyADeath(*birth, queue_duration, run_duration); |
332 #endif | |
333 } | 398 } |
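
The comment above explains why delayed tasks measure queueing from their scheduled release time rather than their post time. A tiny numeric walk-through, with plain integer milliseconds standing in for base::TimeTicks; all values are invented for illustration.

#include <assert.h>

// Plain-millisecond sketch of the duration math in TallyADeathIfActive().
void DurationMathExample() {
  const int time_posted = 1000;         // Task posted at t=1000ms...
  const int delayed_start_time = 1500;  // ...asking to run no earlier than t=1500ms.
  const int start_of_run = 1620;        // It actually started at t=1620ms...
  const int end_of_run = 1640;          // ...and finished at t=1640ms.

  // Delayed tasks measure queueing from when they *could* first have run;
  // a zero delayed_start_time plays the role of TimeTicks::is_null().
  const int effective_post_time =
      (delayed_start_time == 0) ? time_posted : delayed_start_time;

  const int queue_duration = start_of_run - effective_post_time;  // 120ms unserviced.
  const int run_duration = end_of_run - start_of_run;             // 20ms of work.

  assert(queue_duration == 120);
  assert(run_duration == 20);
}
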
334 | 399 |
335 // static | 400 // static |
336 ThreadData* ThreadData::first() { | 401 ThreadData* ThreadData::first() { |
337 base::AutoLock lock(list_lock_); | 402 base::AutoLock lock(list_lock_); |
338 return first_; | 403 return all_thread_data_list_head_; |
339 } | 404 } |
340 | 405 |
341 // This may be called from another thread. | 406 // This may be called from another thread. |
342 void ThreadData::SnapshotBirthMap(BirthMap *output) const { | 407 void ThreadData::SnapshotBirthMap(BirthMap *output) const { |
343 base::AutoLock lock(lock_); | 408 base::AutoLock lock(lock_); |
344 for (BirthMap::const_iterator it = birth_map_.begin(); | 409 for (BirthMap::const_iterator it = birth_map_.begin(); |
345 it != birth_map_.end(); ++it) | 410 it != birth_map_.end(); ++it) |
346 (*output)[it->first] = it->second; | 411 (*output)[it->first] = it->second; |
347 } | 412 } |
348 | 413 |
349 // This may be called from another thread. | 414 // This may be called from another thread. |
350 void ThreadData::SnapshotDeathMap(DeathMap *output) const { | 415 void ThreadData::SnapshotDeathMap(DeathMap *output) const { |
351 base::AutoLock lock(lock_); | 416 base::AutoLock lock(lock_); |
352 for (DeathMap::const_iterator it = death_map_.begin(); | 417 for (DeathMap::const_iterator it = death_map_.begin(); |
353 it != death_map_.end(); ++it) | 418 it != death_map_.end(); ++it) |
354 (*output)[it->first] = it->second; | 419 (*output)[it->first] = it->second; |
355 } | 420 } |
356 | 421 |
357 // static | 422 // static |
358 void ThreadData::ResetAllThreadData() { | 423 void ThreadData::ResetAllThreadData() { |
359 ThreadData* my_list = Get()->first(); | 424 ThreadData* my_list = first(); |
360 | 425 |
361 for (ThreadData* thread_data = my_list; | 426 for (ThreadData* thread_data = my_list; |
362 thread_data; | 427 thread_data; |
363 thread_data = thread_data->next()) | 428 thread_data = thread_data->next()) |
364 thread_data->Reset(); | 429 thread_data->Reset(); |
365 } | 430 } |
366 | 431 |
367 void ThreadData::Reset() { | 432 void ThreadData::Reset() { |
368 base::AutoLock lock(lock_); | 433 base::AutoLock lock(lock_); |
369 for (DeathMap::iterator it = death_map_.begin(); | 434 for (DeathMap::iterator it = death_map_.begin(); |
370 it != death_map_.end(); ++it) | 435 it != death_map_.end(); ++it) |
371 it->second.Clear(); | 436 it->second.Clear(); |
372 for (BirthMap::iterator it = birth_map_.begin(); | 437 for (BirthMap::iterator it = birth_map_.begin(); |
373 it != birth_map_.end(); ++it) | 438 it != birth_map_.end(); ++it) |
374 it->second->Clear(); | 439 it->second->Clear(); |
375 } | 440 } |
376 | 441 |
377 // static | 442 // static |
378 bool ThreadData::StartTracking(bool status) { | 443 bool ThreadData::StartTracking(bool status) { |
379 #if !defined(TRACK_ALL_TASK_OBJECTS) | 444 if (!kTrackAllTaskObjects) |
380 return false; // Not compiled in. | 445 return false; // Not compiled in. |
381 #else | 446 |
| 447 // Do a bit of class initialization. |
| 448 if (!unregistered_thread_data_pool_) { |
| 449 ThreadDataPool* initial_pool = new ThreadDataPool; |
| 450 { |
| 451 base::AutoLock lock(list_lock_); |
| 452 if (!unregistered_thread_data_pool_) { |
| 453 unregistered_thread_data_pool_ = initial_pool; |
| 454 initial_pool = NULL; |
| 455 } |
| 456 } |
| 457 delete initial_pool; // In case it was not used. |
| 458 } |
| 459 |
| 460 // Perform the "real" initialization now, and leave it intact through |
| 461 // process termination. |
| 462 if (!tls_index_.initialized()) |
| 463 tls_index_.Initialize(&ThreadData::OnThreadTermination); |
| 464 DCHECK(tls_index_.initialized()); |
| 465 |
382 if (!status) { | 466 if (!status) { |
383 base::AutoLock lock(list_lock_); | 467 base::AutoLock lock(list_lock_); |
384 DCHECK(status_ == ACTIVE || status_ == SHUTDOWN); | 468 DCHECK(status_ == ACTIVE || status_ == SHUTDOWN); |
385 status_ = SHUTDOWN; | 469 status_ = SHUTDOWN; |
386 return true; | 470 return true; |
387 } | 471 } |
388 base::AutoLock lock(list_lock_); | 472 base::AutoLock lock(list_lock_); |
389 DCHECK_EQ(UNINITIALIZED, status_); | 473 DCHECK_EQ(UNINITIALIZED, status_); |
390 CHECK(tls_index_.Initialize(NULL)); | |
391 status_ = ACTIVE; | 474 status_ = ACTIVE; |
392 return true; | 475 return true; |
393 #endif | |
394 } | 476 } |
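
StartTracking() above installs the worker-thread pool with an allocate-outside-the-lock, publish-under-the-lock idiom so the critical section stays tiny even if two threads race through initialization. A generic sketch of that idiom; Registry, g_registry and g_mutex are illustrative names only, not part of this CL.

#include <cstddef>
#include <mutex>

struct Registry {};  // Stands in for the shared structure being created lazily.

static Registry* g_registry = NULL;
static std::mutex g_mutex;

void EnsureRegistry() {
  if (g_registry)
    return;  // Unsynchronized fast-path check, mirroring StartTracking().
  Registry* candidate = new Registry;  // Allocate outside the lock.
  {
    std::lock_guard<std::mutex> lock(g_mutex);
    if (!g_registry) {
      g_registry = candidate;  // We won the race; publish our instance.
      candidate = NULL;
    }
  }
  delete candidate;  // We lost the race; discard the unused instance.
}
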
395 | 477 |
396 // static | 478 // static |
397 bool ThreadData::IsActive() { | 479 bool ThreadData::IsActive() { |
398 return status_ == ACTIVE; | 480 return status_ == ACTIVE; |
399 } | 481 } |
400 | 482 |
401 // static | 483 // static |
402 base::TimeTicks ThreadData::Now() { | 484 base::TimeTicks ThreadData::Now() { |
403 #if defined(TRACK_ALL_TASK_OBJECTS) | 485 if (kTrackAllTaskObjects && status_ == ACTIVE) |
404 if (status_ == ACTIVE) | |
405 return base::TimeTicks::Now(); | 486 return base::TimeTicks::Now(); |
406 #endif | |
407 return base::TimeTicks(); // Super fast when disabled, or not compiled in. | 487 return base::TimeTicks(); // Super fast when disabled, or not compiled in. |
408 } | 488 } |
409 | 489 |
410 // static | 490 // static |
411 void ThreadData::ShutdownSingleThreadedCleanup() { | 491 void ThreadData::ShutdownSingleThreadedCleanup() { |
| 492 // This is only called from test code, where we need to cleanup so that |
| 493 // additional tests can be run. |
412 // We must be single threaded... but be careful anyway. | 494 // We must be single threaded... but be careful anyway. |
413 if (!StartTracking(false)) | 495 if (!StartTracking(false)) |
414 return; | 496 return; |
415 ThreadData* thread_data_list; | 497 ThreadData* thread_data_list; |
| 498 ThreadDataPool* final_pool; |
416 { | 499 { |
417 base::AutoLock lock(list_lock_); | 500 base::AutoLock lock(list_lock_); |
418 thread_data_list = first_; | 501 thread_data_list = all_thread_data_list_head_; |
419 first_ = NULL; | 502 all_thread_data_list_head_ = NULL; |
| 503 final_pool = unregistered_thread_data_pool_; |
| 504 unregistered_thread_data_pool_ = NULL; |
420 } | 505 } |
421 | 506 |
| 507 if (final_pool) { |
| 508 // The thread_data_list contains *all* the instances, and we'll use it to |
| 509 // delete them. This pool has pointers to some instances, and we just |
| 510 // have to drop those pointers (and not do the deletes here). |
| 511 while (!final_pool->empty()) |
| 512 final_pool->pop(); |
| 513 delete final_pool; |
| 514 } |
| 515 |
| 516 // Do actual recursive delete in all ThreadData instances. |
422 while (thread_data_list) { | 517 while (thread_data_list) { |
423 ThreadData* next_thread_data = thread_data_list; | 518 ThreadData* next_thread_data = thread_data_list; |
424 thread_data_list = thread_data_list->next(); | 519 thread_data_list = thread_data_list->next(); |
425 | 520 |
426 for (BirthMap::iterator it = next_thread_data->birth_map_.begin(); | 521 for (BirthMap::iterator it = next_thread_data->birth_map_.begin(); |
427 next_thread_data->birth_map_.end() != it; ++it) | 522 next_thread_data->birth_map_.end() != it; ++it) |
428 delete it->second; // Delete the Birth Records. | 523 delete it->second; // Delete the Birth Records. |
429 next_thread_data->birth_map_.clear(); | 524 next_thread_data->birth_map_.clear(); |
430 next_thread_data->death_map_.clear(); | 525 next_thread_data->death_map_.clear(); |
431 delete next_thread_data; // Includes all Death Records. | 526 delete next_thread_data; // Includes all Death Records. |
432 } | 527 } |
433 | 528 // Put most global statics back in pristine shape. |
434 CHECK(tls_index_.initialized()); | 529 thread_number_counter_ = 0; |
435 tls_index_.Free(); | 530 tls_index_.Set(NULL); |
436 DCHECK(!tls_index_.initialized()); | |
437 status_ = UNINITIALIZED; | 531 status_ = UNINITIALIZED; |
438 } | 532 } |
439 | 533 |
440 //------------------------------------------------------------------------------ | 534 //------------------------------------------------------------------------------ |
441 // Individual 3-tuple of birth (place and thread) along with death thread, and | 535 // Individual 3-tuple of birth (place and thread) along with death thread, and |
442 // the accumulated stats for instances (DeathData). | 536 // the accumulated stats for instances (DeathData). |
443 | 537 |
444 Snapshot::Snapshot(const BirthOnThread& birth_on_thread, | 538 Snapshot::Snapshot(const BirthOnThread& birth_on_thread, |
445 const ThreadData& death_thread, | 539 const ThreadData& death_thread, |
446 const DeathData& death_data) | 540 const DeathData& death_data) |
447 : birth_(&birth_on_thread), | 541 : birth_(&birth_on_thread), |
448 death_thread_(&death_thread), | 542 death_thread_(&death_thread), |
449 death_data_(death_data) { | 543 death_data_(death_data) { |
450 } | 544 } |
451 | 545 |
452 Snapshot::Snapshot(const BirthOnThread& birth_on_thread, int count) | 546 Snapshot::Snapshot(const BirthOnThread& birth_on_thread, int count) |
453 : birth_(&birth_on_thread), | 547 : birth_(&birth_on_thread), |
454 death_thread_(NULL), | 548 death_thread_(NULL), |
455 death_data_(DeathData(count)) { | 549 death_data_(DeathData(count)) { |
456 } | 550 } |
457 | 551 |
458 const std::string Snapshot::DeathThreadName() const { | 552 const std::string Snapshot::DeathThreadName() const { |
459 if (death_thread_) | 553 if (death_thread_) |
460 return death_thread_->thread_name(); | 554 return death_thread_->thread_name(); |
461 return "Still_Alive"; | 555 return "Still_Alive"; |
462 } | 556 } |
463 | 557 |
464 void Snapshot::Write(std::string* output) const { | 558 void Snapshot::WriteHTML(std::string* output) const { |
465 death_data_.Write(output); | 559 death_data_.WriteHTML(output); |
466 base::StringAppendF(output, "%s->%s ", | 560 base::StringAppendF(output, "%s->%s ", |
467 birth_->birth_thread()->thread_name().c_str(), | 561 birth_->birth_thread()->thread_name().c_str(), |
468 death_thread_->thread_name().c_str()); | 562 DeathThreadName().c_str()); |
469 birth_->location().Write(true, true, output); | 563 birth_->location().Write(true, true, output); |
470 } | 564 } |
471 | 565 |
| 566 base::DictionaryValue* Snapshot::ToValue() const { |
| 567 base::DictionaryValue* dictionary = new base::DictionaryValue; |
| 568 dictionary->Set("death_data", death_data_.ToValue()); |
| 569 dictionary->Set("birth_thread", |
| 570 base::Value::CreateStringValue(birth_->birth_thread()->thread_name())); |
| 571 dictionary->Set("death_thread", |
| 572 base::Value::CreateStringValue(DeathThreadName())); |
| 573 dictionary->Set("location", birth_->location().ToValue()); |
| 574 return dictionary; |
| 575 } |
| 576 |
472 void Snapshot::Add(const Snapshot& other) { | 577 void Snapshot::Add(const Snapshot& other) { |
473 death_data_.AddDeathData(other.death_data_); | 578 death_data_.AddDeathData(other.death_data_); |
474 } | 579 } |
475 | 580 |
476 //------------------------------------------------------------------------------ | 581 //------------------------------------------------------------------------------ |
477 // DataCollector | 582 // DataCollector |
478 | 583 |
479 DataCollector::DataCollector() { | 584 DataCollector::DataCollector() { |
480 DCHECK(ThreadData::IsActive()); | 585 if (!ThreadData::IsActive()) |
| 586 return; |
481 | 587 |
482 // Get an unchanging copy of a ThreadData list. | 588 // Get an unchanging copy of a ThreadData list. |
483 ThreadData* my_list = ThreadData::Get()->first(); | 589 ThreadData* my_list = ThreadData::first(); |
484 | 590 |
485 // Gather data serially. | 591 // Gather data serially. |
486 // This hackish approach *can* get some slightly corrupt tallies, as we are | 592 // This hackish approach *can* get some slightly corrupt tallies, as we are |
487 // grabbing values without the protection of a lock, but it has the advantage | 593 // grabbing values without the protection of a lock, but it has the advantage |
488 // of working even with threads that don't have message loops. If a user | 594 // of working even with threads that don't have message loops. If a user |
489 // sees any strangeness, they can always just run their stats gathering a | 595 // sees any strangeness, they can always just run their stats gathering a |
490 // second time. | 596 // second time. |
491 for (ThreadData* thread_data = my_list; | 597 for (ThreadData* thread_data = my_list; |
492 thread_data; | 598 thread_data; |
493 thread_data = thread_data->next()) { | 599 thread_data = thread_data->next()) { |
(...skipping 28 matching lines...)
522 } | 628 } |
523 | 629 |
524 void DataCollector::AddListOfLivingObjects() { | 630 void DataCollector::AddListOfLivingObjects() { |
525 for (BirthCount::iterator it = global_birth_count_.begin(); | 631 for (BirthCount::iterator it = global_birth_count_.begin(); |
526 it != global_birth_count_.end(); ++it) { | 632 it != global_birth_count_.end(); ++it) { |
527 if (it->second > 0) | 633 if (it->second > 0) |
528 collection_.push_back(Snapshot(*it->first, it->second)); | 634 collection_.push_back(Snapshot(*it->first, it->second)); |
529 } | 635 } |
530 } | 636 } |
531 | 637 |
| 638 base::ListValue* DataCollector::ToValue() const { |
| 639 base::ListValue* list = new base::ListValue; |
| 640 for (size_t i = 0; i < collection_.size(); ++i) { |
| 641 list->Append(collection_[i].ToValue()); |
| 642 } |
| 643 return list; |
| 644 } |
| 645 |
532 //------------------------------------------------------------------------------ | 646 //------------------------------------------------------------------------------ |
533 // Aggregation | 647 // Aggregation |
534 | 648 |
535 Aggregation::Aggregation() | 649 Aggregation::Aggregation() |
536 : birth_count_(0) { | 650 : birth_count_(0) { |
537 } | 651 } |
538 | 652 |
539 Aggregation::~Aggregation() { | 653 Aggregation::~Aggregation() { |
540 } | 654 } |
541 | 655 |
(...skipping 10 matching lines...)
552 void Aggregation::AddBirth(const BirthOnThread& birth) { | 666 void Aggregation::AddBirth(const BirthOnThread& birth) { |
553 AddBirthPlace(birth.location()); | 667 AddBirthPlace(birth.location()); |
554 birth_threads_[birth.birth_thread()]++; | 668 birth_threads_[birth.birth_thread()]++; |
555 } | 669 } |
556 | 670 |
557 void Aggregation::AddBirthPlace(const Location& location) { | 671 void Aggregation::AddBirthPlace(const Location& location) { |
558 locations_[location]++; | 672 locations_[location]++; |
559 birth_files_[location.file_name()]++; | 673 birth_files_[location.file_name()]++; |
560 } | 674 } |
561 | 675 |
562 void Aggregation::Write(std::string* output) const { | 676 void Aggregation::WriteHTML(std::string* output) const { |
563 if (locations_.size() == 1) { | 677 if (locations_.size() == 1) { |
564 locations_.begin()->first.Write(true, true, output); | 678 locations_.begin()->first.Write(true, true, output); |
565 } else { | 679 } else { |
566 base::StringAppendF(output, "%" PRIuS " Locations. ", locations_.size()); | 680 base::StringAppendF(output, "%" PRIuS " Locations. ", locations_.size()); |
567 if (birth_files_.size() > 1) { | 681 if (birth_files_.size() > 1) { |
568 base::StringAppendF(output, "%" PRIuS " Files. ", birth_files_.size()); | 682 base::StringAppendF(output, "%" PRIuS " Files. ", birth_files_.size()); |
569 } else { | 683 } else { |
570 base::StringAppendF(output, "All born in %s. ", | 684 base::StringAppendF(output, "All born in %s. ", |
571 birth_files_.begin()->first.c_str()); | 685 birth_files_.begin()->first.c_str()); |
572 } | 686 } |
(...skipping 15 matching lines...)
588 base::StringAppendF(output, "All deleted on %s. ", | 702 base::StringAppendF(output, "All deleted on %s. ", |
589 death_threads_.begin()->first->thread_name().c_str()); | 703 death_threads_.begin()->first->thread_name().c_str()); |
590 } else { | 704 } else { |
591 output->append("All these objects are still alive."); | 705 output->append("All these objects are still alive."); |
592 } | 706 } |
593 } | 707 } |
594 | 708 |
595 if (birth_count_ > 1) | 709 if (birth_count_ > 1) |
596 base::StringAppendF(output, "Births=%d ", birth_count_); | 710 base::StringAppendF(output, "Births=%d ", birth_count_); |
597 | 711 |
598 DeathData::Write(output); | 712 DeathData::WriteHTML(output); |
599 } | 713 } |
600 | 714 |
601 void Aggregation::Clear() { | 715 void Aggregation::Clear() { |
602 birth_count_ = 0; | 716 birth_count_ = 0; |
603 birth_files_.clear(); | 717 birth_files_.clear(); |
604 locations_.clear(); | 718 locations_.clear(); |
605 birth_threads_.clear(); | 719 birth_threads_.clear(); |
606 DeathData::Clear(); | 720 DeathData::Clear(); |
607 death_threads_.clear(); | 721 death_threads_.clear(); |
608 } | 722 } |
(...skipping 329 matching lines...)
938 | 1052 |
939 default: | 1053 default: |
940 break; | 1054 break; |
941 } | 1055 } |
942 if (tiebreaker_ && !use_tiebreaker_for_sort_only_) { | 1056 if (tiebreaker_ && !use_tiebreaker_for_sort_only_) { |
943 wrote_data |= tiebreaker_->WriteSortGrouping(sample, output); | 1057 wrote_data |= tiebreaker_->WriteSortGrouping(sample, output); |
944 } | 1058 } |
945 return wrote_data; | 1059 return wrote_data; |
946 } | 1060 } |
947 | 1061 |
948 void Comparator::WriteSnapshot(const Snapshot& sample, | 1062 void Comparator::WriteSnapshotHTML(const Snapshot& sample, |
949 std::string* output) const { | 1063 std::string* output) const { |
950 sample.death_data().Write(output); | 1064 sample.death_data().WriteHTML(output); |
951 if (!(combined_selectors_ & BIRTH_THREAD) || | 1065 if (!(combined_selectors_ & BIRTH_THREAD) || |
952 !(combined_selectors_ & DEATH_THREAD)) | 1066 !(combined_selectors_ & DEATH_THREAD)) |
953 base::StringAppendF(output, "%s->%s ", | 1067 base::StringAppendF(output, "%s->%s ", |
954 (combined_selectors_ & BIRTH_THREAD) ? "*" : | 1068 (combined_selectors_ & BIRTH_THREAD) ? "*" : |
955 sample.birth().birth_thread()->thread_name().c_str(), | 1069 sample.birth().birth_thread()->thread_name().c_str(), |
956 (combined_selectors_ & DEATH_THREAD) ? "*" : | 1070 (combined_selectors_ & DEATH_THREAD) ? "*" : |
957 sample.DeathThreadName().c_str()); | 1071 sample.DeathThreadName().c_str()); |
958 sample.birth().location().Write(!(combined_selectors_ & BIRTH_FILE), | 1072 sample.birth().location().Write(!(combined_selectors_ & BIRTH_FILE), |
959 !(combined_selectors_ & BIRTH_FUNCTION), | 1073 !(combined_selectors_ & BIRTH_FUNCTION), |
960 output); | 1074 output); |
961 } | 1075 } |
962 | 1076 |
963 } // namespace tracked_objects | 1077 } // namespace tracked_objects |