Chromium Code Reviews

Side by Side Diff: base/tracked_objects.h

Issue 1021053003: Delivering the FIRST_NONEMPTY_PAINT phase changing event to base/ (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@phase_splitting
Patch Set: More comments. Created 5 years, 8 months ago
OLD | NEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef BASE_TRACKED_OBJECTS_H_ 5 #ifndef BASE_TRACKED_OBJECTS_H_
6 #define BASE_TRACKED_OBJECTS_H_ 6 #define BASE_TRACKED_OBJECTS_H_
7 7
8 #include <map> 8 #include <map>
9 #include <set> 9 #include <set>
10 #include <stack> 10 #include <stack>
11 #include <string> 11 #include <string>
12 #include <utility> 12 #include <utility>
13 #include <vector> 13 #include <vector>
14 14
15 #include "base/base_export.h" 15 #include "base/base_export.h"
16 #include "base/basictypes.h" 16 #include "base/basictypes.h"
17 #include "base/containers/hash_tables.h" 17 #include "base/containers/hash_tables.h"
18 #include "base/gtest_prod_util.h" 18 #include "base/gtest_prod_util.h"
19 #include "base/lazy_instance.h" 19 #include "base/lazy_instance.h"
20 #include "base/location.h" 20 #include "base/location.h"
21 #include "base/process/process_handle.h" 21 #include "base/process/process_handle.h"
22 #include "base/profiler/alternate_timer.h" 22 #include "base/profiler/alternate_timer.h"
23 #include "base/profiler/tracked_time.h" 23 #include "base/profiler/tracked_time.h"
24 #include "base/synchronization/lock.h" 24 #include "base/synchronization/lock.h"
25 #include "base/threading/thread_checker.h"
25 #include "base/threading/thread_local_storage.h" 26 #include "base/threading/thread_local_storage.h"
26 27
27 namespace base { 28 namespace base {
28 struct TrackingInfo; 29 struct TrackingInfo;
29 } 30 }
30 31
31 // TrackedObjects provides a database of stats about objects (generally Tasks) 32
32 // that are tracked. Tracking means their birth, death, duration, birth thread, 33
33 // death thread, and birth place are recorded. This data is carefully spread 34
34 // across a series of objects so that the counts and times can be rapidly 35 // across a series of objects so that the counts and times can be rapidly
(...skipping 43 matching lines...)
78 // can find out a Task's location of birth, and thread of birth, without using 79
79 // any locks, as all that data is constant across the life of the process. 80
80 // 81
81 // The above work *could* also be done for any other object as well by calling 82
82 // TallyABirthIfActive() and TallyRunOnNamedThreadIfTracking() as appropriate. 83
83 // 84
84 // The amount of memory used in the above data structures depends on how many 85
85 // threads there are, and how many Locations of construction there are. 86
86 // Fortunately, we don't use memory that is the product of those two counts, but 87
87 // rather we only need one Births instance for each thread that constructs an 88
88 // instance at a Location. In many cases, instances are only created on one 89
89 // thread, so the memory utilization is actually fairly restrained. 90
90 // 91
91 // Lastly, when an instance is deleted, the final tallies of statistics are 92
92 // carefully accumulated. That tallying writes into slots (members) in a 93
93 // collection of DeathData instances. For each birth place Location that is 94
94 // destroyed on a thread, there is a DeathData instance to record the additional 95
95 // death count, as well as accumulate the run-time and queue-time durations for 96
96 // the instance as it is destroyed (dies). By maintaining a single place to 97
97 // aggregate this running sum *only* for the given thread, we avoid the need to 98
98 // lock such DeathData instances. (i.e., these accumulated stats in a DeathData 99
99 // instance are exclusively updated by the singular owning thread). 100
100 // 101
101 // With the above life cycle description complete, the major remaining detail 102
102 // is explaining how each thread maintains a list of DeathData instances, and 103
103 // of Births instances, and is able to avoid additional (redundant/unnecessary) 104
104 // allocations. 105
105 // 106
106 // Each thread maintains a list of data items specific to that thread in a 107
107 // ThreadData instance (for that specific thread only). The two critical items 108
108 // are lists of DeathData and Births instances. These lists are maintained in 109
109 // STL maps, which are indexed by Location. As noted earlier, we can compare 110
110 // locations very efficiently as we consider the underlying data (file, 111
111 // function, line) to be atoms, and hence pointer comparison is used rather than 112
112 // (slow) string comparisons. 113
113 // 114
114 // To provide a mechanism for iterating over all "known threads," which means 115
115 // threads that have recorded a birth or a death, we create a singly linked list 116
116 // of ThreadData instances. Each such instance maintains a pointer to the next 117
117 // one. A static member of ThreadData provides a pointer to the first item on 118
118 // this global list, and access via that all_thread_data_list_head_ item 119
119 // requires the use of the list_lock_. 120
120 // When a new ThreadData instance is added to the global list, it is pre-pended, 121
121 // which ensures that any prior acquisition of the list is valid (i.e., the 122
122 // holder can iterate over it without fear of it changing, or the necessity of 123
123 // using an additional lock). Iterations are actually pretty rare (used 124
124 // primarily for cleanup, or snapshotting data for display), so this lock has 125
125 // very little global performance impact. 126
126 // 127
(...skipping 14 matching lines...)
141 // (3) the snapshotted data. 142
142 // 143
143 // For a given birth location, information about births is spread across data 144
144 // structures that are asynchronously changing on various threads. For 145
145 // serialization and display purposes, we need to construct TaskSnapshot 146
146 // instances for each combination of birth thread, death thread, and location, 147
147 // along with the count of such lifetimes. We gather such data into 148
148 // TaskSnapshot instances, so that such instances can be sorted and 149
149 // aggregated (and remain frozen during our processing). 150
150 // 151
151 // Profiling consists of phases. The concrete phase in the sequence of phases is 152 // Profiling consists of phases. The concrete phase in the sequence of phases
152 // identified by its 0-based index. 153 // is identified by its 0-based index.
153 // 154
154 // The ProcessDataPhaseSnapshot struct is a serialized representation of the 155
155 // list of ThreadData objects for a process for a concrete profiling phase. It 156
156 // holds a set of TaskSnapshots and tracks parent/child relationships for the 157
157 // executed tasks. The statistics in a snapshot are gathered asynchronously 158
158 // relative to their ongoing updates. 159
159 // It is possible, though highly unlikely, that stats could be incorrectly 160
160 // recorded by this process (all data is held in 32 bit ints, but we are not 161
161 // atomically collecting all data, so we could have a count that does not, for 162
162 // example, match with the number of durations we accumulated). The advantage 163
163 // of having fast (non-atomic) updates of the data outweighs the minimal risk of 164
164 // a singular corrupt statistic snapshot (only the snapshot could be corrupt, 165
165 // not the underlying and ongoing statistic). In contrast, pointer data that 166
166 // is accessed during snapshotting is completely invariant, and hence is 167
167 // perfectly acquired (i.e., no potential corruption, and no risk of a bad 168
(...skipping 79 matching lines...)
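To make the birth/death bookkeeping described above concrete, here is a minimal sketch of how code outside the message loop could drive the tallying API declared further down in this header. It assumes a Chromium checkout where base/tracked_objects.h builds; ThreadData::Now() and TaskStopwatch::Start()/Stop() come from portions of the header that the diff viewer skips, and the timed work is only a placeholder.

    #include "base/location.h"
    #include "base/tracked_objects.h"

    void RunAndTallyOnce() {
      // Record the birth (construction/post) against this source location.
      tracked_objects::Births* births =
          tracked_objects::ThreadData::TallyABirthIfActive(FROM_HERE);
      const tracked_objects::TrackedTime time_posted =
          tracked_objects::ThreadData::Now();

      // Time the run; nested stopwatches coordinate to avoid double counting.
      tracked_objects::TaskStopwatch stopwatch;
      stopwatch.Start();
      // ... placeholder for the tracked work ...
      stopwatch.Stop();

      // Record the death (completion) on the thread that performed the run.
      tracked_objects::ThreadData::TallyRunOnWorkerThreadIfTracking(
          births, time_posted, stopwatch);
    }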
247 void RecordBirth(); 248 void RecordBirth();
248 249
249 private: 250 private:
250 // The number of births on this thread for our location_. 251 // The number of births on this thread for our location_.
251 int birth_count_; 252 int birth_count_;
252 253
253 DISALLOW_COPY_AND_ASSIGN(Births); 254 DISALLOW_COPY_AND_ASSIGN(Births);
254 }; 255 };
255 256
256 //------------------------------------------------------------------------------ 257 //------------------------------------------------------------------------------
257 // Basic info summarizing multiple destructions of a tracked object with a 258 // A "snapshotted" representation of the DeathData class.
258 // single birthplace (fixed Location). Used both on specific threads, and also 259
259 // in snapshots when integrating assembled data. 260 struct BASE_EXPORT DeathDataSnapshot {
261 DeathDataSnapshot();
262
263 // Constructs the snapshot from individual values.
264 // The alternative would be taking a DeathData parameter, but this would
265 // create a loop since DeathData indirectly refers to DeathDataSnapshot. Passing
266 // a wrapper structure as a param or using an empty constructor for
267 // snapshotting DeathData would be less efficient.
268 DeathDataSnapshot(int count,
269 int32 run_duration_sum,
270 int32 run_duration_max,
271 int32 run_duration_sample,
272 int32 queue_duration_sum,
273 int32 queue_duration_max,
274 int32 queue_duration_sample);
275 ~DeathDataSnapshot();
276
277 // Calculates and returns the delta between this snapshot and an earlier
278 // snapshot of the same task |older|.
279 DeathDataSnapshot Delta(const DeathDataSnapshot& older) const;
280
281 int count;
282 int32 run_duration_sum;
283 int32 run_duration_max;
284 int32 run_duration_sample;
285 int32 queue_duration_sum;
286 int32 queue_duration_max;
287 int32 queue_duration_sample;
288 };
289
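As a small usage sketch (assuming a Chromium build): Delta() is what turns two cumulative snapshots of the same task into the activity that happened between them, which is how per-phase numbers are derived from running totals.

    #include "base/tracked_objects.h"

    // |current| and |older| must be snapshots of the same birth location, with
    // |older| taken earlier (e.g. at the previous phase boundary).
    tracked_objects::DeathDataSnapshot ActivitySince(
        const tracked_objects::DeathDataSnapshot& current,
        const tracked_objects::DeathDataSnapshot& older) {
      // The cumulative fields of the result (count and the *_sum values) then
      // cover only the interval between the two snapshots.
      return current.Delta(older);
    }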
290 //------------------------------------------------------------------------------
291 // A "snapshotted" representation of the DeathData for a particular profiling
292 // phase. Used as an element of the list of phase snapshots owned by DeathData.
293
294 struct DeathDataPhaseSnapshot {
295 DeathDataPhaseSnapshot(int profiling_phase,
296 int count,
297 int32 run_duration_sum,
298 int32 run_duration_max,
299 int32 run_duration_sample,
300 int32 queue_duration_sum,
301 int32 queue_duration_max,
302 int32 queue_duration_sample,
303 const DeathDataPhaseSnapshot* prev);
304
305 // Profiling phase at whose completion this snapshot was taken.
306 int profiling_phase;
307
308 // Death data snapshot.
309 DeathDataSnapshot death_data;
310
311 // Pointer to a snapshot from the previous phase.
312 const DeathDataPhaseSnapshot* prev;
313 };
314
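Because DeathData keeps these per-phase snapshots chained through |prev| (see last_phase_snapshot_ further down), a consumer can walk the completed phases from newest to oldest. A sketch, assuming a Chromium build:

    #include "base/logging.h"
    #include "base/tracked_objects.h"

    // |newest| would be DeathData::last_phase_snapshot(), i.e. the most
    // recently completed phase, or NULL if no phase has completed yet.
    void LogCompletedPhases(
        const tracked_objects::DeathDataPhaseSnapshot* newest) {
      for (const tracked_objects::DeathDataPhaseSnapshot* phase = newest; phase;
           phase = phase->prev) {
        VLOG(1) << "phase " << phase->profiling_phase
                << ": death count " << phase->death_data.count;
      }
    }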
315 //------------------------------------------------------------------------------
316 // Information about deaths of a task on a given thread, called "death thread".
317 // Access to members of this class is never protected by a lock. The fields
318 // are accessed in such a way that corruptions resulting from race conditions
319 // are not significant, and don't accumulate as a result of multiple accesses.
320 // All invocations of DeathData::OnProfilingPhaseCompleted and
321 // ThreadData::SnapshotMaps (which takes a DeathData snapshot) in a given process
322 // must be called from the same thread. It doesn't matter which thread it is, but
323 // it's important that the same thread is used as the snapshot thread during the whole
324 // process lifetime. All fields except sample_probability_count_ can be
325 // snapshotted.
260 326
261 class BASE_EXPORT DeathData { 327 class BASE_EXPORT DeathData {
262 public: 328 public:
263 // Default initializer.
264 DeathData(); 329 DeathData();
265 330 DeathData(const DeathData& other);
266 // When deaths have not yet taken place, and we gather data from all the 331 ~DeathData();
267 // threads, we create DeathData stats that tally the number of births without
268 // a corresponding death.
269 explicit DeathData(int count);
270 332
271 // Update stats for a task destruction (death) that had a Run() time of 333 // Update stats for a task destruction (death) that had a Run() time of
272 // |duration|, and has had a queueing delay of |queue_duration|. 334 // |duration|, and has had a queueing delay of |queue_duration|.
273 void RecordDeath(const int32 queue_duration, 335 void RecordDeath(const int32 queue_duration,
274 const int32 run_duration, 336 const int32 run_duration,
275 const uint32 random_number); 337 const uint32 random_number);
276 338
277 // Metrics accessors, used only for serialization and in tests. 339 // Metrics and past snapshots accessors, used only for serialization and in
340 // tests.
278 int count() const; 341 int count() const;
279 int32 run_duration_sum() const; 342 int32 run_duration_sum() const;
280 int32 run_duration_max() const; 343 int32 run_duration_max() const;
281 int32 run_duration_sample() const; 344 int32 run_duration_sample() const;
282 int32 queue_duration_sum() const; 345 int32 queue_duration_sum() const;
283 int32 queue_duration_max() const; 346 int32 queue_duration_max() const;
284 int32 queue_duration_sample() const; 347 int32 queue_duration_sample() const;
348 const DeathDataPhaseSnapshot* last_phase_snapshot() const;
285 349
286 // Reset all tallies to zero. This is used as a hack on realtime data. 350 // Called when the current profiling phase, identified by |profiling_phase|,
287 void Clear(); 351 // ends.
352 // Must be called only on the snapshot thread.
353 void OnProfilingPhaseCompleted(int profiling_phase);
288 354
289 private: 355 private:
290 // Members are ordered from most regularly read and updated, to least 356 // Members are ordered from most regularly read and updated, to least
291 // frequently used. This might help a bit with cache lines. 357 // frequently used. This might help a bit with cache lines.
292 // Number of runs seen (divisor for calculating averages). 358 // Number of runs seen (divisor for calculating averages).
359 // Can be incremented only on the death thread.
293 int count_; 360 int count_;
294 // Basic tallies, used to compute averages. 361
362 // Count used in determining probability of selecting exec/queue times from a
363 // recorded death as samples.
364 // Gets incremented only on the death thread, but can be set to 0 by
365 // OnProfilingPhaseCompleted() on the snapshot thread.
366 int sample_probability_count_;
367
368 // Basic tallies, used to compute averages. Can be incremented only on the
369 // death thread.
295 int32 run_duration_sum_; 370 int32 run_duration_sum_;
296 int32 queue_duration_sum_; 371 int32 queue_duration_sum_;
297 // Max values, used by local visualization routines. These are often read, 372 // Max values, used by local visualization routines. These are often read,
298 // but rarely updated. 373 // but rarely updated. The max values get assigned only on the death thread,
374 // but these fields can be set to 0 by OnProfilingPhaseCompleted() on the
375 // snapshot thread.
299 int32 run_duration_max_; 376 int32 run_duration_max_;
300 int32 queue_duration_max_; 377 int32 queue_duration_max_;
301 // Samples, used by crowd sourcing gatherers. These are almost never read, 378 // Samples, used by crowd sourcing gatherers. These are almost never read,
302 // and rarely updated. 379 // and rarely updated. They can be modified only on the death thread.
303 int32 run_duration_sample_; 380 int32 run_duration_sample_;
304 int32 queue_duration_sample_; 381 int32 queue_duration_sample_;
382
383 // Snapshot of this death data made at the last profiling phase completion, if
384 // any. DeathData owns the whole list starting with this pointer.
385 // Can be accessed only on the snapshot thread.
386 const DeathDataPhaseSnapshot* last_phase_snapshot_;
387
388 DISALLOW_ASSIGN(DeathData);
305 }; 389 };
306 390
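For reference, this is roughly how a test might exercise the class on a single thread, which then stands in for both the death thread and the snapshot thread (a sketch assuming a Chromium build; the durations are arbitrary):

    #include "base/logging.h"
    #include "base/tracked_objects.h"

    void RecordAndSnapshotOneDeath(tracked_objects::DeathData* death_data) {
      const int32 queue_duration = 4;   // Milliseconds spent queued.
      const int32 run_duration = 7;     // Milliseconds spent running.
      const uint32 random_number = 42;  // Drives the sampling decision.
      death_data->RecordDeath(queue_duration, run_duration, random_number);

      DCHECK_EQ(1, death_data->count());
      DCHECK_EQ(7, death_data->run_duration_sum());

      // Completing phase 0 records a snapshot on the list owned by
      // |death_data|.
      death_data->OnProfilingPhaseCompleted(0);
      DCHECK(death_data->last_phase_snapshot());
    }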
307 //------------------------------------------------------------------------------ 391 //------------------------------------------------------------------------------
308 // A "snapshotted" representation of the DeathData class.
309
310 struct BASE_EXPORT DeathDataSnapshot {
311 DeathDataSnapshot();
312 explicit DeathDataSnapshot(const DeathData& death_data);
313 ~DeathDataSnapshot();
314
315 int count;
316 int32 run_duration_sum;
317 int32 run_duration_max;
318 int32 run_duration_sample;
319 int32 queue_duration_sum;
320 int32 queue_duration_max;
321 int32 queue_duration_sample;
322 };
323
324 //------------------------------------------------------------------------------
325 // A temporary collection of data that can be sorted and summarized. It is 392 // A temporary collection of data that can be sorted and summarized. It is
326 // gathered (carefully) from many threads. Instances are held in arrays and 393 // gathered (carefully) from many threads. Instances are held in arrays and
327 // processed, filtered, and rendered. 394 // processed, filtered, and rendered.
328 // The source of this data was collected on many threads, and is asynchronously 395 // The source of this data was collected on many threads, and is asynchronously
329 // changing. The data in this instance is not asynchronously changing. 396 // changing. The data in this instance is not asynchronously changing.
330 397
331 struct BASE_EXPORT TaskSnapshot { 398 struct BASE_EXPORT TaskSnapshot {
332 TaskSnapshot(); 399 TaskSnapshot();
333 TaskSnapshot(const BirthOnThread& birth, 400 TaskSnapshot(const BirthOnThreadSnapshot& birth,
334 const DeathData& death_data, 401 const DeathDataSnapshot& death_data,
335 const std::string& death_thread_name); 402 const std::string& death_thread_name);
336 ~TaskSnapshot(); 403 ~TaskSnapshot();
337 404
338 BirthOnThreadSnapshot birth; 405 BirthOnThreadSnapshot birth;
406 // Delta between death data for a thread for a certain profiling phase and the
407 // snapshot for the previous phase, if any. Otherwise, just a snapshot.
339 DeathDataSnapshot death_data; 408 DeathDataSnapshot death_data;
340 std::string death_thread_name; 409 std::string death_thread_name;
341 }; 410 };
342 411
343 //------------------------------------------------------------------------------ 412 //------------------------------------------------------------------------------
344 // For each thread, we have a ThreadData that stores all tracking info generated 413 // For each thread, we have a ThreadData that stores all tracking info generated
345 // on this thread. This prevents the need for locking as data accumulates. 414 // on this thread. This prevents the need for locking as data accumulates.
346 // We use ThreadLocalStorage to quickly identify the current ThreadData context. 415 // We use ThreadLocalStorage to quickly identify the current ThreadData context.
347 // We also have a linked list of ThreadData instances, and that list is used to 416 // We also have a linked list of ThreadData instances, and that list is used to
348 // harvest data from all existing instances. 417 // harvest data from all existing instances.
(...skipping 32 matching lines...)
381 // only used by the message loop, which has a well defined thread name. 450 // only used by the message loop, which has a well defined thread name.
382 static void InitializeThreadContext(const std::string& suggested_name); 451 static void InitializeThreadContext(const std::string& suggested_name);
383 452
384 // Using Thread Local Store, find the current instance for collecting data. 453 // Using Thread Local Store, find the current instance for collecting data.
385 // If an instance does not exist, construct one (and remember it for use on 454 // If an instance does not exist, construct one (and remember it for use on
386 // this thread). 455 // this thread).
387 // This may return NULL if the system is disabled for any reason. 456 // This may return NULL if the system is disabled for any reason.
388 static ThreadData* Get(); 457 static ThreadData* Get();
389 458
390 // Fills |process_data_snapshot| with phased snapshots of all profiling 459 // Fills |process_data_snapshot| with phased snapshots of all profiling
391 // phases, including the current one. 460 // phases, including the current one, identified by |current_profiling_phase|.
392 static void Snapshot(ProcessDataSnapshot* process_data_snapshot); 461 // |current_profiling_phase| is necessary because a child process can start
462 // after several phase-changing events, so it needs to receive the current
463 // phase number from the browser process to fill the correct entry for the
464 // current phase in the |process_data_snapshot| map.
465 static void Snapshot(int current_profiling_phase,
466 ProcessDataSnapshot* process_data_snapshot);
467
468 // Called when the current profiling phase, identified by |profiling_phase|,
469 // ends.
470 // |profiling_phase| is necessary because a child process can start after
471 // several phase-changing events, so it needs to receive the phase number from
472 // the browser process to fill the correct entry in the
473 // completed_phases_snapshots_ map.
474 static void OnProfilingPhaseCompleted(int profiling_phase);
393 475
394 // Finds (or creates) a place to count births from the given location in this 476 // Finds (or creates) a place to count births from the given location in this
395 // thread, and increment that tally. 477 // thread, and increment that tally.
396 // TallyABirthIfActive will return NULL if the birth cannot be tallied. 478 // TallyABirthIfActive will return NULL if the birth cannot be tallied.
397 static Births* TallyABirthIfActive(const Location& location); 479 static Births* TallyABirthIfActive(const Location& location);
398 480
399 // Records the end of a timed run of an object. The |completed_task| contains 481 // Records the end of a timed run of an object. The |completed_task| contains
400 // a pointer to a Births, the time_posted, and a delayed_start_time if any. 482 // a pointer to a Births, the time_posted, and a delayed_start_time if any.
401 // The |start_of_run| indicates when we started to perform the run of the 483 // The |start_of_run| indicates when we started to perform the run of the
402 // task. The delayed_start_time is non-null for tasks that were posted as 484 // task. The delayed_start_time is non-null for tasks that were posted as
403 // delayed tasks, and it indicates when the task should have run (i.e., when 485 // delayed tasks, and it indicates when the task should have run (i.e., when
404 // it should have posted out of the timer queue, and into the work queue). 486 // it should have posted out of the timer queue, and into the work queue).
405 // The |end_of_run| was just obtained by a call to Now() (just after the task 487 // The |end_of_run| was just obtained by a call to Now() (just after the task
406 // finished). It is provided as an argument to help with testing. 488 // finished). It is provided as an argument to help with testing.
407 static void TallyRunOnNamedThreadIfTracking( 489 static void TallyRunOnNamedThreadIfTracking(
408 const base::TrackingInfo& completed_task, 490 const base::TrackingInfo& completed_task,
409 const TaskStopwatch& stopwatch); 491 const TaskStopwatch& stopwatch);
410 492
411 // Record the end of a timed run of an object. The |birth| is the record for 493 // Record the end of a timed run of an object. The |birth| is the record for
412 // the instance, the |time_posted| records that instant, which is presumed to 494 // the instance, the |time_posted| records that instant, which is presumed to
413 // be when the task was posted into a queue to run on a worker thread. 495 // be when the task was posted into a queue to run on a worker thread.
414 // The |start_of_run| is when the worker thread started to perform the run of 496 // The |start_of_run| is when the worker thread started to perform the run of
415 // the task. 497 // the task.
416 // The |end_of_run| was just obtained by a call to Now() (just after the task 498 // The |end_of_run| was just obtained by a call to Now() (just after the task
417 // finished). 499 // finished).
418 static void TallyRunOnWorkerThreadIfTracking(const Births* birth, 500 static void TallyRunOnWorkerThreadIfTracking(const Births* births,
419 const TrackedTime& time_posted, 501 const TrackedTime& time_posted,
420 const TaskStopwatch& stopwatch); 502 const TaskStopwatch& stopwatch);
421 503
422 // Record the end of execution in region, generally corresponding to a scope 504 // Record the end of execution in region, generally corresponding to a scope
423 // being exited. 505 // being exited.
424 static void TallyRunInAScopedRegionIfTracking(const Births* birth, 506 static void TallyRunInAScopedRegionIfTracking(const Births* births,
425 const TaskStopwatch& stopwatch); 507 const TaskStopwatch& stopwatch);
426 508
427 const std::string& thread_name() const { return thread_name_; } 509 const std::string& thread_name() const { return thread_name_; }
428 510
429 // Initializes all statics if needed (this initialization call should be made 511 // Initializes all statics if needed (this initialization call should be made
430 // while we are single threaded). Returns false if unable to initialize. 512 // while we are single threaded). Returns false if unable to initialize.
431 static bool Initialize(); 513 static bool Initialize();
432 514
433 // Sets internal status_. 515 // Sets internal status_.
434 // If |status| is false, then status_ is set to DEACTIVATED. 516 // If |status| is false, then status_ is set to DEACTIVATED.
435 // If |status| is true, then status_ is set to PROFILING_ACTIVE, or 517 // If |status| is true, then status_ is set to PROFILING_ACTIVE, or
436 // PROFILING_CHILDREN_ACTIVE. 518 // PROFILING_CHILDREN_ACTIVE.
437 // If tracking is not compiled in, this function will return false. 519 // If tracking is not compiled in, this function will return false.
438 // If parent-child tracking is not compiled in, then an attempt to set the 520 // If parent-child tracking is not compiled in, then an attempt to set the
439 // status to PROFILING_CHILDREN_ACTIVE will only result in a status of 521 // status to PROFILING_CHILDREN_ACTIVE will only result in a status of
440 // PROFILING_ACTIVE (i.e., it can't be set to a higher level than what is 522 // PROFILING_ACTIVE (i.e., it can't be set to a higher level than what is
441 // compiled into the binary, and parent-child tracking at the 523 // compiled into the binary, and parent-child tracking at the
442 // PROFILING_CHILDREN_ACTIVE level might not be compiled in). 524 // PROFILING_CHILDREN_ACTIVE level might not be compiled in).
443 static bool InitializeAndSetTrackingStatus(Status status); 525 static bool InitializeAndSetTrackingStatus(Status status);
444 526
445 static Status status(); 527 static Status status();
446 528
447 // Indicate if any sort of profiling is being done (i.e., we are more than 529 // Indicate if any sort of profiling is being done (i.e., we are more than
448 // DEACTIVATED). 530 // DEACTIVATED).
449 static bool TrackingStatus(); 531 static bool TrackingStatus();
450 532
451 // For testing only, indicate if the status of parent-child tracking is turned 533 // For testing only, indicate if the status of parent-child tracking is turned
452 // on. This is currently a compiled option, atop TrackingStatus(). 534 // on. This is currently a compiled option, atop TrackingStatus().
453 static bool TrackingParentChildStatus(); 535 static bool TrackingParentChildStatus();
454 536
455 // Marks a start of a tracked run. It's super fast when tracking is disabled, 537 // Marks a start of a tracked run. It's super fast when tracking is disabled,
456 // and has some internal side effects when we are tracking, so that we can 538 // and has some internal side effects when we are tracking, so that we can
457 // deduce the amount of time accumulated outside of execution of tracked runs. 539 // deduce the amount of time accumulated outside of execution of tracked runs.
458 // The task that will be tracked is passed in as |parent| so that parent-child 540 // The task that will be tracked is passed in as |parent| so that parent-child
459 // relationships can be (optionally) calculated. 541 // relationships can be (optionally) calculated.
460 static void PrepareForStartOfRun(const Births* parent); 542 static void PrepareForStartOfRun(const Births* parent);
461 543
462 // Enables profiler timing. 544 // Enables profiler timing.
463 static void EnableProfilerTiming(); 545 static void EnableProfilerTiming();
464 546
465 // Provide a time function that does nothing (runs fast) when we don't have 547 // Provide a time function that does nothing (runs fast) when we don't have
(...skipping 20 matching lines...)
486 // TODO(jar): Make this a friend in DEBUG only, so that the optimizer has a 568 // TODO(jar): Make this a friend in DEBUG only, so that the optimizer has a
487 // better chance of optimizing (inlining? etc.) private methods (knowing that 569 // better chance of optimizing (inlining? etc.) private methods (knowing that
488 // there will be no need for an external entry point). 570 // there will be no need for an external entry point).
489 friend class TrackedObjectsTest; 571 friend class TrackedObjectsTest;
490 FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, MinimalStartupShutdown); 572 FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, MinimalStartupShutdown);
491 FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, TinyStartupShutdown); 573 FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, TinyStartupShutdown);
492 FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, ParentChildTest); 574 FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, ParentChildTest);
493 575
494 typedef std::map<const BirthOnThread*, int> BirthCountMap; 576 typedef std::map<const BirthOnThread*, int> BirthCountMap;
495 577
578 typedef std::vector<std::pair<const Births*, DeathDataPhaseSnapshot>>
579 DeathsSnapshot;
580
496 // Worker thread construction creates a name since there is none. 581 // Worker thread construction creates a name since there is none.
497 explicit ThreadData(int thread_number); 582 explicit ThreadData(int thread_number);
498 583
499 // Message loop based construction should provide a name. 584 // Message loop based construction should provide a name.
500 explicit ThreadData(const std::string& suggested_name); 585 explicit ThreadData(const std::string& suggested_name);
501 586
502 ~ThreadData(); 587 ~ThreadData();
503 588
504 // Push this instance to the head of all_thread_data_list_head_, linking it to 589 // Push this instance to the head of all_thread_data_list_head_, linking it to
505 // the previous head. This is performed after each construction, and leaves 590 // the previous head. This is performed after each construction, and leaves
506 // the instance permanently on that list. 591 // the instance permanently on that list.
507 void PushToHeadOfList(); 592 void PushToHeadOfList();
508 593
509 // (Thread safe) Get start of list of all ThreadData instances using the lock. 594 // (Thread safe) Get start of list of all ThreadData instances using the lock.
510 static ThreadData* first(); 595 static ThreadData* first();
511 596
512 // Iterate through the null terminated list of ThreadData instances. 597 // Iterate through the null terminated list of ThreadData instances.
513 ThreadData* next() const; 598 ThreadData* next() const;
514 599
515 600
516 // In this thread's data, record a new birth. 601 // In this thread's data, record a new birth.
517 Births* TallyABirth(const Location& location); 602 Births* TallyABirth(const Location& location);
518 603
519 // Find a place to record a death on this thread. 604 // Find a place to record a death on this thread.
520 void TallyADeath(const Births& birth, 605 void TallyADeath(const Births& births,
521 int32 queue_duration, 606 int32 queue_duration,
522 const TaskStopwatch& stopwatch); 607 const TaskStopwatch& stopwatch);
523 608
524 // Snapshot (under a lock) the profiled data for the tasks in each ThreadData
525 // instance. Also updates the |birth_counts| tally for each task to keep
526 // track of the number of living instances of the task.
527 static void SnapshotAllExecutedTasks(
528 ProcessDataPhaseSnapshot* process_data_phase,
529 BirthCountMap* birth_counts);
530
531 // Fills |process_data_phase| with all the recursive results in our process.
532 static void SnapshotCurrentPhase(
533 ProcessDataPhaseSnapshot* process_data_phase);
534
535 // Snapshots (under a lock) the profiled data for the tasks for this thread 609 // Snapshots (under a lock) the profiled data for the tasks for this thread
536 // and writes all of the executed tasks' data -- i.e. the data for the tasks 610 // and writes all of the executed tasks' data -- i.e. the data for all
537 // with with entries in the death_map_ -- into |process_data_phase|. Also 611 // profiling phases (including the current one: |current_profiling_phase|) for
538 // updates the |birth_counts| tally for each task to keep track of the number 612 // the tasks with with entries in the death_map_ -- into |phased_snapshots|.
539 // of living instances of the task -- that is, each task maps to the number of 613 // Also updates the |birth_counts| tally for each task to keep track of the
540 // births for the task that have not yet been balanced by a death. 614 // number of living instances of the task -- that is, each task maps to the
541 void SnapshotExecutedTasks(ProcessDataPhaseSnapshot* process_data_phase, 615 // number of births for the task that have not yet been balanced by a death.
616 void SnapshotExecutedTasks(int current_profiling_phase,
617 PhasedProcessDataSnapshotMap* phased_snapshots,
542 BirthCountMap* birth_counts); 618 BirthCountMap* birth_counts);
543 619
544 // Using our lock, make a copy of the specified maps. This call may be made 620 // Using our lock, make a copy of the specified maps. This call may be made
545 // on non-local threads, which necessitate the use of the lock to prevent 621 // on non-local threads, which necessitate the use of the lock to prevent
546 // the map(s) from being reallocated while they are copied. 622 // the map(s) from being reallocated while they are copied.
547 void SnapshotMaps(BirthMap* birth_map, 623 void SnapshotMaps(int profiling_phase,
548 DeathMap* death_map, 624 BirthMap* birth_map,
625 DeathsSnapshot* deaths,
549 ParentChildSet* parent_child_set); 626 ParentChildSet* parent_child_set);
550 627
628 // Called for this thread when the current profiling phase, identified by
629 // |profiling_phase|, ends.
630 void OnProfilingPhaseCompletedOnThread(int profiling_phase);
631
551 // This method is called by the TLS system when a thread terminates. 632 // This method is called by the TLS system when a thread terminates.
552 // The argument may be NULL if this thread has never tracked a birth or death. 633 // The argument may be NULL if this thread has never tracked a birth or death.
553 static void OnThreadTermination(void* thread_data); 634 static void OnThreadTermination(void* thread_data);
554 635
555 // This method should be called when a worker thread terminates, so that we 636 // This method should be called when a worker thread terminates, so that we
556 // can save all the thread data into a cache of reusable ThreadData instances. 637 // can save all the thread data into a cache of reusable ThreadData instances.
557 void OnThreadTerminationCleanup(); 638 void OnThreadTerminationCleanup();
558 639
559 // Cleans up data structures, and returns statics to near pristine (mostly 640
560 // uninitialized) state. If there is any chance that other threads are still 641 // uninitialized) state. If there is any chance that other threads are still
(...skipping 10 matching lines...)
571 // increasing time function. 652 // increasing time function.
572 static NowFunction* now_function_; 653 static NowFunction* now_function_;
573 654
574 // If true, now_function_ returns values that can be used to calculate queue 655 // If true, now_function_ returns values that can be used to calculate queue
575 // time. 656 // time.
576 static bool now_function_is_time_; 657 static bool now_function_is_time_;
577 658
578 // We use thread local store to identify which ThreadData to interact with. 659 // We use thread local store to identify which ThreadData to interact with.
579 static base::ThreadLocalStorage::StaticSlot tls_index_; 660 static base::ThreadLocalStorage::StaticSlot tls_index_;
580 661
581 // List of ThreadData instances for use with worker threads. When a worker 662 // List of ThreadData instances for use with worker threads. When a worker
582 // thread is done (terminated), we push it onto this list. When a new worker 663 // thread is done (terminated), we push it onto this list. When a new worker
583 // thread is created, we first try to re-use a ThreadData instance from the 664 // thread is created, we first try to re-use a ThreadData instance from the
584 // list, and if none are available, construct a new one. 665 // list, and if none are available, construct a new one.
585 // This is only accessed while list_lock_ is held. 666 // This is only accessed while list_lock_ is held.
586 static ThreadData* first_retired_worker_; 667 static ThreadData* first_retired_worker_;
587 668
588 // Link to the most recently created instance (starts a null terminated list). 669 // Link to the most recently created instance (starts a null terminated list).
589 // The list is traversed by about:profiler when it needs to snapshot data. 670 // The list is traversed by about:profiler when it needs to snapshot data.
590 // This is only accessed while list_lock_ is held. 671 // This is only accessed while list_lock_ is held.
591 static ThreadData* all_thread_data_list_head_; 672 static ThreadData* all_thread_data_list_head_;
592 673
593 // The next available worker thread number. This should only be accessed when 674 // The next available worker thread number. This should only be accessed when
594 // the list_lock_ is held. 675 // the list_lock_ is held.
595 static int worker_thread_data_creation_count_; 676 static int worker_thread_data_creation_count_;
596 677
597 // The number of times TLS has called us back to cleanup a ThreadData 678 // The number of times TLS has called us back to cleanup a ThreadData
598 // instance. This is only accessed while list_lock_ is held. 679 // instance. This is only accessed while list_lock_ is held.
599 static int cleanup_count_; 680 static int cleanup_count_;
600 681
601 // Incarnation sequence number, indicating how many times (during unittests) 682 // Incarnation sequence number, indicating how many times (during unittests)
602 // we've either transitioned out of UNINITIALIZED, or into that state. This 683 // we've either transitioned out of UNINITIALIZED, or into that state. This
603 // value is only accessed while the list_lock_ is held. 684 // value is only accessed while the list_lock_ is held.
604 static int incarnation_counter_; 685 static int incarnation_counter_;
605 686
606 // Protection for access to all_thread_data_list_head_, and to 687 // Protection for access to all_thread_data_list_head_, and to
607 // unregistered_thread_data_pool_. This lock is leaked at shutdown. 688 // unregistered_thread_data_pool_. This lock is leaked at shutdown.
608 // The lock is very infrequently used, so we can afford to just make a lazy 689 // The lock is very infrequently used, so we can afford to just make a lazy
609 // instance and be safe. 690 // instance and be safe.
610 static base::LazyInstance<base::Lock>::Leaky list_lock_; 691 static base::LazyInstance<base::Lock>::Leaky list_lock_;
611 692
612 // We set status_ to SHUTDOWN when we shut down the tracking service. 693 // We set status_ to SHUTDOWN when we shut down the tracking service.
613 static Status status_; 694 static Status status_;
614 695
615 // Link to next instance (null terminated list). Used to globally track all 696 // Link to next instance (null terminated list). Used to globally track all
616 // registered instances (corresponds to all registered threads where we keep 697 // registered instances (corresponds to all registered threads where we keep
617 // data). 698 // data).
618 ThreadData* next_; 699 ThreadData* next_;
619 700
620 // Pointer to another ThreadData instance for a Worker-Thread that has been 701 // Pointer to another ThreadData instance for a Worker-Thread that has been
621 // retired (its thread was terminated). This value is non-NULL only for a 702 // retired (its thread was terminated). This value is non-NULL only for a
622 // retired ThreadData associated with a Worker-Thread. 703 // retired ThreadData associated with a Worker-Thread.
623 ThreadData* next_retired_worker_; 704 ThreadData* next_retired_worker_;
624 705
625 // The name of the thread that is being recorded. If this thread has no 706 // The name of the thread that is being recorded. If this thread has no
(...skipping 11 matching lines...)
637 // When a snapshot is needed, this structure can be locked in place for the 718 // When a snapshot is needed, this structure can be locked in place for the
638 // duration of the snapshotting activity. 719 // duration of the snapshotting activity.
639 BirthMap birth_map_; 720 BirthMap birth_map_;
640 721
641 // Similar to birth_map_, this records information about deaths of tracked 722 // Similar to birth_map_, this records information about deaths of tracked
642 // instances (i.e., when a tracked instance was destroyed on this thread). 723 // instances (i.e., when a tracked instance was destroyed on this thread).
643 // It is locked before changing, and hence other threads may access it by 724 // It is locked before changing, and hence other threads may access it by
644 // locking before reading it. 725 // locking before reading it.
645 DeathMap death_map_; 726 DeathMap death_map_;
646 727
647 // A set of parents that created children tasks on this thread. Each pair 728 // A set of parents that created children tasks on this thread. Each pair
648 // corresponds to potentially non-local Births (location and thread), and a 729 // corresponds to potentially non-local Births (location and thread), and a
649 // local Births (that took place on this thread). 730 // local Births (that took place on this thread).
650 ParentChildSet parent_child_set_; 731 ParentChildSet parent_child_set_;
651 732
652 // Lock to protect *some* access to BirthMap and DeathMap. The maps are 733 // Lock to protect *some* access to BirthMap and DeathMap. The maps are
653 // regularly read and written on this thread, but may only be read from other 734 // regularly read and written on this thread, but may only be read from other
654 // threads. To support this, we acquire this lock if we are writing from this 735 // threads. To support this, we acquire this lock if we are writing from this
655 // thread, or reading from another thread. For reading from this thread we 736 // thread, or reading from another thread. For reading from this thread we
656 // don't need a lock, as there is no potential for a conflict since the 737 // don't need a lock, as there is no potential for a conflict since the
657 // writing is only done from this thread. 738 // writing is only done from this thread.
658 mutable base::Lock map_lock_; 739 mutable base::Lock map_lock_;
659 740
660 // The stack of parents that are currently being profiled. This includes only 741 // The stack of parents that are currently being profiled. This includes only
661 // tasks that have started a timer recently via PrepareForStartOfRun(), but 742 // tasks that have started a timer recently via PrepareForStartOfRun(), but
662 // not yet concluded with a NowForEndOfRun(). Usually this stack is one deep, 743 // not yet concluded with a NowForEndOfRun(). Usually this stack is one deep,
663 // but if a scoped region is profiled, or <sigh> a task runs a nested-message 744 // but if a scoped region is profiled, or <sigh> a task runs a nested-message
664 // loop, then the stack can grow larger. Note that we don't try to deduct 745 // loop, then the stack can grow larger. Note that we don't try to deduct
665 // time in nested profiles, as our current timer is based on wall-clock time, 746 // time in nested profiles, as our current timer is based on wall-clock time,
666 // and not CPU time (and we're hopeful that nested timing won't be a 747 // and not CPU time (and we're hopeful that nested timing won't be a
667 // significant additional cost). 748 // significant additional cost).
668 ParentStack parent_stack_; 749 ParentStack parent_stack_;
669 750
670 // A random number that we use to decide which sample to keep as a 751
(...skipping 10 matching lines...)
681 762
682 // Most recently started (i.e. most nested) stopwatch on the current thread, 763 // Most recently started (i.e. most nested) stopwatch on the current thread,
683 // if it exists; NULL otherwise. 764 // if it exists; NULL otherwise.
684 TaskStopwatch* current_stopwatch_; 765 TaskStopwatch* current_stopwatch_;
685 766
686 DISALLOW_COPY_AND_ASSIGN(ThreadData); 767 DISALLOW_COPY_AND_ASSIGN(ThreadData);
687 }; 768 };
688 769
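Putting the phase plumbing together, a browser-side caller might drive the API above roughly as follows (a sketch assuming a Chromium build; the phase numbers are arbitrary, with the first boundary standing in for the FIRST_NONEMPTY_PAINT event this issue delivers):

    #include "base/tracked_objects.h"

    void CollectPhasedProfile() {
      using tracked_objects::ProcessDataSnapshot;
      using tracked_objects::ThreadData;

      // Start profiling while still single-threaded.
      ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);

      // ... tasks run and are tallied into per-thread birth/death maps ...

      // Phase 0 ends (e.g. at the first non-empty paint); per-task snapshots
      // of the completed phase are retained inside each DeathData.
      ThreadData::OnProfilingPhaseCompleted(0);

      // ... more tasks run during phase 1 ...

      // Gather data for all phases, including the still-open phase 1.
      ProcessDataSnapshot process_data;
      ThreadData::Snapshot(1, &process_data);
      // process_data.phased_snapshots now maps phase number to per-phase data.
    }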
689 //------------------------------------------------------------------------------ 770 //------------------------------------------------------------------------------
690 // Stopwatch to measure task run time or simply create a time interval that will 771 // Stopwatch to measure task run time or simply create a time interval that will
691 // be subtracted from the current most nested task's run time. Stopwatches 772 // be subtracted from the current most nested task's run time. Stopwatches
692 // coordinate with the stopwatches in which they are nested to avoid 773 // coordinate with the stopwatches in which they are nested to avoid
693 // double-counting nested tasks run times. 774 // double-counting nested tasks run times.
694 775
695 class BASE_EXPORT TaskStopwatch { 776 class BASE_EXPORT TaskStopwatch {
696 public: 777 public:
697 // Starts the stopwatch. 778 // Starts the stopwatch.
698 TaskStopwatch(); 779 TaskStopwatch();
699 ~TaskStopwatch(); 780 ~TaskStopwatch();
700 781
701 // Starts stopwatch. 782 // Starts stopwatch.
(...skipping 27 matching lines...)
729 // Sum of wallclock durations of all stopwatches that were directly nested in 810 // Sum of wallclock durations of all stopwatches that were directly nested in
730 // this one. 811 // this one.
731 int32 excluded_duration_ms_; 812 int32 excluded_duration_ms_;
732 813
733 // Stopwatch which was running on our thread when this stopwatch was started. 814 // Stopwatch which was running on our thread when this stopwatch was started.
734 // That preexisting stopwatch must be adjusted to exclude the wallclock 815 // That preexisting stopwatch must be adjusted to exclude the wallclock
735 // duration of this stopwatch. 816 // duration of this stopwatch.
736 TaskStopwatch* parent_; 817 TaskStopwatch* parent_;
737 818
738 #if DCHECK_IS_ON() 819 #if DCHECK_IS_ON()
739 // State of the stopwatch. Stopwatch is first constructed in a created state 820 // State of the stopwatch. Stopwatch is first constructed in a created state
740 // then is optionally started/stopped, then destructed. 821 // then is optionally started/stopped, then destructed.
741 enum { CREATED, RUNNING, STOPPED } state_; 822 enum { CREATED, RUNNING, STOPPED } state_;
742 823
743 // Currently running stopwatch that is directly nested in this one, if such 824 // Currently running stopwatch that is directly nested in this one, if such
744 // stopwatch exists. NULL otherwise. 825 // stopwatch exists. NULL otherwise.
745 TaskStopwatch* child_; 826 TaskStopwatch* child_;
746 #endif 827 #endif
747 }; 828 };
748 829
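The nesting behaviour described above can be illustrated with a short sketch (assuming a Chromium build; Start() and Stop() are declared in the part of the class the diff viewer elides):

    #include "base/tracked_objects.h"

    void TimeWorkWithExcludedInterval() {
      tracked_objects::TaskStopwatch outer;
      outer.Start();
      // ... work attributed to the outer task ...
      {
        // The nested stopwatch registers with |outer| via thread-local state.
        tracked_objects::TaskStopwatch inner;
        inner.Start();
        // ... interval that should not count against the outer task ...
        inner.Stop();
      }
      // ... more outer work ...
      outer.Stop();
      // |outer|'s recorded run time now excludes |inner|'s wallclock duration.
    }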
749 //------------------------------------------------------------------------------ 830 //------------------------------------------------------------------------------
750 // A snapshotted representation of a (parent, child) task pair, for tracking 831 // A snapshotted representation of a (parent, child) task pair, for tracking
751 // hierarchical profiles. 832 // hierarchical profiles.
752 833
753 struct BASE_EXPORT ParentChildPairSnapshot { 834 struct BASE_EXPORT ParentChildPairSnapshot {
754 public: 835 public:
(...skipping 21 matching lines...)
776 857
777 //------------------------------------------------------------------------------ 858 //------------------------------------------------------------------------------
778 // A snapshotted representation of the list of ThreadData objects for a process, 859 // A snapshotted representation of the list of ThreadData objects for a process,
779 // for all profiling phases, including the current one. 860 // for all profiling phases, including the current one.
780 861
781 struct BASE_EXPORT ProcessDataSnapshot { 862 struct BASE_EXPORT ProcessDataSnapshot {
782 public: 863 public:
783 ProcessDataSnapshot(); 864 ProcessDataSnapshot();
784 ~ProcessDataSnapshot(); 865 ~ProcessDataSnapshot();
785 866
786 PhasedProcessDataSnapshotMap phased_process_data_snapshots; 867 PhasedProcessDataSnapshotMap phased_snapshots;
787 base::ProcessId process_id; 868 base::ProcessId process_id;
788 }; 869 };
789 870
790 } // namespace tracked_objects 871 } // namespace tracked_objects
791 872
792 #endif // BASE_TRACKED_OBJECTS_H_ 873 #endif // BASE_TRACKED_OBJECTS_H_
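And on the consumer side, a sketch of reading the result back out (assuming a Chromium build, and that PhasedProcessDataSnapshotMap is an associative container keyed by the 0-based phase number, as its name and the comments above suggest):

    #include "base/logging.h"
    #include "base/tracked_objects.h"

    void LogPhases(const tracked_objects::ProcessDataSnapshot& process_data) {
      VLOG(1) << "process " << process_data.process_id << " reported "
              << process_data.phased_snapshots.size() << " profiling phase(s)";
      for (const auto& entry : process_data.phased_snapshots) {
        // entry.first is the phase number; entry.second holds that phase's
        // task snapshots for this process.
        VLOG(1) << "  phase " << entry.first;
      }
    }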
