| Index: base/tracked_objects_unittest.cc
|
| diff --git a/base/tracked_objects_unittest.cc b/base/tracked_objects_unittest.cc
|
| index 70d9601cd0f9be78570cabab1ce23d7b6413917e..b64e5fdd4e235868a8ad090175e105ab72a9e619 100644
|
| --- a/base/tracked_objects_unittest.cc
|
| +++ b/base/tracked_objects_unittest.cc
|
| @@ -22,6 +22,13 @@ const char kWorkerThreadName[] = "WorkerThread-1";
|
| const char kMainThreadName[] = "SomeMainThreadName";
|
| const char kStillAlive[] = "Still_Alive";
|
|
|
| +const int32_t kAllocOps = 23;
|
| +const int32_t kFreeOps = 27;
|
| +const int32_t kAllocatedBytes = 59934;
|
| +const int32_t kFreedBytes = 2 * kAllocatedBytes;
|
| +const int32_t kAllocOverheadBytes = kAllocOps * 8;
|
| +const int32_t kMaxAllocatedBytes = kAllocatedBytes / 2;
|
| +
|
| namespace tracked_objects {
|
|
|
| class TrackedObjectsTest : public testing::Test {
|
| @@ -237,7 +244,7 @@ TEST_F(TrackedObjectsTest, TinyStartupShutdown) {
|
| EXPECT_EQ(kWorkerThreadName, process_data_phase.tasks[0].death_thread_name);
|
| }
|
|
|
| -TEST_F(TrackedObjectsTest, DeathDataTestRecordDeath) {
|
| +TEST_F(TrackedObjectsTest, DeathDataTestRecordDurations) {
|
| ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
|
|
|
| std::unique_ptr<DeathData> data(new DeathData());
|
| @@ -255,7 +262,7 @@ TEST_F(TrackedObjectsTest, DeathDataTestRecordDeath) {
|
| int32_t queue_ms = 8;
|
|
|
| const int kUnrandomInt = 0; // Fake random int that ensure we sample data.
|
| - data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
|
| + data->RecordDurations(queue_ms, run_ms, kUnrandomInt);
|
| EXPECT_EQ(data->run_duration_sum(), run_ms);
|
| EXPECT_EQ(data->run_duration_max(), run_ms);
|
| EXPECT_EQ(data->run_duration_sample(), run_ms);
|
| @@ -265,7 +272,7 @@ TEST_F(TrackedObjectsTest, DeathDataTestRecordDeath) {
|
| EXPECT_EQ(data->count(), 1);
|
| EXPECT_EQ(nullptr, data->last_phase_snapshot());
|
|
|
| - data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
|
| + data->RecordDurations(queue_ms, run_ms, kUnrandomInt);
|
| EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms);
|
| EXPECT_EQ(data->run_duration_max(), run_ms);
|
| EXPECT_EQ(data->run_duration_sample(), run_ms);
|
| @@ -276,18 +283,80 @@ TEST_F(TrackedObjectsTest, DeathDataTestRecordDeath) {
|
| EXPECT_EQ(nullptr, data->last_phase_snapshot());
|
| }
|
|
|
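| +// Tests accumulation and saturation of DeathData allocation counters.
|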
| +TEST_F(TrackedObjectsTest, DeathDataTestRecordAllocations) {
|
| + ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
|
| +
|
| + std::unique_ptr<DeathData> data(new DeathData());
|
| + ASSERT_NE(data, nullptr);
|
| +
|
| + EXPECT_EQ(data->alloc_ops(), 0);
|
| + EXPECT_EQ(data->free_ops(), 0);
|
| + EXPECT_EQ(data->allocated_bytes(), 0);
|
| + EXPECT_EQ(data->freed_bytes(), 0);
|
| + EXPECT_EQ(data->alloc_overhead_bytes(), 0);
|
| + EXPECT_EQ(data->max_allocated_bytes(), 0);
|
| +
|
| + EXPECT_EQ(nullptr, data->last_phase_snapshot());
|
| +
|
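| + // Record one batch; every counter moves from zero to the recorded value.
|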
| + data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
|
| + kAllocOverheadBytes, kMaxAllocatedBytes);
|
| + EXPECT_EQ(data->alloc_ops(), kAllocOps);
|
| + EXPECT_EQ(data->free_ops(), kFreeOps);
|
| + EXPECT_EQ(data->allocated_bytes(), kAllocatedBytes);
|
| + EXPECT_EQ(data->freed_bytes(), kFreedBytes);
|
| + EXPECT_EQ(data->alloc_overhead_bytes(), kAllocOverheadBytes);
|
| + EXPECT_EQ(data->max_allocated_bytes(), kMaxAllocatedBytes);
|
| +
|
| + // Record a second batch with a smaller max; sums grow but the max holds.
|
| + const int32_t kSmallerMaxAllocatedBytes = kMaxAllocatedBytes / 2;
|
| + data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
|
| + kAllocOverheadBytes, kSmallerMaxAllocatedBytes);
|
| + EXPECT_EQ(data->alloc_ops(), 2 * kAllocOps);
|
| + EXPECT_EQ(data->free_ops(), 2 * kFreeOps);
|
| + EXPECT_EQ(data->allocated_bytes(), 2 * kAllocatedBytes);
|
| + EXPECT_EQ(data->freed_bytes(), 2 * kFreedBytes);
|
| + EXPECT_EQ(data->alloc_overhead_bytes(), 2 * kAllocOverheadBytes);
|
| + EXPECT_EQ(data->max_allocated_bytes(), kMaxAllocatedBytes);
|
| +
|
| + // Now record a third batch with a larger max; the max should update.
|
| + const int32_t kLargerMaxAllocatedBytes = kMaxAllocatedBytes * 2;
|
| + data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
|
| + kAllocOverheadBytes, kLargerMaxAllocatedBytes);
|
| + EXPECT_EQ(data->alloc_ops(), 3 * kAllocOps);
|
| + EXPECT_EQ(data->free_ops(), 3 * kFreeOps);
|
| + EXPECT_EQ(data->allocated_bytes(), 3 * kAllocatedBytes);
|
| + EXPECT_EQ(data->freed_bytes(), 3 * kFreedBytes);
|
| + EXPECT_EQ(data->alloc_overhead_bytes(), 3 * kAllocOverheadBytes);
|
| + EXPECT_EQ(data->max_allocated_bytes(), kLargerMaxAllocatedBytes);
|
| +
|
| + // Saturate every field; the counters must clamp at INT_MAX, not overflow.
|
| + data->RecordAllocations(INT_MAX, INT_MAX, INT_MAX, INT_MAX, INT_MAX, INT_MAX);
|
| + EXPECT_EQ(data->alloc_ops(), INT_MAX);
|
| + EXPECT_EQ(data->free_ops(), INT_MAX);
|
| + EXPECT_EQ(data->allocated_bytes(), INT_MAX);
|
| + EXPECT_EQ(data->freed_bytes(), INT_MAX);
|
| + EXPECT_EQ(data->alloc_overhead_bytes(), INT_MAX);
|
| + EXPECT_EQ(data->max_allocated_bytes(), INT_MAX);
|
| +}
|
| +
|
| TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
|
| ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
|
|
|
| std::unique_ptr<DeathData> data(new DeathData());
|
| ASSERT_NE(data, nullptr);
|
|
|
| - int32_t run_ms = 42;
|
| - int32_t queue_ms = 8;
|
| + const int32_t run_ms = 42;
|
| + const int32_t queue_ms = 8;
|
|
|
| const int kUnrandomInt = 0; // Fake random int that ensure we sample data.
|
| - data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
|
| - data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
|
| + data->RecordDurations(queue_ms, run_ms, kUnrandomInt);
|
| + data->RecordDurations(queue_ms, run_ms, kUnrandomInt);
|
| +
|
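| + // Allocation stats recorded now should appear in the phase snapshot below.
|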
| + data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
|
| + kAllocOverheadBytes, kMaxAllocatedBytes);
|
|
|
| data->OnProfilingPhaseCompleted(123);
|
| EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms);
|
| @@ -297,6 +366,14 @@ TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
|
| EXPECT_EQ(data->queue_duration_max(), 0);
|
| EXPECT_EQ(data->queue_duration_sample(), queue_ms);
|
| EXPECT_EQ(data->count(), 2);
|
| +
|
| + EXPECT_EQ(data->alloc_ops(), kAllocOps);
|
| + EXPECT_EQ(data->free_ops(), kFreeOps);
|
| + EXPECT_EQ(data->allocated_bytes(), kAllocatedBytes);
|
| + EXPECT_EQ(data->freed_bytes(), kFreedBytes);
|
| + EXPECT_EQ(data->alloc_overhead_bytes(), kAllocOverheadBytes);
|
| + EXPECT_EQ(data->max_allocated_bytes(), kMaxAllocatedBytes);
|
| +
|
| ASSERT_NE(nullptr, data->last_phase_snapshot());
|
| EXPECT_EQ(123, data->last_phase_snapshot()->profiling_phase);
|
| EXPECT_EQ(2, data->last_phase_snapshot()->death_data.count);
|
| @@ -311,12 +388,27 @@ TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
|
| data->last_phase_snapshot()->death_data.queue_duration_max);
|
| EXPECT_EQ(queue_ms,
|
| data->last_phase_snapshot()->death_data.queue_duration_sample);
|
| +
|
| + EXPECT_EQ(kAllocOps, data->last_phase_snapshot()->death_data.alloc_ops);
|
| + EXPECT_EQ(kFreeOps, data->last_phase_snapshot()->death_data.free_ops);
|
| + EXPECT_EQ(kAllocatedBytes,
|
| + data->last_phase_snapshot()->death_data.allocated_bytes);
|
| + EXPECT_EQ(kFreedBytes, data->last_phase_snapshot()->death_data.freed_bytes);
|
| + EXPECT_EQ(kAllocOverheadBytes,
|
| + data->last_phase_snapshot()->death_data.alloc_overhead_bytes);
|
| + EXPECT_EQ(kMaxAllocatedBytes,
|
| + data->last_phase_snapshot()->death_data.max_allocated_bytes);
|
| +
|
| EXPECT_EQ(nullptr, data->last_phase_snapshot()->prev);
|
|
|
| - int32_t run_ms1 = 21;
|
| - int32_t queue_ms1 = 4;
|
| + const int32_t run_ms1 = 21;
|
| + const int32_t queue_ms1 = 4;
|
| +
|
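| + // Record more durations and allocations in the new profiling phase.
|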
| + data->RecordDurations(queue_ms1, run_ms1, kUnrandomInt);
|
| + data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
|
| + kAllocOverheadBytes, kMaxAllocatedBytes);
|
|
|
| - data->RecordDeath(queue_ms1, run_ms1, kUnrandomInt);
|
| EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms + run_ms1);
|
| EXPECT_EQ(data->run_duration_max(), run_ms1);
|
| EXPECT_EQ(data->run_duration_sample(), run_ms1);
|
| @@ -324,6 +416,14 @@ TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
|
| EXPECT_EQ(data->queue_duration_max(), queue_ms1);
|
| EXPECT_EQ(data->queue_duration_sample(), queue_ms1);
|
| EXPECT_EQ(data->count(), 3);
|
| +
|
| + EXPECT_EQ(data->alloc_ops(), 2 * kAllocOps);
|
| + EXPECT_EQ(data->free_ops(), 2 * kFreeOps);
|
| + EXPECT_EQ(data->allocated_bytes(), 2 * kAllocatedBytes);
|
| + EXPECT_EQ(data->freed_bytes(), 2 * kFreedBytes);
|
| + EXPECT_EQ(data->alloc_overhead_bytes(), 2 * kAllocOverheadBytes);
|
| + EXPECT_EQ(data->max_allocated_bytes(), kMaxAllocatedBytes);
|
| +
|
| ASSERT_NE(nullptr, data->last_phase_snapshot());
|
| EXPECT_EQ(123, data->last_phase_snapshot()->profiling_phase);
|
| EXPECT_EQ(2, data->last_phase_snapshot()->death_data.count);
|
| @@ -338,6 +438,17 @@ TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
|
| data->last_phase_snapshot()->death_data.queue_duration_max);
|
| EXPECT_EQ(queue_ms,
|
| data->last_phase_snapshot()->death_data.queue_duration_sample);
|
| +
|
| + EXPECT_EQ(kAllocOps, data->last_phase_snapshot()->death_data.alloc_ops);
|
| + EXPECT_EQ(kFreeOps, data->last_phase_snapshot()->death_data.free_ops);
|
| + EXPECT_EQ(kAllocatedBytes,
|
| + data->last_phase_snapshot()->death_data.allocated_bytes);
|
| + EXPECT_EQ(kFreedBytes, data->last_phase_snapshot()->death_data.freed_bytes);
|
| + EXPECT_EQ(kAllocOverheadBytes,
|
| + data->last_phase_snapshot()->death_data.alloc_overhead_bytes);
|
| + EXPECT_EQ(kMaxAllocatedBytes,
|
| + data->last_phase_snapshot()->death_data.max_allocated_bytes);
|
| +
|
| EXPECT_EQ(nullptr, data->last_phase_snapshot()->prev);
|
| }
|
|
|
| @@ -353,6 +464,13 @@ TEST_F(TrackedObjectsTest, Delta) {
|
| snapshot.queue_duration_max = 101;
|
| snapshot.queue_duration_sample = 26;
|
|
|
| + snapshot.alloc_ops = 95;
|
| + snapshot.free_ops = 90;
|
| + snapshot.allocated_bytes = 10240;
|
| + snapshot.freed_bytes = 4096;
|
| + snapshot.alloc_overhead_bytes = 950;
|
| + snapshot.max_allocated_bytes = 10240;
|
| +
|
| DeathDataSnapshot older_snapshot;
|
| older_snapshot.count = 2;
|
| older_snapshot.run_duration_sum = 95;
|
| @@ -362,6 +480,13 @@ TEST_F(TrackedObjectsTest, Delta) {
|
| older_snapshot.queue_duration_max = 99;
|
| older_snapshot.queue_duration_sample = 21;
|
|
|
| + older_snapshot.alloc_ops = 45;
|
| + older_snapshot.free_ops = 40;
|
| + older_snapshot.allocated_bytes = 4096;
|
| + older_snapshot.freed_bytes = 2048;
|
| + older_snapshot.alloc_overhead_bytes = 450;
|
| + older_snapshot.max_allocated_bytes = 10200;
|
| +
|
| const DeathDataSnapshot& delta = snapshot.Delta(older_snapshot);
|
| EXPECT_EQ(8, delta.count);
|
| EXPECT_EQ(5, delta.run_duration_sum);
|
| @@ -370,6 +495,15 @@ TEST_F(TrackedObjectsTest, Delta) {
|
| EXPECT_EQ(10, delta.queue_duration_sum);
|
| EXPECT_EQ(101, delta.queue_duration_max);
|
| EXPECT_EQ(26, delta.queue_duration_sample);
|
| +
|
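| + // The op and byte counters are differenced, while max_allocated_bytes keeps
|
| + // the newer snapshot's value, like the duration max and sample fields above.
|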
| + EXPECT_EQ(50, delta.alloc_ops);
|
| + EXPECT_EQ(50, delta.free_ops);
|
| + EXPECT_EQ(6144, delta.allocated_bytes);
|
| + EXPECT_EQ(2048, delta.freed_bytes);
|
| + EXPECT_EQ(500, delta.alloc_overhead_bytes);
|
| + EXPECT_EQ(10240, delta.max_allocated_bytes);
|
| }
|
|
|
| TEST_F(TrackedObjectsTest, DeactivatedBirthOnlyToSnapshotWorkerThread) {
|
|
|