Chromium Code Reviews | Index: base/metrics/field_trial_unittest.cc |
| diff --git a/base/metrics/field_trial_unittest.cc b/base/metrics/field_trial_unittest.cc |
| index 0af6d6082ac897ecb4d542dd82ea9de82575b5ae..10b3dc8baf1954191b6292e32c44ea05f3c429ec 100644 |
| --- a/base/metrics/field_trial_unittest.cc |
| +++ b/base/metrics/field_trial_unittest.cc |
| @@ -1,4 +1,4 @@ |
| -// Copyright (c) 2010 The Chromium Authors. All rights reserved. |
| +// Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| @@ -6,7 +6,9 @@ |
| #include "base/metrics/field_trial.h" |
| +#include "base/rand_util.h" |
| #include "base/stringprintf.h" |
| +#include "base/string_number_conversions.h" |
| #include "testing/gtest/include/gtest/gtest.h" |
| namespace base { |
| @@ -295,4 +297,70 @@ TEST_F(FieldTrialTest, MakeName) { |
| FieldTrial::MakeName("Histogram", "Field Trial")); |
| } |
| +TEST_F(FieldTrialTest, MachineIdToUniformDouble) { |
| + double results[] = { |
| + FieldTrial::MachineIdToUniformDouble("hi"), |
| + FieldTrial::MachineIdToUniformDouble("there"), |
| + }; |
| + EXPECT_NE(results[0], results[1]); |
| + for (int i = 0; i < arraysize(results); ++i) { |
| + EXPECT_LE(0.0, results[i]); |
| + EXPECT_GT(1.0, results[i]); |
| + } |
| + |
| + EXPECT_EQ(FieldTrial::MachineIdToUniformDouble("yo"), |
| + FieldTrial::MachineIdToUniformDouble("yo")); |
| +} |
| + |
| +// Not marking flaky as it should be incredibly rare for it to |
| +// not pass, and the diagnostics we print in this case would help |
| +// determine whether it is a true failure. |
| +TEST_F(FieldTrialTest, ProbabilisticMachineIdToUniformDoubleIsUniform) { |
| + // A uniform distribution should result in an expected average value |
| + // of 0.5. The actual average for a large number of samples should, |
| + // with extremely high probability, be very close to this. |
| + const int kNumSamples = 5000; |
| + double total_value = 0.0; |
| + for (int i = 0; i < kNumSamples; ++i) { |
| + total_value += FieldTrial::MachineIdToUniformDouble( |
| + IntToString(RandInt(0, MAXINT))); |
| + } |
| + |
| + double average = total_value / kNumSamples; |
| + double kExpectedMin = 0.45; |
| + double kExpectedMax = 0.55; |
| + EXPECT_LT(kExpectedMin, average); |
| + EXPECT_GT(kExpectedMax, average); |
|
jar (doing other things)
2011/04/21 01:03:50
Flaky tests are not acceptable, even with an expla
Jói
2011/04/21 19:50:33
Fixed.
|
| + |
| + if (average < kExpectedMin || average > kExpectedMax) { |
| + printf("Average was %f, outside the expected range (%f, %f).\n" |
| + "Values far outside the range may indicate a real problem,\n" |
| + "whereas values just outside the range are likely just flukes.\n", |
| + average, kExpectedMin, kExpectedMax); |
| + } |
| +} |
| + |
| +TEST_F(FieldTrialTest, UseOneTimeRandomization) { |
| + // Simply asserts that two trials with the same machine_id and the |
| + // same set of groups should return the same results. |
| + |
| + scoped_refptr<FieldTrial> trials[] = { |
| + new FieldTrial("first", 100, "default", 2050, 1, 1), |
|
jar (doing other things)
2011/04/21 01:03:50
use (... next_year_, 12, 31) for the date. See li
Jói
2011/04/21 19:50:33
Done.
|
| + new FieldTrial("second", 100, "default", 2050, 1, 1), |
| + }; |
| + |
| + std::string machine_id = IntToString(RandInt(0, MAXINT)); |
| + for (int i = 0; i < arraysize(trials); ++i) { |
| + // Same machine_id for both trials. |
| + trials[i]->UseOneTimeRandomization(machine_id); |
| + |
| + for (int j = 0; j < 10; ++j) { |
| + trials[i]->AppendGroup("", 10); |
| + } |
| + } |
| + |
| + ASSERT_EQ(trials[0]->group(), trials[1]->group()); |
| + ASSERT_EQ(trials[0]->group_name(), trials[1]->group_name()); |
| +} |
| + |
| } // namespace base |