Index: base/metrics/field_trial_unittest.cc
diff --git a/base/metrics/field_trial_unittest.cc b/base/metrics/field_trial_unittest.cc
index 0af6d6082ac897ecb4d542dd82ea9de82575b5ae..7b2e5c0c374c2c688ba558d59e8e64960d4fd539 100644
--- a/base/metrics/field_trial_unittest.cc
+++ b/base/metrics/field_trial_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
@@ -6,9 +6,14 @@
 #include "base/metrics/field_trial.h"
+#include "base/perftimer.h"
+#include "base/rand_util.h"
 #include "base/stringprintf.h"
+#include "base/string_number_conversions.h"
 #include "testing/gtest/include/gtest/gtest.h"
+#include <limits>
+
 namespace base {
 class FieldTrialTest : public testing::Test {
@@ -295,4 +300,103 @@ TEST_F(FieldTrialTest, MakeName) {
             FieldTrial::MakeName("Histogram", "Field Trial"));
 }
+TEST_F(FieldTrialTest, HashClientId) {
+  double results[] = {
+    FieldTrial::HashClientId("hi", "1"),
+    FieldTrial::HashClientId("there", "1"),
+  };
+  ASSERT_NE(results[0], results[1]);
+  for (size_t i = 0; i < arraysize(results); ++i) {
+    ASSERT_LE(0.0, results[i]);
+    ASSERT_GT(1.0, results[i]);
+  }
+
+  ASSERT_EQ(FieldTrial::HashClientId("yo", "1"),
+            FieldTrial::HashClientId("yo", "1"));
+  ASSERT_NE(FieldTrial::HashClientId("yo", "something"),
+            FieldTrial::HashClientId("yo", "else"));
+}
+
+TEST_F(FieldTrialTest, HashClientIdIsUniform) {
+  PerfTimer timer;
+  bool success = false;
+  while (!success && timer.Elapsed() < TimeDelta::FromSeconds(20)) {
jar (doing other things), 2011/04/21 22:10:02:
You probably don't need to (and shouldn't) stop in ...

Paweł Hajdan Jr., 2011/04/26 10:23:10:
Just in case you decide to keep a timeout here, pl...

Jói, 2011/04/28 01:03:50:
Thanks, I'm removing the timeout altogether.

Jói, 2011/04/28 01:03:50:
The way the test was written (with or without the ...
+    // A uniform distribution should result in an expected average value
+    // of 0.5. The actual average for a large number of samples should,
+    // with extremely high probability, be very close to this.
+    const int kNumSamples = 1000;
+    double total_value = 0.0;
+
+    // Choose a random start number but go sequentially from there, so
+    // that each run tries a different range, while the input values
+    // themselves are never uniformly distributed.
+    int start_number =
+        RandInt(0, std::numeric_limits<int>::max() - kNumSamples);
+    for (int i = 0; i < kNumSamples; ++i) {
+      total_value += FieldTrial::HashClientId(
+          IntToString(start_number + i), "salt");
+    }
+
+    double average = total_value / kNumSamples;
+    const double kExpectedMin = 0.45;
+    const double kExpectedMax = 0.55;
+
+    if (average < kExpectedMin || average > kExpectedMax) {
+      printf("Average was %f, outside the expected range (%f, %f).\n"
+             "Values far outside the range may indicate a real problem,\n"
+             "whereas values just outside the range are likely just flukes.\n"
+             "(Will probably retry and print PASSED.)\n",
+             average, kExpectedMin, kExpectedMax);
+    } else {
+      success = true;
+    }
+  }
+
+  ASSERT_TRUE(success);
+}
+
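The "extremely high probability" above can be quantified: each Uniform(0,1)
sample has mean 0.5 and standard deviation 1/sqrt(12) ~ 0.289, so the average
of 1000 independent samples has a standard error of about 0.009, and the edges
of the 0.45..0.55 window sit roughly 5.5 standard errors from the expected
mean. A minimal standalone sketch of that arithmetic (not part of the patch
under review, using only the C++ standard library):

// Sketch only: why the 0.45..0.55 window is generous for 1000 uniform samples.
#include <cmath>
#include <cstdio>

int main() {
  const int kNumSamples = 1000;
  // A Uniform(0,1) variable has mean 0.5 and standard deviation 1/sqrt(12).
  const double kStdDev = 1.0 / std::sqrt(12.0);
  // Standard error of the mean over kNumSamples independent samples.
  const double kStdErr = kStdDev / std::sqrt(static_cast<double>(kNumSamples));
  // Half-width of the acceptance window, expressed in standard errors.
  const double kSigmas = 0.05 / kStdErr;
  std::printf("std error = %f, window half-width = %.1f sigma\n",
              kStdErr, kSigmas);
  return 0;
}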
+TEST_F(FieldTrialTest, UseOneTimeRandomization) {
+  // Simply asserts that two trials using one-time randomization that
+  // have different names normally generate different results.
+  //
+  // Note that depending on the one-time random initialization, they
+  // _might_ actually give the same result, but we know that given
+  // this particular initialization they won't.
+  FieldTrialList::EnableOneTimeRandomization("this is cheating");
+
+  scoped_refptr<FieldTrial> trials[] = {
+    new FieldTrial("one", 100, "default", next_year_, 1, 1),
+    new FieldTrial("two", 100, "default", next_year_, 1, 1),
+  };
+
+  for (size_t i = 0; i < arraysize(trials); ++i) {
+    trials[i]->UseOneTimeRandomization();
+
+    for (int j = 0; j < 100; ++j) {
+      trials[i]->AppendGroup("", 1);
+    }
+  }
+
+  // The trials are most likely to give different results since they have
+  // different names.
+  ASSERT_NE(trials[0]->group(), trials[1]->group());
+  ASSERT_NE(trials[0]->group_name(), trials[1]->group_name());
+}
+
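The "most likely to give different results" comment can also be quantified,
under the assumption that the two trials' one-time hashes behave like
independent uniform draws over 100 equally weighted groups: the collision
probability is the sum of the squared group probabilities,
100 * (1/100)^2 = 1/100, so the ASSERT_NE checks would hold about 99% of the
time for a random client id; the fixed id above makes the outcome
deterministic for this test. A standalone sketch of that calculation (not part
of the patch under review):

// Sketch only: collision odds for two independent uniform draws over
// 100 equally weighted groups.
#include <cstdio>

int main() {
  const int kNumGroups = 100;
  const double kGroupProbability = 1.0 / kNumGroups;
  // P(both trials pick the same group) = sum over groups of p^2.
  const double kCollision = kNumGroups * kGroupProbability * kGroupProbability;
  std::printf("P(same group) = %.2f, P(different groups) = %.2f\n",
              kCollision, 1.0 - kCollision);
  return 0;
}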
+TEST_F(FieldTrialTest, DisableImmediately) {
+  FieldTrial* trial =
+      new FieldTrial("trial", 100, "default", next_year_, 12, 31);
+  trial->Disable();
+  ASSERT_EQ("default", trial->group_name());
+  ASSERT_EQ(FieldTrial::kDefaultGroupNumber, trial->group());
+}
+
+TEST_F(FieldTrialTest, DisableAfterInitialization) {
+  FieldTrial* trial =
+      new FieldTrial("trial", 100, "default", next_year_, 12, 31);
+  trial->AppendGroup("non_default", 100);
+  ASSERT_EQ("non_default", trial->group_name());
+  trial->Disable();
+  ASSERT_EQ("default", trial->group_name());
jar (doing other things), 2011/04/21 22:10:02:
Good tests. ;-)

Jói, 2011/04/28 01:03:50:
Thanks :)
+}
+
 }  // namespace base