Index: chrome/common/metrics/entropy_provider_unittest.cc |
=================================================================== |
--- chrome/common/metrics/entropy_provider_unittest.cc (revision 0) |
+++ chrome/common/metrics/entropy_provider_unittest.cc (revision 0) |
@@ -0,0 +1,263 @@ |
+// Copyright (c) 2012 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include <limits> |
+ |
+#include "base/basictypes.h" |
+#include "base/memory/scoped_ptr.h" |
+#include "base/rand_util.h" |
+#include "base/string_number_conversions.h" |
+#include "chrome/common/metrics/entropy_provider.h" |
+#include "testing/gtest/include/gtest/gtest.h" |
+ |
+class EntropyProviderTest : public testing::Test { |
+ public: |
+ // Computes SHA1-based entropy for the given |trial_name| based on |
+ // |entropy_source| |
+ double GenerateSHA1Entropy(const std::string& entropy_source, |
+ const std::string& trial_name) { |
+ scoped_ptr<SHA1EntropyProvider> sha1_provider( |
+ new SHA1EntropyProvider(entropy_source)); |
+ return sha1_provider->GetEntropyForTrial(trial_name); |
+ } |
+ |
+ // Generates permutation-based entropy for the given |trial_name| based on |
+ // |entropy_source| which must be in the range [0, entropy_max). |
+ double GeneratePermutedEntropy(uint16 entropy_source, |
+ size_t entropy_max, |
+ const std::string& trial_name) { |
+ scoped_ptr<PermutedEntropyProvider> permuted_provider( |
+ new PermutedEntropyProvider(entropy_source, entropy_max)); |
+ return permuted_provider->GetEntropyForTrial(trial_name); |
+ } |
+}; |
+ |
+ |
+TEST_F(EntropyProviderTest, UseOneTimeRandomizationSHA1) { |
+ // Simply asserts that two trials using one-time randomization |
+ // that have different names, normally generate different results. |
+ // |
+ // Note that depending on the one-time random initialization, they |
+ // _might_ actually give the same result, but we know that given |
+ // the particular client_id we use for unit tests they won't. |
+ base::FieldTrialList field_trial_list(new SHA1EntropyProvider("client_id")); |
+ scoped_refptr<base::FieldTrial> trials[] = { |
+ base::FieldTrialList::FactoryGetFieldTrial("one", 100, "default", |
+ base::FieldTrialList::kExpirationYearInFuture, 1, 1, NULL), |
+ base::FieldTrialList::FactoryGetFieldTrial("two", 100, "default", |
+ base::FieldTrialList::kExpirationYearInFuture, 1, 1, NULL), |
+ }; |
+ |
+ for (size_t i = 0; i < arraysize(trials); ++i) { |
+ trials[i]->UseOneTimeRandomization(); |
+ |
+ for (int j = 0; j < 100; ++j) |
+ trials[i]->AppendGroup("", 1); |
+ } |
+ |
+ // The trials are most likely to give different results since they have |
+ // different names. |
+ ASSERT_NE(trials[0]->group(), trials[1]->group()); |
+ ASSERT_NE(trials[0]->group_name(), trials[1]->group_name()); |
+} |
+ |
+TEST_F(EntropyProviderTest, UseOneTimeRandomizationPermuted) { |
+ // Simply asserts that two trials using one-time randomization |
+ // that have different names, normally generate different results. |
+ // |
+ // Note that depending on the one-time random initialization, they |
+ // _might_ actually give the same result, but we know that given |
+ // the particular client_id we use for unit tests they won't. |
+ const size_t kMaxEntropySize = (1 << 13); |
+ base::FieldTrialList field_trial_list( |
+ new PermutedEntropyProvider(1234, kMaxEntropySize)); |
+ scoped_refptr<base::FieldTrial> trials[] = { |
+ base::FieldTrialList::FactoryGetFieldTrial("one", 100, "default", |
+ base::FieldTrialList::kExpirationYearInFuture, 1, 1, NULL), |
+ base::FieldTrialList::FactoryGetFieldTrial("two", 100, "default", |
+ base::FieldTrialList::kExpirationYearInFuture, 1, 1, NULL), |
+ }; |
+ |
+ for (size_t i = 0; i < arraysize(trials); ++i) { |
+ trials[i]->UseOneTimeRandomization(); |
+ |
+ for (int j = 0; j < 100; ++j) |
+ trials[i]->AppendGroup("", 1); |
+ } |
+ |
+ // The trials are most likely to give different results since they have |
+ // different names. |
+ ASSERT_NE(trials[0]->group(), trials[1]->group()); |
+ ASSERT_NE(trials[0]->group_name(), trials[1]->group_name()); |
+} |
+ |
+TEST_F(EntropyProviderTest, SHA1Entropy) { |
+ double results[] = { |
+ GenerateSHA1Entropy("hi", "1"), |
+ GenerateSHA1Entropy("there", "1"), |
+ }; |
+ ASSERT_NE(results[0], results[1]); |
+ for (size_t i = 0; i < arraysize(results); ++i) { |
+ ASSERT_LE(0.0, results[i]); |
+ ASSERT_GT(1.0, results[i]); |
+ } |
+ |
+ ASSERT_EQ(GenerateSHA1Entropy("yo", "1"), |
+ GenerateSHA1Entropy("yo", "1")); |
+ ASSERT_NE(GenerateSHA1Entropy("yo", "something"), |
+ GenerateSHA1Entropy("yo", "else")); |
+} |
+ |
+TEST_F(EntropyProviderTest, PermutedEntropy) { |
+ const size_t kMaxEntropySize = (1 << 13); |
+ double results[] = { |
+ GeneratePermutedEntropy(1234, kMaxEntropySize, "1"), |
+ GeneratePermutedEntropy(4321, kMaxEntropySize, "1"), |
+ }; |
+ ASSERT_NE(results[0], results[1]); |
+ for (size_t i = 0; i < arraysize(results); ++i) { |
+ ASSERT_LE(0.0, results[i]); |
+ ASSERT_GT(1.0, results[i]); |
+ } |
+ |
+ ASSERT_EQ(GeneratePermutedEntropy(1234, kMaxEntropySize, "1"), |
+ GeneratePermutedEntropy(1234, kMaxEntropySize, "1")); |
+ ASSERT_NE(GeneratePermutedEntropy(1234, kMaxEntropySize, "something"), |
+ GeneratePermutedEntropy(1234, kMaxEntropySize, "else")); |
+} |
+ |
+TEST_F(EntropyProviderTest, SHA1EntropyIsUniform) { |
+ // Choose a random start number but go sequentially from there, so |
+ // that each test tries a different range but we never provide uniformly |
+ // distributed input data. |
+ int current_number = base::RandInt(0, std::numeric_limits<int>::max()); |
+ |
+ // The expected value of a random distribution is the average over all |
+ // samples as the number of samples approaches infinity. For a uniform |
+ // distribution from [0.0, 1.0) this would be 0.5. |
+ // |
+ // We do kSamplesBetweenChecks at a time and check if the value has converged |
+ // to a narrow interval around 0.5. A non-uniform distribution would likely |
+ // converge at something different, or not converge consistently within this |
+ // range (i.e. the test would start timing out occasionally). |
+ int kSamplesBetweenChecks = 300; |
+ int num_samples = 0; |
+ double total_value = 0.0; |
+ while (true) { |
+ for (int i = 0; i < kSamplesBetweenChecks; ++i) { |
+ total_value += GenerateSHA1Entropy( |
+ base::IntToString(current_number++), "salt"); |
+ num_samples++; |
+ } |
+ |
+ double average = total_value / num_samples; |
+ double kExpectedMin = 0.48; |
+ double kExpectedMax = 0.52; |
+ |
+ if (num_samples > 1000 && |
+ (average < kExpectedMin || average > kExpectedMax)) { |
+ // Only printed once we have enough samples that it's very unlikely |
+ // things haven't converged. |
+ printf("After %d samples, the average was %f, outside the expected\n" |
+ "range (%f, %f). We will add more samples and check after every\n" |
+ "%d samples. If the average does not converge, something\n" |
+ "is broken. If it does converge, the test will pass.\n", |
+ num_samples, average, |
+ kExpectedMin, kExpectedMax, kSamplesBetweenChecks); |
+ } else { |
+ // Success. |
+ break; |
+ } |
+ } |
+} |
+ |
+TEST_F(EntropyProviderTest, PermutedEntropyIsUniform) { |
+ // Choose a random start number but go sequentially from there, so |
+ // that each test tries a different range but we never provide uniformly |
+ // distributed input data. |
+ const size_t kMaxEntropySize = (1 << 13); |
+ int current_number = base::RandInt(0, kMaxEntropySize - 1); |
+ |
+ // The expected value of a random distribution is the average over all |
+ // samples as the number of samples approaches infinity. For a uniform |
+ // distribution from [0.0, 1.0) this would be 0.5. |
+ // |
+ // We do kSamplesBetweenChecks at a time and check if the value has converged |
+ // to a narrow interval around 0.5. A non-uniform distribution would likely |
+ // converge at something different, or not converge consistently within this |
+ // range (i.e. the test would start timing out occasionally). |
+ int kSamplesBetweenChecks = 300; |
+ int num_samples = 0; |
+ double total_value = 0.0; |
+ while (true) { |
+ for (int i = 0; i < kSamplesBetweenChecks; ++i) { |
+ total_value += GeneratePermutedEntropy(current_number++ % kMaxEntropySize, |
+ kMaxEntropySize, "salt"); |
+ num_samples++; |
+ } |
+ |
+ double average = total_value / num_samples; |
+ double kExpectedMin = 0.48; |
+ double kExpectedMax = 0.52; |
+ |
+ if (num_samples > 1000 && |
+ (average < kExpectedMin || average > kExpectedMax)) { |
+ // Only printed once we have enough samples that it's very unlikely |
+ // things haven't converged. |
+ printf("After %d samples, the average was %f, outside the expected\n" |
+ "range (%f, %f). We will add more samples and check after every\n" |
+ "%d samples. If the average does not converge, something\n" |
+ "is broken. If it does converge, the test will pass.\n", |
+ num_samples, average, |
+ kExpectedMin, kExpectedMax, kSamplesBetweenChecks); |
+ } else { |
+ // Success. |
+ break; |
+ } |
+ } |
+} |
+ |
TEST(RandUtilTest, SeededRandGeneratorIsUniform) {
  // Verifies that SeededRandGenerator has a uniform distribution.
  //
  // Mirrors RandUtilTest.RandGeneratorIsUniform in base/rand_util_unittest.cc.

  // NOTE(review): the top of the sampled range is 3/4 of the uint64 range —
  // deliberately not a power of two, presumably so that modulo-style bias
  // would skew the observed average; confirm against the base test.
  const uint64 kTopOfRange = (std::numeric_limits<uint64>::max() / 4ULL) * 3ULL;
  const uint64 kExpectedAverage = kTopOfRange / 2ULL;
  const uint64 kAllowedVariance = kExpectedAverage / 50ULL;  // +/- 2%
  const int kMinAttempts = 1000;
  const int kMaxAttempts = 1000000;

  // Seeds are derived from realistic trial names, exercising the same path
  // used when hashing a study name into a generator seed.
  const std::string trial_names[] = {
    "TestTrial",
    "AnotherTestTrial",
    "NewTabButton",
  };

  for (size_t i = 0; i < arraysize(trial_names); ++i) {
    const uint32 seed = internal::HashName(trial_names[i]);
    internal::SeededRandGenerator rand_generator(seed);

    double cumulative_average = 0.0;
    int count = 0;
    while (count < kMaxAttempts) {
      uint64 value = rand_generator(kTopOfRange);
      // Incremental running mean; avoids accumulating a sum that could lose
      // precision or overflow over a million uint64 samples.
      cumulative_average = (count * cumulative_average + value) / (count + 1);

      // Don't quit too quickly for things to start converging, or we may have
      // a false positive.
      if (count > kMinAttempts &&
          kExpectedAverage - kAllowedVariance < cumulative_average &&
          cumulative_average < kExpectedAverage + kAllowedVariance) {
        break;
      }

      ++count;
    }

    // If the loop exhausted kMaxAttempts, the average never entered the
    // allowed band — treat that as non-uniform output.
    ASSERT_LT(count, kMaxAttempts) << "Expected average was " <<
        kExpectedAverage << ", average ended at " << cumulative_average;
  }
}
+ |