Chromium Code Reviews| Index: chrome/common/metrics/entropy_provider_unittest.cc |
| =================================================================== |
| --- chrome/common/metrics/entropy_provider_unittest.cc (revision 0) |
| +++ chrome/common/metrics/entropy_provider_unittest.cc (revision 0) |
| @@ -0,0 +1,274 @@ |
| +// Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
#include <algorithm>
#include <cmath>
#include <limits>
#include <numeric>
#include <vector>

#include "base/basictypes.h"
#include "base/guid.h"
#include "base/memory/scoped_ptr.h"
#include "base/rand_util.h"
#include "base/string_number_conversions.h"
#include "chrome/common/metrics/entropy_provider.h"
#include "testing/gtest/include/gtest/gtest.h"
| + |
namespace {

// Computes the (population) standard deviation of distribution |values|.
double ComputeStandardDeviation(const std::vector<int>& values) {
  // Use doubles throughout: accumulating into int would truncate the mean
  // and the sum of squares, skewing the resulting deviation.
  const double sum = std::accumulate(values.begin(), values.end(), 0.0);
  const double mean = sum / values.size();
  const double sum_of_squares = std::inner_product(values.begin(),
                                                   values.end(),
                                                   values.begin(), 0.0);
  // Population variance via the identity E[X^2] - (E[X])^2.
  const double variance = sum_of_squares / values.size() - mean * mean;
  return std::sqrt(variance);
}

}  // namespace
| + |
| + |
| +class EntropyProviderTest : public testing::Test { |
| + public: |
| + // Computes SHA1-based entropy for the given |trial_name| based on |
| + // |entropy_source| |
| + double GenerateSHA1Entropy(const std::string& entropy_source, |
| + const std::string& trial_name) { |
| + SHA1EntropyProvider sha1_provider(entropy_source); |
| + return sha1_provider.GetEntropyForTrial(trial_name); |
| + } |
| + |
| + // Generates permutation-based entropy for the given |trial_name| based on |
| + // |entropy_source| which must be in the range [0, entropy_max). |
| + double GeneratePermutedEntropy(uint16 entropy_source, |
| + size_t entropy_max, |
| + const std::string& trial_name) { |
| + PermutedEntropyProvider permuted_provider(entropy_source, entropy_max); |
| + return permuted_provider.GetEntropyForTrial(trial_name); |
| + } |
| +}; |
| + |
| + |
| +TEST_F(EntropyProviderTest, UseOneTimeRandomizationSHA1) { |
| + // Simply asserts that two trials using one-time randomization |
| + // that have different names, normally generate different results. |
| + // |
| + // Note that depending on the one-time random initialization, they |
| + // _might_ actually give the same result, but we know that given |
| + // the particular client_id we use for unit tests they won't. |
| + base::FieldTrialList field_trial_list(new SHA1EntropyProvider("client_id")); |
| + scoped_refptr<base::FieldTrial> trials[] = { |
| + base::FieldTrialList::FactoryGetFieldTrial("one", 100, "default", |
| + base::FieldTrialList::kExpirationYearInFuture, 1, 1, NULL), |
| + base::FieldTrialList::FactoryGetFieldTrial("two", 100, "default", |
| + base::FieldTrialList::kExpirationYearInFuture, 1, 1, NULL), |
| + }; |
| + |
| + for (size_t i = 0; i < arraysize(trials); ++i) { |
| + trials[i]->UseOneTimeRandomization(); |
| + |
| + for (int j = 0; j < 100; ++j) |
| + trials[i]->AppendGroup("", 1); |
| + } |
| + |
| + // The trials are most likely to give different results since they have |
| + // different names. |
| + ASSERT_NE(trials[0]->group(), trials[1]->group()); |
| + ASSERT_NE(trials[0]->group_name(), trials[1]->group_name()); |
| +} |
| + |
| +TEST_F(EntropyProviderTest, UseOneTimeRandomizationPermuted) { |
| + // Simply asserts that two trials using one-time randomization |
| + // that have different names, normally generate different results. |
| + // |
| + // Note that depending on the one-time random initialization, they |
| + // _might_ actually give the same result, but we know that given |
| + // the particular client_id we use for unit tests they won't. |
| + const size_t kMaxLowEntropySize = (1 << 13); |
| + base::FieldTrialList field_trial_list( |
| + new PermutedEntropyProvider(1234, kMaxLowEntropySize)); |
| + scoped_refptr<base::FieldTrial> trials[] = { |
| + base::FieldTrialList::FactoryGetFieldTrial("one", 100, "default", |
| + base::FieldTrialList::kExpirationYearInFuture, 1, 1, NULL), |
| + base::FieldTrialList::FactoryGetFieldTrial("two", 100, "default", |
| + base::FieldTrialList::kExpirationYearInFuture, 1, 1, NULL), |
| + }; |
| + |
| + for (size_t i = 0; i < arraysize(trials); ++i) { |
| + trials[i]->UseOneTimeRandomization(); |
| + |
| + for (int j = 0; j < 100; ++j) |
| + trials[i]->AppendGroup("", 1); |
| + } |
| + |
| + // The trials are most likely to give different results since they have |
| + // different names. |
| + ASSERT_NE(trials[0]->group(), trials[1]->group()); |
| + ASSERT_NE(trials[0]->group_name(), trials[1]->group_name()); |
| +} |
| + |
| +TEST_F(EntropyProviderTest, SHA1Entropy) { |
| + const double results[] = { |
| + GenerateSHA1Entropy("hi", "1"), |
| + GenerateSHA1Entropy("there", "1"), |
| + }; |
| + ASSERT_NE(results[0], results[1]); |
| + for (size_t i = 0; i < arraysize(results); ++i) { |
| + ASSERT_LE(0.0, results[i]); |
| + ASSERT_GT(1.0, results[i]); |
| + } |
| + |
| + ASSERT_EQ(GenerateSHA1Entropy("yo", "1"), |
| + GenerateSHA1Entropy("yo", "1")); |
| + ASSERT_NE(GenerateSHA1Entropy("yo", "something"), |
| + GenerateSHA1Entropy("yo", "else")); |
| +} |
| + |
| +TEST_F(EntropyProviderTest, PermutedEntropy) { |
| + const size_t kMaxLowEntropySize = (1 << 13); |
| + const double results[] = { |
| + GeneratePermutedEntropy(1234, kMaxLowEntropySize, "1"), |
| + GeneratePermutedEntropy(4321, kMaxLowEntropySize, "1"), |
| + }; |
| + ASSERT_NE(results[0], results[1]); |
| + for (size_t i = 0; i < arraysize(results); ++i) { |
| + ASSERT_LE(0.0, results[i]); |
| + ASSERT_GT(1.0, results[i]); |
| + } |
| + |
| + ASSERT_EQ(GeneratePermutedEntropy(1234, kMaxLowEntropySize, "1"), |
| + GeneratePermutedEntropy(1234, kMaxLowEntropySize, "1")); |
| + ASSERT_NE(GeneratePermutedEntropy(1234, kMaxLowEntropySize, "something"), |
| + GeneratePermutedEntropy(1234, kMaxLowEntropySize, "else")); |
| +} |
| + |
| +TEST_F(EntropyProviderTest, SHA1EntropyIsUniform) { |
| + const size_t kMaxLowEntropySize = (1 << 13); |
| + const int kBucketCount = 20; |
| + const int kIterationCount = 100000; |
|
Ilya Sherman
2012/08/21 03:42:27
nit: size_t throughout?
Alexei Svitkine (slow)
2012/08/21 14:25:49
Done.
|
| + |
| + const std::string trial_names[] = { |
| + "TestTrial", |
| + "AnotherTestTrial", |
| + "NewTabButton", |
| + }; |
| + |
| + for (size_t i = 0; i < arraysize(trial_names); ++i) { |
| + std::vector<int> distribution(kBucketCount); |
| + |
| + for (int j = 0; j < kIterationCount; ++j) { |
| + // Use a random GUID + 13 bits of entropy to match how the |
| + // SHA1EntropyProvider is used in metrics_service.cc. |
| + const int low_entropy_source = |
| + static_cast<uint16>(base::RandInt(0, kMaxLowEntropySize - 1)); |
| + const std::string high_entropy_source = |
| + base::GenerateGUID() + base::IntToString(low_entropy_source); |
| + const double entropy_value = |
| + GenerateSHA1Entropy(high_entropy_source, trial_names[i]); |
| + const int bucket = static_cast<int>(kBucketCount * entropy_value); |
| + ASSERT_LT(bucket, kBucketCount); |
| + distribution[bucket] += 1; |
| + } |
| + |
| + int min_value = std::numeric_limits<int>::max(); |
| + int max_value = std::numeric_limits<int>::min(); |
| + for (int j = 0; j < kBucketCount; ++j) { |
| + min_value = std::min(min_value, distribution[j]); |
| + max_value = std::max(max_value, distribution[j]); |
| + } |
| + |
| + // TODO(asvitkine): Figure out pass / fail criteria. |
| + printf("min = %d, max = %d, stddev = %f\n", min_value, max_value, |
| + ComputeStandardDeviation(distribution)); |
| + } |
| +} |
| + |
| +TEST_F(EntropyProviderTest, PermutedEntropyIsUniform) { |
| + const size_t kMaxLowEntropySize = (1 << 13); |
| + const int kBucketCount = 20; |
| + const int kIterationCount = 100000; |
| + |
| + const std::string trial_names[] = { |
| + "TestTrial", |
| + "AnotherTestTrial", |
| + "NewTabButton", |
| + }; |
| + |
| + for (size_t i = 0; i < arraysize(trial_names); ++i) { |
| + std::vector<int> distribution(kBucketCount); |
| + |
| + // Note: Given a trial name, the computed mapping will be the same. |
| + // As a performance optimization, pre-compute the mapping once per trial |
| + // name and index into it each iteration. |
|
Ilya Sherman
2012/08/21 03:42:27
Is this performance optimization needed for the te
Alexei Svitkine (slow)
2012/08/21 14:25:49
Unfortunately, yes it is needed. Without it, this
|
| + std::vector<uint16> mapping(kMaxLowEntropySize); |
| + metrics_internal::PermuteMappingUsingTrialName(trial_names[i], &mapping); |
| + |
| + for (int j = 0; j < kIterationCount; ++j) { |
| + const int low_entropy_source = |
| + static_cast<uint16>(base::RandInt(0, kMaxLowEntropySize - 1)); |
| + const double entropy_value = |
| + mapping[low_entropy_source] / static_cast<double>(kMaxLowEntropySize); |
| + const int bucket = static_cast<int>(kBucketCount * entropy_value); |
| + ASSERT_LT(bucket, kBucketCount); |
| + distribution[bucket] += 1; |
| + } |
| + |
| + int min_value = std::numeric_limits<int>::max(); |
| + int max_value = std::numeric_limits<int>::min(); |
| + for (int j = 0; j < kBucketCount; ++j) { |
| + min_value = std::min(min_value, distribution[j]); |
| + max_value = std::max(max_value, distribution[j]); |
| + } |
| + |
| + // TODO(asvitkine): Figure out pass / fail criteria. |
| + printf("min = %d, max = %d, stddev = %f\n", min_value, max_value, |
| + ComputeStandardDeviation(distribution)); |
| + } |
| +} |
| + |
| +TEST_F(EntropyProviderTest, SeededRandGeneratorIsUniform) { |
| + // Verifies that SeededRandGenerator has a uniform distribution. |
| + // |
| + // Mirrors RandUtilTest.RandGeneratorIsUniform in base/rand_util_unittest.cc. |
| + |
| + const uint32 kTopOfRange = (std::numeric_limits<uint32>::max() / 4ULL) * 3ULL; |
| + const uint32 kExpectedAverage = kTopOfRange / 2ULL; |
| + const uint32 kAllowedVariance = kExpectedAverage / 50ULL; // +/- 2% |
| + const int kMinAttempts = 1000; |
| + const int kMaxAttempts = 1000000; |
| + |
| + const std::string trial_names[] = { |
| + "TestTrial", |
| + "AnotherTestTrial", |
| + "NewTabButton", |
| + }; |
| + |
| + for (size_t i = 0; i < arraysize(trial_names); ++i) { |
| + const uint32 seed = metrics_internal::HashName(trial_names[i]); |
| + metrics_internal::SeededRandGenerator rand_generator(seed); |
| + |
| + double cumulative_average = 0.0; |
| + int count = 0; |
| + while (count < kMaxAttempts) { |
| + uint32 value = rand_generator(kTopOfRange); |
| + cumulative_average = (count * cumulative_average + value) / (count + 1); |
| + |
| + // Don't quit too quickly for things to start converging, or we may have |
| + // a false positive. |
| + if (count > kMinAttempts && |
| + kExpectedAverage - kAllowedVariance < cumulative_average && |
| + cumulative_average < kExpectedAverage + kAllowedVariance) { |
| + break; |
| + } |
| + |
| + ++count; |
| + } |
| + |
| + ASSERT_LT(count, kMaxAttempts) << "Expected average was " << |
| + kExpectedAverage << ", average ended at " << cumulative_average << |
| + ", for trial " << trial_names[i]; |
| + } |
| +} |
| + |