Chromium Code Reviews

Unified Diff: net/proxy/multi_threaded_proxy_resolver_unittest.cc

Issue 2822043: Add the capability to run multiple proxy PAC scripts in parallel.... (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: Re-upload after revert (created 10 years, 5 months ago)
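
The patch replaces SingleThreadedProxyResolver with MultiThreadedProxyResolver, which fans PAC evaluations out across a pool of lazily provisioned worker threads, each backed by a ProxyResolver obtained from a caller-supplied ProxyResolverFactory. A minimal usage sketch, mirroring the construction the tests in this file exercise (all identifiers come from this patch; error handling omitted):

    // Sketch only -- mirrors the pattern used throughout the tests below.
    scoped_ptr<MockProxyResolver> mock(new MockProxyResolver);
    const size_t kNumThreads = 3u;
    // Judging by the tests, the resolver takes ownership of the factory.
    MultiThreadedProxyResolver resolver(
        new ForwardingProxyResolverFactory(mock.get()), kNumThreads);

    // The PAC script must be set before issuing requests; each lazily
    // provisioned worker thread is initialized with the current script.
    TestCompletionCallback set_script_callback;
    int rv = resolver.SetPacScriptByData(ASCIIToUTF16("pac script bytes"),
                                         &set_script_callback);
    if (rv == ERR_IO_PENDING)
      rv = set_script_callback.WaitForResult();  // OK on success

    // Requests now fan out across up to kNumThreads worker threads.
    ProxyInfo results;
    TestCompletionCallback callback;
    ProxyResolver::RequestHandle request;
    rv = resolver.GetProxyForURL(GURL("http://request0"), &results,
                                 &callback, &request, BoundNetLog());
    // rv is ERR_IO_PENDING here; |callback| runs once a worker completes.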
Index: net/proxy/multi_threaded_proxy_resolver_unittest.cc
===================================================================
--- net/proxy/multi_threaded_proxy_resolver_unittest.cc (revision 51914)
+++ net/proxy/multi_threaded_proxy_resolver_unittest.cc (working copy)
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "net/proxy/multi_threaded_proxy_resolver.h"
+
+#include "base/stl_util-inl.h"
#include "base/string_util.h"
#include "base/waitable_event.h"
#include "googleurl/src/gurl.h"
@@ -10,14 +13,14 @@
#include "net/base/net_errors.h"
#include "net/base/test_completion_callback.h"
#include "net/proxy/proxy_info.h"
-#include "net/proxy/single_threaded_proxy_resolver.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace net {
+
namespace {
// A synchronous mock ProxyResolver implementation, which can be used in
-// conjunction with SingleThreadedProxyResolver.
+// conjunction with MultiThreadedProxyResolver.
// - returns a single-item proxy list with the query's host.
class MockProxyResolver : public ProxyResolver {
public:
@@ -69,6 +72,7 @@
}
int purge_count() const { return purge_count_; }
+ int request_count() const { return request_count_; }
const string16& last_pac_script() const { return last_pac_script_; }
@@ -79,7 +83,7 @@
private:
void CheckIsOnWorkerThread() {
// We should be running on the worker thread -- while we don't know the
- // message loop of SingleThreadedProxyResolver's worker thread, we do
+ // message loop of MultiThreadedProxyResolver's worker thread, we do
// know that it is going to be distinct from the loop running the
// test, so at least make sure it isn't the main loop.
EXPECT_NE(MessageLoop::current(), wrong_loop_);
@@ -141,10 +145,88 @@
base::WaitableEvent blocked_;
};
-TEST(SingleThreadedProxyResolverTest, Basic) {
- MockProxyResolver* mock = new MockProxyResolver;
- SingleThreadedProxyResolver resolver(mock);
+// ForwardingProxyResolver forwards all requests to |impl|.
+class ForwardingProxyResolver : public ProxyResolver {
+ public:
+ explicit ForwardingProxyResolver(ProxyResolver* impl)
+ : ProxyResolver(impl->expects_pac_bytes()),
+ impl_(impl) {}
+ virtual int GetProxyForURL(const GURL& query_url,
+ ProxyInfo* results,
+ CompletionCallback* callback,
+ RequestHandle* request,
+ const BoundNetLog& net_log) {
+ return impl_->GetProxyForURL(
+ query_url, results, callback, request, net_log);
+ }
+
+ virtual void CancelRequest(RequestHandle request) {
+ impl_->CancelRequest(request);
+ }
+
+ virtual int SetPacScript(const GURL& pac_url,
+ const string16& script,
+ CompletionCallback* callback) {
+ if (impl_->expects_pac_bytes())
+ return impl_->SetPacScriptByData(script, callback);
+ else
+ return impl_->SetPacScriptByUrl(pac_url, callback);
+ }
+
+ virtual void PurgeMemory() {
+ impl_->PurgeMemory();
+ }
+
+ private:
+ ProxyResolver* impl_;
+};
+
+// This factory returns ProxyResolvers that forward all requests to
+// |resolver|.
+class ForwardingProxyResolverFactory : public ProxyResolverFactory {
+ public:
+ explicit ForwardingProxyResolverFactory(ProxyResolver* resolver)
+ : ProxyResolverFactory(resolver->expects_pac_bytes()),
+ resolver_(resolver) {}
+
+ virtual ProxyResolver* CreateProxyResolver() {
+ return new ForwardingProxyResolver(resolver_);
+ }
+
+ private:
+ ProxyResolver* resolver_;
+};
+
+// This factory returns new instances of BlockableProxyResolver.
+class BlockableProxyResolverFactory : public ProxyResolverFactory {
+ public:
+ BlockableProxyResolverFactory() : ProxyResolverFactory(true) {}
+
+ ~BlockableProxyResolverFactory() {
+ STLDeleteElements(&resolvers_);
+ }
+
+ virtual ProxyResolver* CreateProxyResolver() {
+ BlockableProxyResolver* resolver = new BlockableProxyResolver;
+ resolvers_.push_back(resolver);
+ return new ForwardingProxyResolver(resolver);
+ }
+
+ std::vector<BlockableProxyResolver*> resolvers() {
+ return resolvers_;
+ }
+
+ private:
+ std::vector<BlockableProxyResolver*> resolvers_;
+};
+
+TEST(MultiThreadedProxyResolverTest, SingleThread_Basic) {
+ const size_t kNumThreads = 1u;
+ scoped_ptr<MockProxyResolver> mock(new MockProxyResolver);
+ MultiThreadedProxyResolver resolver(
+ new ForwardingProxyResolverFactory(mock.get()), kNumThreads);
+
int rv;
EXPECT_TRUE(resolver.expects_pac_bytes());
@@ -173,7 +255,11 @@
// The mock proxy resolver should have written 1 log entry. And
// on completion, this should have been copied into |log0|.
- EXPECT_EQ(1u, log0.entries().size());
+ // We also have 1 log entry that was emitted by the
+ // MultiThreadedProxyResolver.
+ ASSERT_EQ(2u, log0.entries().size());
+ EXPECT_EQ(NetLog::TYPE_SUBMITTED_TO_RESOLVER_THREAD,
+ log0.entries()[0].type);
// Start 3 more requests (request1 to request3).
@@ -225,12 +311,20 @@
// Tests that the NetLog is updated to include the time the request was waiting
// to be scheduled to a thread.
-TEST(SingleThreadedProxyResolverTest, UpdatesNetLogWithThreadWait) {
- BlockableProxyResolver* mock = new BlockableProxyResolver;
- SingleThreadedProxyResolver resolver(mock);
+TEST(MultiThreadedProxyResolverTest,
+ SingleThread_UpdatesNetLogWithThreadWait) {
+ const size_t kNumThreads = 1u;
+ scoped_ptr<BlockableProxyResolver> mock(new BlockableProxyResolver);
+ MultiThreadedProxyResolver resolver(
+ new ForwardingProxyResolverFactory(mock.get()), kNumThreads);
int rv;
+ // Initialize the resolver.
+ TestCompletionCallback init_callback;
+ rv = resolver.SetPacScriptByData(ASCIIToUTF16("foo"), &init_callback);
+ EXPECT_EQ(OK, init_callback.WaitForResult());
+
// Block the proxy resolver, so no request can complete.
mock->Block();
@@ -265,42 +359,53 @@
mock->Unblock();
// Check that request 0 completed as expected.
- // The NetLog only has 1 entry (that came from the mock proxy resolver.)
+ // The NetLog has 1 entry that came from the MultiThreadedProxyResolver, and
+ // 1 entry from the mock proxy resolver.
EXPECT_EQ(0, callback0.WaitForResult());
EXPECT_EQ("PROXY request0:80", results0.ToPacString());
- ASSERT_EQ(1u, log0.entries().size());
+ ASSERT_EQ(2u, log0.entries().size());
+ EXPECT_EQ(NetLog::TYPE_SUBMITTED_TO_RESOLVER_THREAD,
+ log0.entries()[0].type);
// Check that request 1 completed as expected.
EXPECT_EQ(1, callback1.WaitForResult());
EXPECT_EQ("PROXY request1:80", results1.ToPacString());
- ASSERT_EQ(3u, log1.entries().size());
+ ASSERT_EQ(4u, log1.entries().size());
EXPECT_TRUE(LogContainsBeginEvent(
log1.entries(), 0,
- NetLog::TYPE_WAITING_FOR_SINGLE_PROXY_RESOLVER_THREAD));
+ NetLog::TYPE_WAITING_FOR_PROXY_RESOLVER_THREAD));
EXPECT_TRUE(LogContainsEndEvent(
log1.entries(), 1,
- NetLog::TYPE_WAITING_FOR_SINGLE_PROXY_RESOLVER_THREAD));
+ NetLog::TYPE_WAITING_FOR_PROXY_RESOLVER_THREAD));
// Check that request 2 completed as expected.
EXPECT_EQ(2, callback2.WaitForResult());
EXPECT_EQ("PROXY request2:80", results2.ToPacString());
- ASSERT_EQ(3u, log2.entries().size());
+ ASSERT_EQ(4u, log2.entries().size());
EXPECT_TRUE(LogContainsBeginEvent(
log2.entries(), 0,
- NetLog::TYPE_WAITING_FOR_SINGLE_PROXY_RESOLVER_THREAD));
+ NetLog::TYPE_WAITING_FOR_PROXY_RESOLVER_THREAD));
EXPECT_TRUE(LogContainsEndEvent(
log2.entries(), 1,
- NetLog::TYPE_WAITING_FOR_SINGLE_PROXY_RESOLVER_THREAD));
+ NetLog::TYPE_WAITING_FOR_PROXY_RESOLVER_THREAD));
}
// Cancel a request which is in progress, and then cancel a request which
// is pending.
-TEST(SingleThreadedProxyResolverTest, CancelRequest) {
- BlockableProxyResolver* mock = new BlockableProxyResolver;
- SingleThreadedProxyResolver resolver(mock);
+TEST(MultiThreadedProxyResolverTest, SingleThread_CancelRequest) {
+ const size_t kNumThreads = 1u;
+ scoped_ptr<BlockableProxyResolver> mock(new BlockableProxyResolver);
+ MultiThreadedProxyResolver resolver(
+ new ForwardingProxyResolverFactory(mock.get()),
+ kNumThreads);
int rv;
+ // Initialize the resolver.
+ TestCompletionCallback init_callback;
+ rv = resolver.SetPacScriptByData(ASCIIToUTF16("foo"), &init_callback);
+ EXPECT_EQ(OK, init_callback.WaitForResult());
+
// Block the proxy resolver, so no request can complete.
mock->Block();
@@ -361,15 +466,22 @@
EXPECT_FALSE(callback2.have_result());
}
-// Test that deleting SingleThreadedProxyResolver while requests are
+// Test that deleting MultiThreadedProxyResolver while requests are
// outstanding cancels them (and doesn't leak anything).
-TEST(SingleThreadedProxyResolverTest, CancelRequestByDeleting) {
- BlockableProxyResolver* mock = new BlockableProxyResolver;
- scoped_ptr<SingleThreadedProxyResolver> resolver(
- new SingleThreadedProxyResolver(mock));
+TEST(MultiThreadedProxyResolverTest, SingleThread_CancelRequestByDeleting) {
+ const size_t kNumThreads = 1u;
+ scoped_ptr<BlockableProxyResolver> mock(new BlockableProxyResolver);
+ scoped_ptr<MultiThreadedProxyResolver> resolver(
+ new MultiThreadedProxyResolver(
+ new ForwardingProxyResolverFactory(mock.get()), kNumThreads));
int rv;
+ // Initialize the resolver.
+ TestCompletionCallback init_callback;
+ rv = resolver->SetPacScriptByData(ASCIIToUTF16("foo"), &init_callback);
+ EXPECT_EQ(OK, init_callback.WaitForResult());
+
// Block the proxy resolver, so no request can complete.
mock->Block();
@@ -397,14 +509,14 @@
mock->WaitUntilBlocked();
// Add some latency, to improve the chance that when
- // SingleThreadedProxyResolver is deleted below we are still running inside
+ // MultiThreadedProxyResolver is deleted below we are still running inside
// of the worker thread. The test will pass regardless, so this race doesn't
// cause flakiness. However the destruction during execution is a more
// interesting case to test.
mock->SetResolveLatency(100);
// Unblock the worker thread and delete the underlying
- // SingleThreadedProxyResolver immediately.
+ // MultiThreadedProxyResolver immediately.
mock->Unblock();
resolver.reset();
@@ -418,59 +530,213 @@
}
// Cancel an outstanding call to SetPacScriptByData().
-TEST(SingleThreadedProxyResolverTest, CancelSetPacScript) {
- BlockableProxyResolver* mock = new BlockableProxyResolver;
- SingleThreadedProxyResolver resolver(mock);
+TEST(MultiThreadedProxyResolverTest, SingleThread_CancelSetPacScript) {
+ const size_t kNumThreads = 1u;
+ scoped_ptr<BlockableProxyResolver> mock(new BlockableProxyResolver);
+ MultiThreadedProxyResolver resolver(
+ new ForwardingProxyResolverFactory(mock.get()), kNumThreads);
int rv;
- // Block the proxy resolver, so no request can complete.
- mock->Block();
+ TestCompletionCallback set_pac_script_callback;
+ rv = resolver.SetPacScriptByData(ASCIIToUTF16("data"),
+ &set_pac_script_callback);
+ EXPECT_EQ(ERR_IO_PENDING, rv);
- // Start request 0.
- ProxyResolver::RequestHandle request0;
- TestCompletionCallback callback0;
- ProxyInfo results0;
+ // Cancel the SetPacScriptByData request.
+ resolver.CancelSetPacScript();
+
+ // Start another SetPacScript request.
+ TestCompletionCallback set_pac_script_callback2;
+ rv = resolver.SetPacScriptByData(ASCIIToUTF16("data2"),
+ &set_pac_script_callback2);
+ EXPECT_EQ(ERR_IO_PENDING, rv);
+
+ // Wait for the initialization to complete.
+
+ rv = set_pac_script_callback2.WaitForResult();
+ EXPECT_EQ(0, rv);
+ EXPECT_EQ(ASCIIToUTF16("data2"), mock->last_pac_script());
+
+ // The first SetPacScript callback should never have been completed.
+ EXPECT_FALSE(set_pac_script_callback.have_result());
+}
+
+// Tests setting the PAC script once, lazily creating new threads, and
+// cancelling requests.
+TEST(MultiThreadedProxyResolverTest, ThreeThreads_Basic) {
+ const size_t kNumThreads = 3u;
+ BlockableProxyResolverFactory* factory = new BlockableProxyResolverFactory;
+ MultiThreadedProxyResolver resolver(factory, kNumThreads);
+
+ int rv;
+
+ EXPECT_TRUE(resolver.expects_pac_bytes());
+
+ // Call SetPacScriptByData() -- verify that it reaches the synchronous
+ // resolver.
+ TestCompletionCallback set_script_callback;
+ rv = resolver.SetPacScriptByData(ASCIIToUTF16("pac script bytes"),
+ &set_script_callback);
+ EXPECT_EQ(ERR_IO_PENDING, rv);
+ EXPECT_EQ(OK, set_script_callback.WaitForResult());
+ // One thread has been provisioned (i.e. one ProxyResolver was created).
+ ASSERT_EQ(1u, factory->resolvers().size());
+ EXPECT_EQ(ASCIIToUTF16("pac script bytes"),
+ factory->resolvers()[0]->last_pac_script());
+
+ const int kNumRequests = 9;
+ TestCompletionCallback callback[kNumRequests];
+ ProxyInfo results[kNumRequests];
+ ProxyResolver::RequestHandle request[kNumRequests];
+
+ // Start request 0 -- this should run on thread 0 as there is nothing else
+ // going on right now.
rv = resolver.GetProxyForURL(
- GURL("http://request0"), &results0, &callback0, &request0, BoundNetLog());
+ GURL("http://request0"), &results[0], &callback[0], &request[0],
+ BoundNetLog());
EXPECT_EQ(ERR_IO_PENDING, rv);
- // Wait until requests 0 reaches the worker thread.
- mock->WaitUntilBlocked();
+ // Wait for request 0 to finish.
+ rv = callback[0].WaitForResult();
+ EXPECT_EQ(0, rv);
+ EXPECT_EQ("PROXY request0:80", results[0].ToPacString());
+ ASSERT_EQ(1u, factory->resolvers().size());
+ EXPECT_EQ(1, factory->resolvers()[0]->request_count());
- TestCompletionCallback set_pac_script_callback;
- rv = resolver.SetPacScriptByData(ASCIIToUTF16("data"),
- &set_pac_script_callback);
+ MessageLoop::current()->RunAllPending();
+
+ // We now start 8 requests in parallel -- this will cause the maximum of
+ // three threads to be provisioned (an additional two from what we already
+ // have).
+
+ for (int i = 1; i < kNumRequests; ++i) {
+ rv = resolver.GetProxyForURL(
+ GURL(StringPrintf("http://request%d", i)), &results[i], &callback[i],
+ &request[i], BoundNetLog());
+ EXPECT_EQ(ERR_IO_PENDING, rv);
+ }
+
+ // We should now have a total of 3 threads, each with its own ProxyResolver
+ // that will get initialized with the same data. (We check this later since
+ // the assignment happens on the worker threads and may not have occurred
+ // yet.)
+ ASSERT_EQ(3u, factory->resolvers().size());
+
+ // Cancel 3 of the 8 outstanding requests.
+ resolver.CancelRequest(request[1]);
+ resolver.CancelRequest(request[3]);
+ resolver.CancelRequest(request[6]);
+
+ // Wait for the remaining requests to complete.
+ int kNonCancelledRequests[] = {2, 4, 5, 7, 8};
+ for (size_t i = 0; i < arraysize(kNonCancelledRequests); ++i) {
+ int request_index = kNonCancelledRequests[i];
+ EXPECT_GE(callback[request_index].WaitForResult(), 0);
+ }
+
+ // Check that the cancelled requests never invoked their callback.
+ EXPECT_FALSE(callback[1].have_result());
+ EXPECT_FALSE(callback[3].have_result());
+ EXPECT_FALSE(callback[6].have_result());
+
+ // We call SetPacScript again, solely to stop the current worker threads.
+ // (That way we can check the values observed by the synchronous resolvers
+ // in a non-racy manner.)
+ TestCompletionCallback set_script_callback2;
+ rv = resolver.SetPacScriptByData(ASCIIToUTF16("xyz"), &set_script_callback2);
EXPECT_EQ(ERR_IO_PENDING, rv);
+ EXPECT_EQ(OK, set_script_callback2.WaitForResult());
+ ASSERT_EQ(4u, factory->resolvers().size());
- // Cancel the SetPacScriptByData request (it can't have finished yet,
- // since the single-thread is currently blocked).
- resolver.CancelSetPacScript();
+ for (int i = 0; i < 3; ++i) {
+ EXPECT_EQ(ASCIIToUTF16("pac script bytes"),
+ factory->resolvers()[i]->last_pac_script()) << "i=" << i;
+ }
- // Start 1 more request.
+ EXPECT_EQ(ASCIIToUTF16("xyz"),
+ factory->resolvers()[3]->last_pac_script());
- TestCompletionCallback callback1;
- ProxyInfo results1;
+ // We don't know the exact order in which the requests ran on the threads,
+ // but we do know the total count that should have reached them: 8 were
+ // submitted, and three were cancelled. Of the three that were cancelled,
+ // one (request 1) was cancelled only after it had already been posted to
+ // a worker thread. So the resolvers saw 6 requests from this batch, plus
+ // 1 from the earlier run.
+ ASSERT_EQ(4u, factory->resolvers().size());
+ int total_count = 0;
+ for (int i = 0; i < 3; ++i) {
+ total_count += factory->resolvers()[i]->request_count();
+ }
+ EXPECT_EQ(7, total_count);
+}
+
+// Tests using two threads. The first request hangs the first thread. Checks
+// that other requests are able to complete while this first request remains
+// stalled.
+TEST(MultiThreadedProxyResolverTest, OneThreadBlocked) {
+ const size_t kNumThreads = 2u;
+ BlockableProxyResolverFactory* factory = new BlockableProxyResolverFactory;
+ MultiThreadedProxyResolver resolver(factory, kNumThreads);
+
+ int rv;
+
+ EXPECT_TRUE(resolver.expects_pac_bytes());
+
+ // Initialize the resolver.
+ TestCompletionCallback set_script_callback;
+ rv = resolver.SetPacScriptByData(ASCIIToUTF16("pac script bytes"),
+ &set_script_callback);
+ EXPECT_EQ(ERR_IO_PENDING, rv);
+ EXPECT_EQ(OK, set_script_callback.WaitForResult());
+ // One thread has been provisioned (i.e. one ProxyResolver was created).
+ ASSERT_EQ(1u, factory->resolvers().size());
+ EXPECT_EQ(ASCIIToUTF16("pac script bytes"),
+ factory->resolvers()[0]->last_pac_script());
+
+ const int kNumRequests = 4;
+ TestCompletionCallback callback[kNumRequests];
+ ProxyInfo results[kNumRequests];
+ ProxyResolver::RequestHandle request[kNumRequests];
+
+ // Start a request that will block the first thread.
+
+ factory->resolvers()[0]->Block();
+
rv = resolver.GetProxyForURL(
- GURL("http://request1"), &results1, &callback1, NULL, BoundNetLog());
+ GURL("http://request0"), &results[0], &callback[0], &request[0],
+ BoundNetLog());
+
EXPECT_EQ(ERR_IO_PENDING, rv);
+ factory->resolvers()[0]->WaitUntilBlocked();
- // Unblock the worker thread so the requests can continue running.
- mock->Unblock();
+ // Start 3 more requests -- they should all be serviced by thread #2
+ // since thread #1 is blocked.
- // Wait for requests 0 and 1 to finish.
+ for (int i = 1; i < kNumRequests; ++i) {
+ rv = resolver.GetProxyForURL(
+ GURL(StringPrintf("http://request%d", i)),
+ &results[i], &callback[i], &request[i], BoundNetLog());
+ EXPECT_EQ(ERR_IO_PENDING, rv);
+ }
- rv = callback0.WaitForResult();
- EXPECT_EQ(0, rv);
- EXPECT_EQ("PROXY request0:80", results0.ToPacString());
+ // Wait for the three requests to complete (they should complete in FIFO
+ // order).
+ for (int i = 1; i < kNumRequests; ++i) {
+ EXPECT_EQ(i - 1, callback[i].WaitForResult());
+ }
- rv = callback1.WaitForResult();
- EXPECT_EQ(1, rv);
- EXPECT_EQ("PROXY request1:80", results1.ToPacString());
+ // Unblock the first thread.
+ factory->resolvers()[0]->Unblock();
+ EXPECT_EQ(0, callback[0].WaitForResult());
- // The SetPacScript callback should never have been completed.
- EXPECT_FALSE(set_pac_script_callback.have_result());
+ // All in all, the first thread should have seen just 1 request. And the
+ // second thread 3 requests.
+ ASSERT_EQ(2u, factory->resolvers().size());
+ EXPECT_EQ(1, factory->resolvers()[0]->request_count());
+ EXPECT_EQ(3, factory->resolvers()[1]->request_count());
}
} // namespace
+
} // namespace net
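
The ProxyResolverFactory contract exercised above is small: a subclass reports expects_pac_bytes() through the base constructor and overrides CreateProxyResolver(), which MultiThreadedProxyResolver calls once per worker thread it provisions. A hypothetical factory for a synchronous resolver type MyPacResolver (an invented stand-in; ForwardingProxyResolverFactory above is the real test helper):

    // Hypothetical sketch; MyPacResolver stands in for any synchronous
    // ProxyResolver implementation.
    class MyPacResolverFactory : public ProxyResolverFactory {
     public:
      MyPacResolverFactory()
          : ProxyResolverFactory(true /* expects_pac_bytes */) {}

      // Called each time MultiThreadedProxyResolver provisions a worker
      // thread; judging by the tests, the caller owns the returned resolver.
      virtual ProxyResolver* CreateProxyResolver() {
        return new MyPacResolver;
      }
    };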
