| Index: tools/testrunner/network/endpoint.py
|
| diff --git a/tools/testrunner/network/endpoint.py b/tools/testrunner/network/endpoint.py
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..b2f8a0581a6a96cdb2313d814cccd720be9373c0
|
| --- /dev/null
|
| +++ b/tools/testrunner/network/endpoint.py
|
| @@ -0,0 +1,123 @@
|
| +# Copyright 2012 the V8 project authors. All rights reserved.
|
| +# Redistribution and use in source and binary forms, with or without
|
| +# modification, are permitted provided that the following conditions are
|
| +# met:
|
| +#
|
| +# * Redistributions of source code must retain the above copyright
|
| +# notice, this list of conditions and the following disclaimer.
|
| +# * Redistributions in binary form must reproduce the above
|
| +# copyright notice, this list of conditions and the following
|
| +# disclaimer in the documentation and/or other materials provided
|
| +# with the distribution.
|
| +# * Neither the name of Google Inc. nor the names of its
|
| +# contributors may be used to endorse or promote products derived
|
| +# from this software without specific prior written permission.
|
| +#
|
| +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| +
|
| +
|
| +import multiprocessing
|
| +import os
|
| +import Queue
|
| +import threading
|
| +import time
|
| +
|
| +from ..local import execution
|
| +from ..local import progress
|
| +from ..local import testsuite
|
| +from ..local import utils
|
| +from ..server import compression
|
| +
|
| +
|
class EndpointProgress(progress.ProgressIndicator):
  """Progress indicator that streams test results back over a socket.

  Results reported through HasRun() are buffered in |results_queue| and
  drained by a dedicated sender thread, which packs, compresses and sends
  them to the peer via |sock|.  A None entry in the queue is the sentinel
  that tells the sender thread to flush remaining results and terminate.
  """

  def __init__(self, sock, server, ctx):
    super(EndpointProgress, self).__init__()
    self.sock = sock      # Socket the packed results are sent over.
    self.server = server  # Provides CompareOwnPerf() and job count.
    self.context = ctx    # Supplies arch/mode for the perf comparison.
    self.results_queue = []  # Accessors must synchronize themselves.
    # Create and acquire the completion lock BEFORE starting the sender
    # thread: the thread finishes by calling self.done_lock.release(), so
    # starting it first (as the original code did) left a latent race where
    # the thread could observe a missing attribute or release a lock the
    # main thread had not yet acquired.  _SenderThread releases this lock
    # once the None sentinel has been processed; callers block on a second
    # acquire() to wait for completion.
    self.done_lock = threading.Lock()
    self.done_lock.acquire()
    self.senderthread = threading.Thread(target=self._SenderThread)
    self.senderthread.start()

  def HasRun(self, test):
    # The runners that call this have a lock anyway, so this is safe.
    self.results_queue.append(test)

  def _SenderThread(self):
    """Loop: drain results_queue, pack + send results, until sentinel."""
    # The four timers below are accumulated but not reported in this file;
    # presumably they exist for diagnostics — TODO confirm against callers.
    queuetime = 0.0     # Time spent collecting results from the queue.
    packtime = 0.0      # Time spent packing results for transmission.
    compresstime = 0.0  # Time spent compressing/sending over the socket.
    comparetime = 0.0   # Time spent on local perf comparison.
    keep_running = True
    tests = []
    while keep_running:
      time.sleep(0.1)
      t1 = time.time()
      # This should be "atomic enough" without locking :-)
      # (We don't care which list any new elements get appended to, as long
      # as we don't lose any and the last one comes last.)
      current = self.results_queue
      self.results_queue = []
      for c in current:
        if c is None:
          # Sentinel seen: send what we have collected, then stop looping.
          keep_running = False
        else:
          tests.append(c)
      if keep_running and len(tests) < 1:
        continue  # Wait for more results.
      t2 = time.time()
      result = []
      for t in tests:
        result.append(t.PackResult())
      t3 = time.time()
      compression.Send(result, self.sock)
      t4 = time.time()
      for t in tests:
        self.server.CompareOwnPerf(t, self.context.arch, self.context.mode)
      comparetime += time.time() - t4
      compresstime += t4 - t3
      packtime += t3 - t2
      queuetime += t2 - t1
      tests = []
    # Signal anyone blocked in done_lock.acquire() that sending is done.
    self.done_lock.release()
|
| +
|
| +
|
def Execute(workspace, ctx, tests, sock, server):
  """Runs the given tests locally and streams results back through |sock|.

  Loads every test suite found under <workspace>/test, attaches each
  incoming test object to its suite, downloads any required suite data,
  then runs everything and blocks until all results have been sent.
  """
  test_root = os.path.join(workspace, "test")
  suites = []
  for name in utils.GetSuitePaths(test_root):
    loaded = testsuite.TestSuite.LoadTestSuite(os.path.join(test_root, name))
    if loaded:
      suites.append(loaded)

  # Index suites by name and reset their test lists.
  suites_by_name = {}
  for s in suites:
    s.tests = []
    suites_by_name[s.name] = s

  # Attach each test to its owning suite, replacing the textual suite
  # name on the test with the suite object itself.
  for t in tests:
    owner = suites_by_name[t.suite]
    t.suite = owner
    owner.tests.append(t)

  # Keep only suites that actually have tests, and fetch their data.
  suites = [s for s in suites if s.tests]
  for s in suites:
    s.DownloadData()

  progress_indicator = EndpointProgress(sock, server, ctx)
  runner = execution.Runner(suites, progress_indicator, ctx)
  runner.Run(server.jobs)
  progress_indicator.HasRun(None)  # Sentinel to signal the end.
  progress_indicator.done_lock.acquire()  # Released when the indicator is done.
|
|
|