| Index: appengine/swarming/server/task_to_run_test.py
|
| diff --git a/appengine/swarming/server/task_to_run_test.py b/appengine/swarming/server/task_to_run_test.py
|
| index e09443f398a51f66aba551929ccde25ffd723b8a..72872fefd599fd0c84ccb424f6469471b5021880 100755
|
| --- a/appengine/swarming/server/task_to_run_test.py
|
| +++ b/appengine/swarming/server/task_to_run_test.py
|
| @@ -35,17 +35,20 @@ from server import task_to_run
|
| # Method could be a function - pylint: disable=R0201
|
|
|
|
|
| +def _flatten(request_dimensions):
|
| + return [u'%s:%s' % (k, v) for k, v in request_dimensions]
|
| +
|
| +
|
| def _gen_request(properties=None, **kwargs):
|
| """Creates a TaskRequest."""
|
| props = {
|
| 'command': [u'command1'],
|
| - 'dimensions': {u'pool': u'default'},
|
| + 'dimensions_flat': [u'pool:default'],
|
| 'env': {},
|
| 'execution_timeout_secs': 24*60*60,
|
| 'io_timeout_secs': None,
|
| }
|
| props.update(properties or {})
|
| - props['dimensions_dict'] = props.pop('dimensions')
|
| now = utils.utcnow()
|
| args = {
|
| 'created_ts': now,
|
| @@ -189,12 +192,12 @@ class TaskToRunApiTest(test_env_handlers.AppTestBase):
|
|
|
| def test_new_task_to_run(self):
|
| self.mock(random, 'getrandbits', lambda _: 0x12)
|
| - request_dimensions = {u'os': u'Windows-3.1.1', u'pool': u'default'}
|
| + request_dimensions = [(u'os', u'Windows-3.1.1'), (u'pool', u'default')]
|
| now = utils.utcnow()
|
| data = _gen_request(
|
| properties={
|
| 'command': [u'command1', u'arg1'],
|
| - 'dimensions': request_dimensions,
|
| + 'dimensions_flat': _flatten(request_dimensions),
|
| 'env': {u'foo': u'bar'},
|
| 'execution_timeout_secs': 30,
|
| },
|
| @@ -208,7 +211,7 @@ class TaskToRunApiTest(test_env_handlers.AppTestBase):
|
| data = _gen_request(
|
| properties={
|
| 'command': [u'command1', u'arg1'],
|
| - 'dimensions': request_dimensions,
|
| + 'dimensions_flat': _flatten(request_dimensions),
|
| 'env': {u'foo': u'bar'},
|
| 'execution_timeout_secs': 30,
|
| },
|
| @@ -246,11 +249,11 @@ class TaskToRunApiTest(test_env_handlers.AppTestBase):
|
|
|
| def test_match_dimensions(self):
|
| data_true = (
|
| - ({}, {}),
|
| - ({}, {'a': 'b'}),
|
| - ({'a': 'b'}, {'a': 'b'}),
|
| - ({'os': 'amiga'}, {'os': ['amiga', 'amiga-3.1']}),
|
| - ( {'os': 'amiga', 'foo': 'bar'},
|
| + ([], {}),
|
| + ([], {'a': 'b'}),
|
| + ([('a', 'b')], {'a': 'b'}),
|
| + ([('os', 'amiga')], {'os': ['amiga', 'amiga-3.1']}),
|
| + ( [('os', 'amiga'), ('foo', 'bar')],
|
| {'os': ['amiga', 'amiga-3.1'], 'a': 'b', 'foo': 'bar'}),
|
| )
|
|
|
| @@ -260,7 +263,7 @@ class TaskToRunApiTest(test_env_handlers.AppTestBase):
|
| task_to_run.match_dimensions(request_dimensions, bot_dimensions))
|
|
|
| data_false = (
|
| - ({'os': 'amiga'}, {'os': ['Win', 'Win-3.1']}),
|
| + ([('os', 'amiga')], {'os': ['Win', 'Win-3.1']}),
|
| )
|
| for request_dimensions, bot_dimensions in data_false:
|
| self.assertEqual(
|
| @@ -268,20 +271,24 @@ class TaskToRunApiTest(test_env_handlers.AppTestBase):
|
| task_to_run.match_dimensions(request_dimensions, bot_dimensions))
|
|
|
| def test_yield_next_available_task_to_dispatch_none(self):
|
| + request_dimensions = [
|
| + (u'os', u'Windows-3.1.1'),
|
| + (u'pool', u'default'),
|
| + ]
|
| self._gen_new_task_to_run(
|
| - properties={
|
| - 'dimensions': {u'os': u'Windows-3.1.1', u'pool': u'default'},
|
| - })
|
| + properties=dict(dimensions_flat=_flatten(request_dimensions)))
|
| # Bot declares no dimensions, so it will fail to match.
|
| bot_dimensions = {u'id': [u'bot1'], u'pool': [u'default']}
|
| actual = _yield_next_available_task_to_dispatch(bot_dimensions, None)
|
| self.assertEqual([], actual)
|
|
|
| def test_yield_next_available_task_to_dispatch_none_mismatch(self):
|
| + request_dimensions = [
|
| + (u'os', u'Windows-3.1.1'),
|
| + (u'pool', u'default'),
|
| + ]
|
| self._gen_new_task_to_run(
|
| - properties={
|
| - 'dimensions': {u'os': u'Windows-3.1.1', u'pool': u'default'},
|
| - })
|
| + properties=dict(dimensions_flat=_flatten(request_dimensions)))
|
| # Bot declares other dimensions, so it will fail to match.
|
| bot_dimensions = {
|
| u'id': [u'bot1'],
|
| @@ -292,15 +299,15 @@ class TaskToRunApiTest(test_env_handlers.AppTestBase):
|
| self.assertEqual([], actual)
|
|
|
| def test_yield_next_available_task_to_dispatch(self):
|
| - request_dimensions = {
|
| - u'foo': u'bar',
|
| - u'os': u'Windows-3.1.1',
|
| - u'pool': u'default',
|
| - }
|
| + request_dimensions = [
|
| + (u'foo', u'bar'),
|
| + (u'os', u'Windows-3.1.1'),
|
| + (u'pool', u'default'),
|
| + ]
|
| self._gen_new_task_to_run(
|
| - properties=dict(dimensions=request_dimensions))
|
| + properties=dict(dimensions_flat=_flatten(request_dimensions)))
|
| # Bot declares exactly same dimensions so it matches.
|
| - bot_dimensions = {k: [v] for k, v in request_dimensions.iteritems()}
|
| + bot_dimensions = {k: [v] for k, v in request_dimensions}
|
| bot_dimensions[u'id'] = [u'bot1']
|
| actual = _yield_next_available_task_to_dispatch(bot_dimensions, None)
|
| expected = [
|
| @@ -313,12 +320,12 @@ class TaskToRunApiTest(test_env_handlers.AppTestBase):
|
| self.assertEqual(expected, actual)
|
|
|
| def test_yield_next_available_task_to_dispatch_subset(self):
|
| - request_dimensions = {
|
| - u'os': u'Windows-3.1.1',
|
| - u'pool': u'default',
|
| - }
|
| + request_dimensions = [
|
| + (u'os', u'Windows-3.1.1'),
|
| + (u'pool', u'default'),
|
| + ]
|
| self._gen_new_task_to_run(
|
| - properties=dict(dimensions=request_dimensions))
|
| + properties=dict(dimensions_flat=_flatten(request_dimensions)))
|
| # Bot declares more dimensions than needed, this is fine and it matches.
|
| bot_dimensions = {
|
| u'id': [u'localhost'],
|
| @@ -336,12 +343,13 @@ class TaskToRunApiTest(test_env_handlers.AppTestBase):
|
| self.assertEqual(expected, actual)
|
|
|
| def test_yield_next_available_task_shard(self):
|
| - request_dimensions = {
|
| - u'os': u'Windows-3.1.1',
|
| - u'pool': u'default',
|
| - }
|
| - self._gen_new_task_to_run(properties=dict(dimensions=request_dimensions))
|
| - bot_dimensions = {k: [v] for k, v in request_dimensions.iteritems()}
|
| + request_dimensions = [
|
| + (u'os', u'Windows-3.1.1'),
|
| + (u'pool', u'default'),
|
| + ]
|
| + self._gen_new_task_to_run(
|
| + properties=dict(dimensions_flat=_flatten(request_dimensions)))
|
| + bot_dimensions = {k: [v] for k, v in request_dimensions}
|
| bot_dimensions[u'id'] = [u'bot1']
|
| actual = _yield_next_available_task_to_dispatch(bot_dimensions, None)
|
| expected = [
|
| @@ -354,12 +362,12 @@ class TaskToRunApiTest(test_env_handlers.AppTestBase):
|
| self.assertEqual(expected, actual)
|
|
|
| def test_yield_next_available_task_to_dispatch_subset_multivalue(self):
|
| - request_dimensions = {
|
| - u'os': u'Windows-3.1.1',
|
| - u'pool': u'default',
|
| - }
|
| + request_dimensions = [
|
| + (u'os', u'Windows-3.1.1'),
|
| + (u'pool', u'default'),
|
| + ]
|
| self._gen_new_task_to_run(
|
| - properties=dict(dimensions=request_dimensions))
|
| + properties=dict(dimensions_flat=_flatten(request_dimensions)))
|
| # Bot declares more dimensions than needed.
|
| bot_dimensions = {
|
| u'id': [u'localhost'],
|
| @@ -378,18 +386,20 @@ class TaskToRunApiTest(test_env_handlers.AppTestBase):
|
|
|
| def test_yield_next_available_task_to_dispatch_multi_normal(self):
|
| # Task added one after the other, normal case.
|
| - request_dimensions_1 = {
|
| - u'foo': u'bar',
|
| - u'os': u'Windows-3.1.1',
|
| - u'pool': u'default',
|
| - }
|
| - self._gen_new_task_to_run(properties=dict(dimensions=request_dimensions_1))
|
| + request_dimensions_1 = [
|
| + (u'foo', u'bar'),
|
| + (u'os', u'Windows-3.1.1'),
|
| + (u'pool', u'default'),
|
| + ]
|
| + self._gen_new_task_to_run(
|
| + properties=dict(dimensions_flat=_flatten(request_dimensions_1)))
|
|
|
| # It's normally time ordered.
|
| self.mock_now(self.now, 1)
|
| - request_dimensions_2 = {u'id': u'localhost', u'pool': u'default'}
|
| + request_dimensions_2 = [(u'id', u'localhost'), (u'pool', u'default')]
|
| self._gen_new_task_to_run(
|
| - properties=dict(dimensions=request_dimensions_2), nb_task=0)
|
| + properties=dict(dimensions_flat=_flatten(request_dimensions_2)),
|
| + nb_task=0)
|
|
|
| bot_dimensions = {
|
| u'foo': [u'bar'],
|
| @@ -418,19 +428,21 @@ class TaskToRunApiTest(test_env_handlers.AppTestBase):
|
| # value) but with a timestamp sooner (for example, time desynchronization
|
| # between machines) is still returned in the timestamp order, e.g. priority
|
| # is done based on timestamps and priority only.
|
| - request_dimensions_1 = {
|
| - u'foo': u'bar',
|
| - u'os': u'Windows-3.1.1',
|
| - u'pool': u'default',
|
| - }
|
| - self._gen_new_task_to_run(properties=dict(dimensions=request_dimensions_1))
|
| + request_dimensions_1 = [
|
| + (u'foo', u'bar'),
|
| + (u'os', u'Windows-3.1.1'),
|
| + (u'pool', u'default'),
|
| + ]
|
| + self._gen_new_task_to_run(
|
| + properties=dict(dimensions_flat=_flatten(request_dimensions_1)))
|
|
|
| # The second shard is added before the first, potentially because of a
|
| # desynchronized clock. It'll have higher priority.
|
| self.mock_now(self.now, -1)
|
| - request_dimensions_2 = {u'id': u'localhost', u'pool': u'default'}
|
| + request_dimensions_2 = [(u'id', u'localhost'), (u'pool', u'default')]
|
| self._gen_new_task_to_run(
|
| - properties=dict(dimensions=request_dimensions_2), nb_task=0)
|
| + properties=dict(dimensions_flat=_flatten(request_dimensions_2)),
|
| + nb_task=0)
|
|
|
| bot_dimensions = {
|
| u'foo': [u'bar'],
|
| @@ -457,15 +469,17 @@ class TaskToRunApiTest(test_env_handlers.AppTestBase):
|
|
|
| def test_yield_next_available_task_to_dispatch_priority(self):
|
| # Task added later but with higher priority are returned first.
|
| - request_dimensions_1 = {u'os': u'Windows-3.1.1', u'pool': u'default'}
|
| - self._gen_new_task_to_run(properties=dict(dimensions=request_dimensions_1))
|
| + request_dimensions_1 = [(u'os', u'Windows-3.1.1'), (u'pool', u'default')]
|
| + self._gen_new_task_to_run(
|
| + properties=dict(dimensions_flat=_flatten(request_dimensions_1)))
|
|
|
| # This one is later but has higher priority.
|
| self.mock_now(self.now, 60)
|
| - request_dimensions_2 = {u'os': u'Windows-3.1.1', u'pool': u'default'}
|
| + request_dimensions_2 = [(u'os', u'Windows-3.1.1'), (u'pool', u'default')]
|
| request = self.mkreq(
|
| _gen_request(
|
| - properties=dict(dimensions=request_dimensions_2), priority=10),
|
| + properties=dict(dimensions_flat=_flatten(request_dimensions_2)),
|
| + priority=10),
|
| nb_task=0)
|
| task_to_run.new_task_to_run(request).put()
|
|
|
| @@ -491,31 +505,35 @@ class TaskToRunApiTest(test_env_handlers.AppTestBase):
|
| self.assertEqual(expected, actual)
|
|
|
| def test_yield_next_available_task_to_run_task_exceeds_deadline(self):
|
| - request_dimensions = {
|
| - u'foo': u'bar',
|
| - u'id': u'localhost',
|
| - u'os': u'Windows-3.1.1',
|
| - u'pool': u'default',
|
| - }
|
| + request_dimensions = [
|
| + (u'foo', u'bar'),
|
| + (u'id', u'localhost'),
|
| + (u'os', u'Windows-3.1.1'),
|
| + (u'pool', u'default'),
|
| + ]
|
| self._gen_new_task_to_run(
|
| - properties=dict(dimensions=request_dimensions), nb_task=0)
|
| + properties={
|
| +            'dimensions_flat': _flatten(request_dimensions),
|
| + },
|
| + nb_task=0)
|
| # Bot declares exactly same dimensions so it matches.
|
| - bot_dimensions = {k: [v] for k, v in request_dimensions.iteritems()}
|
| + bot_dimensions = {k: [v] for k, v in request_dimensions}
|
| actual = _yield_next_available_task_to_dispatch(
|
| bot_dimensions, datetime.datetime(1969, 1, 1))
|
| self.failIf(actual)
|
|
|
| def test_yield_next_available_task_to_run_task_meets_deadline(self):
|
| - request_dimensions = {
|
| - u'foo': u'bar',
|
| - u'id': u'localhost',
|
| - u'os': u'Windows-3.1.1',
|
| - u'pool': u'default',
|
| - }
|
| + request_dimensions = [
|
| + (u'foo', u'bar'),
|
| + (u'id', u'localhost'),
|
| + (u'os', u'Windows-3.1.1'),
|
| + (u'pool', u'default'),
|
| + ]
|
| self._gen_new_task_to_run(
|
| - properties=dict(dimensions=request_dimensions), nb_task=0)
|
| + properties=dict(dimensions_flat=_flatten(request_dimensions)),
|
| + nb_task=0)
|
| # Bot declares exactly same dimensions so it matches.
|
| - bot_dimensions = {k: [v] for k, v in request_dimensions.iteritems()}
|
| + bot_dimensions = {k: [v] for k, v in request_dimensions}
|
| actual = _yield_next_available_task_to_dispatch(
|
| bot_dimensions, datetime.datetime(3000, 1, 1))
|
| expected = [
|
| @@ -528,18 +546,18 @@ class TaskToRunApiTest(test_env_handlers.AppTestBase):
|
| self.assertEqual(expected, actual)
|
|
|
| def test_yield_next_available_task_to_run_task_terminate(self):
|
| - request_dimensions = {
|
| - u'id': u'fake-id',
|
| - }
|
| + request_dimensions = [
|
| + (u'id', u'fake-id'),
|
| + ]
|
| task = self._gen_new_task_to_run(
|
| priority=0,
|
| properties=dict(
|
| - command=[], dimensions=request_dimensions, execution_timeout_secs=0,
|
| - grace_period_secs=0),
|
| + command=[], dimensions_flat=_flatten(request_dimensions),
|
| + execution_timeout_secs=0, grace_period_secs=0),
|
| nb_task=0)
|
| self.assertTrue(task.key.parent().get().properties.is_terminate)
|
| # Bot declares exactly same dimensions so it matches.
|
| - bot_dimensions = {k: [v] for k, v in request_dimensions.iteritems()}
|
| + bot_dimensions = {k: [v] for k, v in request_dimensions}
|
| bot_dimensions[u'pool'] = [u'default']
|
| actual = _yield_next_available_task_to_dispatch(bot_dimensions, 0)
|
| expected = [
|
| @@ -573,9 +591,9 @@ class TaskToRunApiTest(test_env_handlers.AppTestBase):
|
| 1, len(list(task_to_run.yield_expired_task_to_run())))
|
|
|
| def test_is_reapable(self):
|
| - req_dimensions = {u'os': u'Windows-3.1.1', u'pool': u'default'}
|
| + req_dimensions = [(u'os', u'Windows-3.1.1'), (u'pool', u'default')]
|
| to_run = self._gen_new_task_to_run(
|
| - properties=dict(dimensions=req_dimensions))
|
| + properties=dict(dimensions_flat=_flatten(req_dimensions)))
|
| bot_dimensions = {
|
| u'id': [u'localhost'],
|
| u'os': [u'Windows-3.1.1'],
|
| @@ -590,10 +608,9 @@ class TaskToRunApiTest(test_env_handlers.AppTestBase):
|
| self.assertEqual(False, to_run.is_reapable)
|
|
|
| def test_set_lookup_cache(self):
|
| + req_dimensions = [(u'os', u'Windows-3.1.1'), (u'pool', u'default')]
|
| to_run = self._gen_new_task_to_run(
|
| - properties={
|
| - 'dimensions': {u'os': u'Windows-3.1.1', u'pool': u'default'},
|
| - })
|
| + properties=dict(dimensions_flat=_flatten(req_dimensions)))
|
| self.assertEqual(False, task_to_run._lookup_cache_is_taken(to_run.key))
|
| task_to_run.set_lookup_cache(to_run.key, True)
|
| self.assertEqual(False, task_to_run._lookup_cache_is_taken(to_run.key))
|
|
|