Index: telemetry/telemetry/benchmark_runner.py
diff --git a/telemetry/telemetry/benchmark_runner.py b/telemetry/telemetry/benchmark_runner.py
index 3828bad16db2fd99cd99949992eba94926e5cd7d..a3387d72c4aede1549172245469e194a4f8a6812 100644
--- a/telemetry/telemetry/benchmark_runner.py
+++ b/telemetry/telemetry/benchmark_runner.py
@@ -8,7 +8,6 @@ Handles benchmark configuration, but all the logic for
 actually running the benchmark is in Benchmark and PageRunner."""
 
 import argparse
-import hashlib
 import json
 import logging
 import os
@@ -23,6 +22,7 @@ from telemetry.internal.util import binary_manager
 from telemetry.internal.util import command_line
 from telemetry.internal.util import ps_util
 from telemetry.util import matching
+from telemetry.util import bot_utils
 
 
 # Right now, we only have one of each of our power perf bots. This means that
@@ -358,18 +358,7 @@ def _GetJsonBenchmarkList(possible_browser, possible_reference_browser,
                 base_name]
     perf_dashboard_id = base_name
-    # Based on the current timings, we shift the result of the hash function to
-    # achieve better load balancing. Those shift values are to be revised when
-    # necessary. The shift value is calculated such that the total cycle time
-    # is minimized.
-    hash_shift = {
-      2 : 47,  # for old desktop configurations with 2 slaves
-      5 : 56,  # for new desktop configurations with 5 slaves
-      21 : 43  # for Android 3 slaves 7 devices configurations
-    }
-    shift = hash_shift.get(num_shards, 0)
-    base_name_hash = hashlib.sha1(base_name).hexdigest()
-    device_affinity = (int(base_name_hash, 16) >> shift) % num_shards
+    device_affinity = bot_utils.GetDeviceAffinity(num_shards, base_name)
     output['steps'][base_name] = {
       'cmd': ' '.join(base_cmd + [
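
The sharding logic deleted here is not lost: it moves behind bot_utils.GetDeviceAffinity, presumably so that other tools (bisect or trybot scripts, for instance) can compute the same benchmark-to-shard assignment from one place. The helper itself is not part of this diff; the following is a minimal sketch of what telemetry/util/bot_utils.py would need to provide, reconstructed from the deleted lines above. The shift table and its rationale are carried over verbatim from the removed code; the module-level constant name and the docstring are assumptions.

import hashlib

# Carried over from the deleted inline code: based on the current timings,
# shifting the hash result gives better load balancing, and the shift values
# are chosen to minimize total cycle time. They are to be revised as needed.
_HASH_SHIFT = {
  2: 47,   # old desktop configurations with 2 slaves
  5: 56,   # new desktop configurations with 5 slaves
  21: 43,  # Android configurations with 3 slaves / 7 devices
}


def GetDeviceAffinity(num_shards, base_name):
  """Returns the shard index in [0, num_shards) that base_name is pinned to.

  Hashing the benchmark name keeps the assignment stable across runs, so a
  given benchmark always lands on the same bot/device.
  """
  shift = _HASH_SHIFT.get(num_shards, 0)
  # Passing a str to sha1() assumes Python 2 (as in the original code);
  # Python 3 would need hashlib.sha1(base_name.encode('utf-8')).
  base_name_hash = hashlib.sha1(base_name).hexdigest()
  return (int(base_name_hash, 16) >> shift) % num_shards

Because the affinity is a pure function of (num_shards, base_name), the call site in _GetJsonBenchmarkList reduces to the single added line, and any future rebalancing tweak to the shift table applies to every consumer consistently instead of being duplicated inline.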