| OLD | NEW |
| 1 # Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 """A Thread object for running the test shell and processing URLs from a | 5 """A Thread object for running the test shell and processing URLs from a |
| 6 shared queue. | 6 shared queue. |
| 7 | 7 |
| 8 Each thread runs a separate instance of the test_shell binary and validates | 8 Each thread runs a separate instance of the test_shell binary and validates |
| 9 the output. When there are no more URLs to process in the shared queue, the | 9 the output. When there are no more URLs to process in the shared queue, the |
| 10 thread exits. | 10 thread exits. |
| 11 """ | 11 """ |
| 12 | 12 |
| 13 import copy | 13 import copy |
| 14 import logging | 14 import logging |
| 15 import os | 15 import os |
| 16 import Queue | 16 import Queue |
| 17 import signal | 17 import signal |
| 18 import subprocess | 18 import subprocess |
| 19 import sys | 19 import sys |
| 20 import thread | 20 import thread |
| 21 import threading | 21 import threading |
| 22 import time | 22 import time |
| 23 | 23 |
| 24 import path_utils | 24 import path_utils |
| 25 import platform_utils | 25 import platform_utils |
| 26 import test_failures | 26 import test_failures |
| 27 | 27 |
| 28 def ProcessOutput(proc, filename, test_uri, test_types, test_args, target): | 28 def ProcessOutput(proc, test_info, test_types, test_args, target): |
| 29 """Receives the output from a test_shell process, subjects it to a number | 29 """Receives the output from a test_shell process, subjects it to a number |
| 30 of tests, and returns a list of failure types the test produced. | 30 of tests, and returns a list of failure types the test produced. |
| 31 | 31 |
| 32 Args: | 32 Args: |
| 33 proc: an active test_shell process | 33 proc: an active test_shell process |
| 34 filename: path of the test file being run | 34 test_info: Object containing the test filename, uri and timeout |
| 35 test_types: list of test types to subject the output to | 35 test_types: list of test types to subject the output to |
| 36 test_args: arguments to be passed to each test | 36 test_args: arguments to be passed to each test |
| 37 target: Debug or Release | 37 target: Debug or Release |
| 38 | 38 |
| 39 Returns: a list of failure objects and times for the test being processed | 39 Returns: a list of failure objects and times for the test being processed |
| 40 """ | 40 """ |
| 41 outlines = [] | 41 outlines = [] |
| 42 failures = [] | 42 failures = [] |
| 43 crash_or_timeout = False | 43 crash_or_timeout = False |
| 44 | 44 |
| (...skipping 17 matching lines...) Expand all Loading... |
| 62 # ctrl+c is for the subprocess returncode to be negative SIGINT. And | 62 # ctrl+c is for the subprocess returncode to be negative SIGINT. And |
| 63 # that agrees with the subprocess documentation. | 63 # that agrees with the subprocess documentation. |
| 64 if (-1073741510 == proc.returncode or | 64 if (-1073741510 == proc.returncode or |
| 65 -signal.SIGINT == proc.returncode): | 65 -signal.SIGINT == proc.returncode): |
| 66 raise KeyboardInterrupt | 66 raise KeyboardInterrupt |
| 67 crash_or_timeout = True | 67 crash_or_timeout = True |
| 68 break | 68 break |
| 69 | 69 |
| 70 # Don't include #URL lines in our output | 70 # Don't include #URL lines in our output |
| 71 if line.startswith("#URL:"): | 71 if line.startswith("#URL:"): |
| 72 test_string = test_info.uri.strip() |
| 72 url = line.rstrip()[5:] | 73 url = line.rstrip()[5:] |
| 73 if url != test_uri: | 74 if url != test_string: |
| 74 logging.fatal("Test got out of sync:\n|%s|\n|%s|" % | 75 logging.fatal("Test got out of sync:\n|%s|\n|%s|" % |
| 75 (url, test_uri)) | 76 (url, test_string)) |
| 76 raise AssertionError("test out of sync") | 77 raise AssertionError("test out of sync") |
| 77 elif line.startswith("#MD5:"): | 78 elif line.startswith("#MD5:"): |
| 78 local_test_args.hash = line.rstrip()[5:] | 79 local_test_args.hash = line.rstrip()[5:] |
| 79 elif line.startswith("#TEST_TIMED_OUT"): | 80 elif line.startswith("#TEST_TIMED_OUT"): |
| 80 # Test timed out, but we still need to read until #EOF. | 81 # Test timed out, but we still need to read until #EOF. |
| 81 crash_or_timeout = True | 82 crash_or_timeout = True |
| 82 failures.append(test_failures.FailureTimeout()) | 83 failures.append(test_failures.FailureTimeout()) |
| 83 else: | 84 else: |
| 84 outlines.append(line) | 85 outlines.append(line) |
| 85 line = proc.stdout.readline() | 86 line = proc.stdout.readline() |
| 86 | 87 |
| 87 end_test_time = time.time() | 88 end_test_time = time.time() |
| 88 | 89 |
| 89 # Check the output and save the results. | 90 # Check the output and save the results. |
| 90 time_for_diffs = {} | 91 time_for_diffs = {} |
| 91 for test_type in test_types: | 92 for test_type in test_types: |
| 92 start_diff_time = time.time() | 93 start_diff_time = time.time() |
| 93 new_failures = test_type.CompareOutput(filename, proc, | 94 new_failures = test_type.CompareOutput(test_info.filename, |
| 95 proc, |
| 94 ''.join(outlines), | 96 ''.join(outlines), |
| 95 local_test_args, | 97 local_test_args, |
| 96 target) | 98 target) |
| 97 # Don't add any more failures if we already have a crash or timeout, so | 99 # Don't add any more failures if we already have a crash or timeout, so |
| 98 # we don't double-report those tests. | 100 # we don't double-report those tests. |
| 99 if not crash_or_timeout: | 101 if not crash_or_timeout: |
| 100 failures.extend(new_failures) | 102 failures.extend(new_failures) |
| 101 time_for_diffs[test_type.__class__.__name__] = ( | 103 time_for_diffs[test_type.__class__.__name__] = ( |
| 102 time.time() - start_diff_time) | 104 time.time() - start_diff_time) |
| 103 | 105 |
| 104 total_time_for_all_diffs = time.time() - end_test_time | 106 total_time_for_all_diffs = time.time() - end_test_time |
| 105 test_run_time = end_test_time - start_time | 107 test_run_time = end_test_time - start_time |
| 106 return TestStats(filename, failures, test_run_time, total_time_for_all_diffs, | 108 return TestStats(test_info.filename, failures, test_run_time, |
| 107 time_for_diffs) | 109 total_time_for_all_diffs, time_for_diffs) |
| 108 | 110 |
| 109 | 111 |
| 110 def StartTestShell(command, args): | 112 def StartTestShell(command, args): |
| 111 """Returns the process for a new test_shell started in layout-tests mode.""" | 113 """Returns the process for a new test_shell started in layout-tests mode.""" |
| 112 cmd = [] | 114 cmd = [] |
| 113 # Hook for injecting valgrind or other runtime instrumentation, | 115 # Hook for injecting valgrind or other runtime instrumentation, |
| 114 # used by e.g. tools/valgrind/valgrind_tests.py. | 116 # used by e.g. tools/valgrind/valgrind_tests.py. |
| 115 wrapper = os.environ.get("BROWSER_WRAPPER", None) | 117 wrapper = os.environ.get("BROWSER_WRAPPER", None) |
| 116 if wrapper != None: | 118 if wrapper != None: |
| 117 cmd += [wrapper] | 119 cmd += [wrapper] |
| 118 cmd += command + ['--layout-tests'] + args | 120 cmd += command + ['--layout-tests'] + args |
| 119 return subprocess.Popen(cmd, | 121 return subprocess.Popen(cmd, |
| 120 stdin=subprocess.PIPE, | 122 stdin=subprocess.PIPE, |
| 121 stdout=subprocess.PIPE, | 123 stdout=subprocess.PIPE, |
| 122 stderr=subprocess.STDOUT) | 124 stderr=subprocess.STDOUT) |
| 123 | 125 |
| 124 class TestStats: | 126 class TestStats: |
| 125 def __init__(self, filename, failures, test_run_time, | 127 def __init__(self, filename, failures, test_run_time, |
| 126 total_time_for_all_diffs, time_for_diffs): | 128 total_time_for_all_diffs, time_for_diffs): |
| 127 self.filename = filename | 129 self.filename = filename |
| 128 self.failures = failures | 130 self.failures = failures |
| 129 self.test_run_time = test_run_time | 131 self.test_run_time = test_run_time |
| 130 self.total_time_for_all_diffs = total_time_for_all_diffs | 132 self.total_time_for_all_diffs = total_time_for_all_diffs |
| 131 self.time_for_diffs = time_for_diffs | 133 self.time_for_diffs = time_for_diffs |
| 132 | 134 |
| 133 class SingleTestThread(threading.Thread): | 135 class SingleTestThread(threading.Thread): |
| 134 """Thread wrapper for running a single test file.""" | 136 """Thread wrapper for running a single test file.""" |
| 135 def __init__(self, test_shell_command, shell_args, test_uri, filename, | 137 def __init__(self, test_shell_command, shell_args, test_info, test_types, |
| 136 test_types, test_args, target): | 138 test_args, target): |
| 137 """ | 139 """ |
| 138 Args: | 140 Args: |
| 139 test_uri: full file:// or http:// URI of the test file to be run | 141 test_info: Object containing the test filename, uri and timeout |
| 140 filename: absolute local path to the test file | |
| 141 See TestShellThread for documentation of the remaining arguments. | 142 See TestShellThread for documentation of the remaining arguments. |
| 142 """ | 143 """ |
| 143 | 144 |
| 144 threading.Thread.__init__(self) | 145 threading.Thread.__init__(self) |
| 145 self._command = test_shell_command | 146 self._command = test_shell_command |
| 146 self._shell_args = shell_args | 147 self._shell_args = shell_args |
| 147 self._test_uri = test_uri | 148 self._test_info = test_info |
| 148 self._filename = filename | |
| 149 self._test_types = test_types | 149 self._test_types = test_types |
| 150 self._test_args = test_args | 150 self._test_args = test_args |
| 151 self._target = target | 151 self._target = target |
| 152 | 152 |
| 153 def run(self): | 153 def run(self): |
| 154 proc = StartTestShell(self._command, self._shell_args + [self._test_uri]) | 154 proc = StartTestShell(self._command, self._shell_args + |
| 155 self._test_stats = ProcessOutput(proc, self._filename, self._test_uri, | 155 ["--time-out-ms=" + self._test_info.timeout, self._test_info.uri]) |
| 156 self._test_types, self._test_args, self._target) | 156 self._test_stats = ProcessOutput(proc, self._test_info, self._test_types, |
| 157 self._test_args, self._target) |
| 157 | 158 |
| 158 def GetTestStats(self): | 159 def GetTestStats(self): |
| 159 return self._test_stats | 160 return self._test_stats |
| 160 | 161 |
| 161 class TestShellThread(threading.Thread): | 162 class TestShellThread(threading.Thread): |
| 162 | 163 |
| 163 def __init__(self, filename_list_queue, test_shell_command, test_types, | 164 def __init__(self, filename_list_queue, test_shell_command, test_types, |
| 164 test_args, shell_args, options): | 165 test_args, shell_args, options): |
| 165 """Initialize all the local state for this test shell thread. | 166 """Initialize all the local state for this test shell thread. |
| 166 | 167 |
| (...skipping 23 matching lines...) Expand all Loading... |
| 190 self._directory_timing_stats = {} | 191 self._directory_timing_stats = {} |
| 191 self._test_stats = [] | 192 self._test_stats = [] |
| 192 | 193 |
| 193 # Current directory of tests we're running. | 194 # Current directory of tests we're running. |
| 194 self._current_dir = None | 195 self._current_dir = None |
| 195 # Number of tests in self._current_dir. | 196 # Number of tests in self._current_dir. |
| 196 self._num_tests_in_current_dir = None | 197 self._num_tests_in_current_dir = None |
| 197 # Time at which we started running tests from self._current_dir. | 198 # Time at which we started running tests from self._current_dir. |
| 198 self._current_dir_start_time = None | 199 self._current_dir_start_time = None |
| 199 | 200 |
| 200 if self._options.run_singly: | |
| 201 # When we're running one test per test_shell process, we can enforce | |
| 202 # a hard timeout. test_shell uses a default of 10 seconds if no | |
| 203 # time-out-ms is given, and the test_shell watchdog uses 2.5x the | |
| 204 # test_shell's value. We want to be larger than that. | |
| 205 self._time_out_sec = int(self._options.time_out_ms) * 3.0 / 1000.0 | |
| 206 logging.info("Setting Python per-test timeout to %s ms (%s sec)" % | |
| 207 (1000 * self._time_out_sec, self._time_out_sec)) | |
| 208 | |
| 209 | |
| 210 def GetFailures(self): | 201 def GetFailures(self): |
| 211 """Returns a dictionary mapping test filename to a list of | 202 """Returns a dictionary mapping test filename to a list of |
| 212 TestFailures.""" | 203 TestFailures.""" |
| 213 return self._failures | 204 return self._failures |
| 214 | 205 |
| 215 def GetDirectoryTimingStats(self): | 206 def GetDirectoryTimingStats(self): |
| 216 """Returns a dictionary mapping test directory to a tuple of | 207 """Returns a dictionary mapping test directory to a tuple of |
| 217 (number of tests in that directory, time to run the tests)""" | 208 (number of tests in that directory, time to run the tests)""" |
| 218 return self._directory_timing_stats; | 209 return self._directory_timing_stats; |
| 219 | 210 |
| (...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 270 self._current_dir, self._filename_list = \ | 261 self._current_dir, self._filename_list = \ |
| 271 self._filename_list_queue.get_nowait() | 262 self._filename_list_queue.get_nowait() |
| 272 except Queue.Empty: | 263 except Queue.Empty: |
| 273 self._KillTestShell() | 264 self._KillTestShell() |
| 274 logging.debug("queue empty, quitting test shell thread") | 265 logging.debug("queue empty, quitting test shell thread") |
| 275 return | 266 return |
| 276 | 267 |
| 277 self._num_tests_in_current_dir = len(self._filename_list) | 268 self._num_tests_in_current_dir = len(self._filename_list) |
| 278 self._current_dir_start_time = time.time() | 269 self._current_dir_start_time = time.time() |
| 279 | 270 |
| 280 filename, test_uri = self._filename_list.pop() | 271 test_info = self._filename_list.pop() |
| 281 | 272 |
| 282 # We have a url, run tests. | 273 # We have a url, run tests. |
| 283 batch_count += 1 | 274 batch_count += 1 |
| 284 if self._options.run_singly: | 275 if self._options.run_singly: |
| 285 failures = self._RunTestSingly(filename, test_uri) | 276 failures = self._RunTestSingly(test_info) |
| 286 else: | 277 else: |
| 287 failures = self._RunTest(filename, test_uri) | 278 failures = self._RunTest(test_info) |
| 279 |
| 280 filename = test_info.filename |
| 288 if failures: | 281 if failures: |
| 289 # Check and kill test shell if we need to. | 282 # Check and kill test shell if we need to. |
| 290 if len([1 for f in failures if f.ShouldKillTestShell()]): | 283 if len([1 for f in failures if f.ShouldKillTestShell()]): |
| 291 self._KillTestShell() | 284 self._KillTestShell() |
| 292 # Reset the batch count since the shell just bounced. | 285 # Reset the batch count since the shell just bounced. |
| 293 batch_count = 0 | 286 batch_count = 0 |
| 294 # Print the error message(s). | 287 # Print the error message(s). |
| 295 error_str = '\n'.join([' ' + f.Message() for f in failures]) | 288 error_str = '\n'.join([' ' + f.Message() for f in failures]) |
| 296 logging.error("%s failed:\n%s" % | 289 logging.error("%s failed:\n%s" % |
| 297 (path_utils.RelativeTestFilename(filename), error_str)) | 290 (path_utils.RelativeTestFilename(filename), error_str)) |
| 298 # Group the errors for reporting. | 291 # Group the errors for reporting. |
| 299 self._failures[filename] = failures | 292 self._failures[filename] = failures |
| 300 else: | 293 else: |
| 301 logging.debug(path_utils.RelativeTestFilename(filename) + " passed") | 294 logging.debug(path_utils.RelativeTestFilename(filename) + " passed") |
| 295 |
| 302 if batch_size > 0 and batch_count > batch_size: | 296 if batch_size > 0 and batch_count > batch_size: |
| 303 # Bounce the shell and reset count. | 297 # Bounce the shell and reset count. |
| 304 self._KillTestShell() | 298 self._KillTestShell() |
| 305 batch_count = 0 | 299 batch_count = 0 |
| 306 | 300 |
| 307 | 301 |
| 308 def _RunTestSingly(self, filename, test_uri): | 302 def _RunTestSingly(self, test_info): |
| 309 """Run a test in a separate thread, enforcing a hard time limit. | 303 """Run a test in a separate thread, enforcing a hard time limit. |
| 310 | 304 |
| 311 Since we can only detect the termination of a thread, not any internal | 305 Since we can only detect the termination of a thread, not any internal |
| 312 state or progress, we can only run per-test timeouts when running test | 306 state or progress, we can only run per-test timeouts when running test |
| 313 files singly. | 307 files singly. |
| 308 |
| 309 Args: |
| 310 test_info: Object containing the test filename, uri and timeout |
| 311 |
| 312 Return: |
| 313 A list of TestFailure objects describing the error. |
| 314 """ | 314 """ |
| 315 worker = SingleTestThread(self._test_shell_command, | 315 worker = SingleTestThread(self._test_shell_command, |
| 316 self._shell_args, | 316 self._shell_args, |
| 317 test_uri, | 317 test_info, |
| 318 filename, | |
| 319 self._test_types, | 318 self._test_types, |
| 320 self._test_args, | 319 self._test_args, |
| 321 self._options.target) | 320 self._options.target) |
| 321 |
| 322 worker.start() | 322 worker.start() |
| 323 worker.join(self._time_out_sec) | 323 |
| 324 # When we're running one test per test_shell process, we can enforce |
| 325 # a hard timeout. The test_shell watchdog uses 2.5x the timeout. |
| 326 # We want to be larger than that. |
| 327 worker.join(int(test_info.timeout) * 3.0 / 1000.0) |
| 324 if worker.isAlive(): | 328 if worker.isAlive(): |
| 325 # If join() returned with the thread still running, the test_shell.exe is | 329 # If join() returned with the thread still running, the test_shell.exe is |
| 326 # completely hung and there's nothing more we can do with it. We have | 330 # completely hung and there's nothing more we can do with it. We have |
| 327 # to kill all the test_shells to free it up. If we're running more than | 331 # to kill all the test_shells to free it up. If we're running more than |
| 328 # one test_shell thread, we'll end up killing the other test_shells too, | 332 # one test_shell thread, we'll end up killing the other test_shells too, |
| 329 # introducing spurious crashes. We accept that tradeoff in order to | 333 # introducing spurious crashes. We accept that tradeoff in order to |
| 330 # avoid losing the rest of this thread's results. | 334 # avoid losing the rest of this thread's results. |
| 331 logging.error('Test thread hung: killing all test_shells') | 335 logging.error('Test thread hung: killing all test_shells') |
| 332 # PlatformUtility() wants a base_dir, but it doesn't matter here. | 336 # PlatformUtility() wants a base_dir, but it doesn't matter here. |
| 333 platform_util = platform_utils.PlatformUtility('') | 337 platform_util = platform_utils.PlatformUtility('') |
| 334 platform_util.KillAllTestShells() | 338 platform_util.KillAllTestShells() |
| 335 | 339 |
| 336 try: | 340 try: |
| 337 stats = worker.GetTestStats() | 341 stats = worker.GetTestStats() |
| 338 self._test_stats.append(stats) | 342 self._test_stats.append(stats) |
| 339 failures = stats.failures | 343 failures = stats.failures |
| 340 except AttributeError, e: | 344 except AttributeError, e: |
| 341 failures = [] | 345 failures = [] |
| 342 logging.error('Cannot get results of test: %s' % filename) | 346 logging.error('Cannot get results of test: %s' % test_info.filename) |
| 343 | 347 |
| 344 return failures | 348 return failures |
| 345 | 349 |
| 346 | 350 def _RunTest(self, test_info): |
| 347 def _RunTest(self, filename, test_uri): | |
| 348 """Run a single test file using a shared test_shell process. | 351 """Run a single test file using a shared test_shell process. |
| 349 | 352 |
| 350 Args: | 353 Args: |
| 351 filename: The absolute filename of the test | 354 test_info: Object containing the test filename, uri and timeout |
| 352 test_uri: The URI version of the filename | |
| 353 | 355 |
| 354 Return: | 356 Return: |
| 355 A list of TestFailure objects describing the error. | 357 A list of TestFailure objects describing the error. |
| 356 """ | 358 """ |
| 357 self._EnsureTestShellIsRunning() | 359 self._EnsureTestShellIsRunning() |
| 360 # Args to test_shell is a space-separated list of "uri timeout" or just a |
| 361 # uri to use the default timeout specified in run_webkit_tests. |
| 362 self._test_shell_proc.stdin.write(("%s %s\n" % |
| 363 (test_info.uri, test_info.timeout))) |
| 358 | 364 |
| 359 # Ok, load the test URL... | |
| 360 self._test_shell_proc.stdin.write(test_uri + "\n") | |
| 361 # If the test shell is dead, the above may cause an IOError as we | 365 # If the test shell is dead, the above may cause an IOError as we |
| 362 # try to write onto the broken pipe. If this is the first test for | 366 # try to write onto the broken pipe. If this is the first test for |
| 363 # this test shell process, then the test shell did not | 367 # this test shell process, then the test shell did not |
| 364 # successfully start. If this is not the first test, then the | 368 # successfully start. If this is not the first test, then the |
| 365 # previous tests have caused some kind of delayed crash. We don't | 369 # previous tests have caused some kind of delayed crash. We don't |
| 366 # try to recover here. | 370 # try to recover here. |
| 367 self._test_shell_proc.stdin.flush() | 371 self._test_shell_proc.stdin.flush() |
| 368 | 372 |
| 369 stats = ProcessOutput(self._test_shell_proc, filename, test_uri, | 373 stats = ProcessOutput(self._test_shell_proc, test_info, self._test_types, |
| 370 self._test_types, self._test_args, self._options.target) | 374 self._test_args, self._options.target) |
| 371 | 375 |
| 372 self._test_stats.append(stats) | 376 self._test_stats.append(stats) |
| 373 return stats.failures | 377 return stats.failures |
| 374 | 378 |
| 375 | 379 |
| 376 def _EnsureTestShellIsRunning(self): | 380 def _EnsureTestShellIsRunning(self): |
| 377 """Start the shared test shell, if it's not running. Not for use when | 381 """Start the shared test shell, if it's not running. Not for use when |
| 378 running tests singly, since those each start a separate test shell in | 382 running tests singly, since those each start a separate test shell in |
| 379 their own thread. | 383 their own thread. |
| 380 """ | 384 """ |
| 381 if (not self._test_shell_proc or | 385 if (not self._test_shell_proc or |
| 382 self._test_shell_proc.poll() is not None): | 386 self._test_shell_proc.poll() is not None): |
| 383 self._test_shell_proc = StartTestShell(self._test_shell_command, | 387 self._test_shell_proc = StartTestShell(self._test_shell_command, |
| 384 self._shell_args) | 388 self._shell_args) |
| 385 | 389 |
| 386 def _KillTestShell(self): | 390 def _KillTestShell(self): |
| 387 """Kill the test shell process if it's running.""" | 391 """Kill the test shell process if it's running.""" |
| 388 if self._test_shell_proc: | 392 if self._test_shell_proc: |
| 389 self._test_shell_proc.stdin.close() | 393 self._test_shell_proc.stdin.close() |
| 390 self._test_shell_proc.stdout.close() | 394 self._test_shell_proc.stdout.close() |
| 391 if self._test_shell_proc.stderr: | 395 if self._test_shell_proc.stderr: |
| 392 self._test_shell_proc.stderr.close() | 396 self._test_shell_proc.stderr.close() |
| 393 if sys.platform not in ('win32', 'cygwin'): | |
| 394 # Closing stdin/stdout/stderr hangs sometimes on OS X. | |
| 395 subprocess.Popen(["kill", "-9", str(self._test_shell_proc.pid)]) | |
| 396 self._test_shell_proc = None | 397 self._test_shell_proc = None |
| OLD | NEW |