Chromium Code Reviews| Index: client/utils/subprocess42.py |
| diff --git a/client/utils/subprocess42.py b/client/utils/subprocess42.py |
| index cbfdeac2a6559128b82da07264c0637433de546c..f774870098647ae5fbc480fd721c77ea6b903114 100644 |
| --- a/client/utils/subprocess42.py |
| +++ b/client/utils/subprocess42.py |
| @@ -387,6 +387,13 @@ class Popen(subprocess.Popen): |
| self.end = time.time() |
| return ret |
| + def yield_any_line(self, **kwargs): |
| + """Yields lines until the process terminates. |
| + |
| + Like yield_any, but yields lines. |
| + """ |
| + return split(self.yield_any(**kwargs)) |
| + |
| def yield_any(self, maxsize=None, timeout=None): |
| """Yields output until the process terminates. |
| @@ -418,8 +425,8 @@ class Popen(subprocess.Popen): |
| last_yield = time.time() |
| while self.poll() is None: |
| - to = (None if timeout is None |
| - else max(timeout() - (time.time() - last_yield), 0)) |
| + to = timeout() if timeout else None |
|
M-A Ruel
2016/06/06 23:34:51
if you want to make this more readable, do:
to =
nodir
2016/06/07 18:46:35
to be clear: the purpose of the change was to allo
|
| + to = max(to - (time.time() - last_yield), 0) if to is not None else None |
| t, data = self.recv_any( |
| maxsize=maxsize() if callable(maxsize) else maxsize, timeout=to) |
| if data or to is 0: |
| @@ -635,3 +642,69 @@ def inhibit_os_error_reporting(): |
| # defaults write com.apple.CrashReporter UseUNC 1 |
| # defaults write com.apple.CrashReporter DialogType none |
| # - Ubuntu, disable apport if needed. |
| + |
| + |
def split(data, sep='\n'):
  """Splits pipe data by |sep|. Does some buffering.

  For example, [('stdout', 'a\nb'), ('stdout', '\n'), ('stderr', 'c\n')] ->
  [('stdout', 'a'), ('stdout', 'b'), ('stderr', 'c')].

  If a chunk of data from one pipe does not end with the separator, and the
  next chunk comes from another pipe, the end of the first chunk is yielded
  as is, without waiting for the rest of the line.
  E.g. [('stdout', 'a\nb'), ('stderr', 'c')] ->
  [('stdout', 'a'), ('stdout', 'b'), ('stderr', 'c')].

  Args:
    data: iterable of tuples (pipe_name, bytes).
    sep: separator to split on; may be longer than one character.

  Yields:
    Tuples (pipe_name, bytes) where bytes is the input data split by |sep|
    into separate items; the separator itself is never included.
  """
  # Incomplete tail of the last chunk(s) from one pipe, buffered until |sep|
  # (or a chunk from a different pipe, or the end of |data|) completes it.
  pending_chunks = None
  pending_chunks_pipe_name = None
  sep_len = len(sep)

  for pipe_name, chunk in data:
    start = 0  # offset in |chunk| to start the |sep| search from.

    if pending_chunks:
      if pending_chunks_pipe_name != pipe_name:
        # Don't buffer more than one pipe at a time: yield the pending chunk
        # as is and forget it.
        yield pending_chunks_pipe_name, ''.join(pending_chunks)
        pending_chunks = None
        pending_chunks_pipe_name = None
      else:
        # Same pipe: the beginning of this chunk continues the pending one.
        j = chunk.find(sep)
        if j == -1:
          # This chunk is incomplete too; keep buffering.
          pending_chunks.append(chunk)
          continue

        pending_chunks.append(chunk[:j])
        # Now the pending chunk is complete: yield and forget it.
        yield pending_chunks_pipe_name, ''.join(pending_chunks)
        pending_chunks = None
        pending_chunks_pipe_name = None

        # Skip the whole separator. The original code used |j + 1|, which is
        # wrong for multi-character separators: part of |sep| leaked into the
        # next yielded item.
        start = j + sep_len

    # Split the rest of the chunk and yield parts that are followed by |sep|.
    while start < len(chunk):
      j = chunk.find(sep, start)
      if j != -1:
        yield pipe_name, chunk[start:j]
        start = j + sep_len
      else:
        # The last part is incomplete; buffer it.
        pending_chunks = [chunk[start:]]
        pending_chunks_pipe_name = pipe_name
        break

  # |data| is exhausted; flush any remaining buffered tail.
  if pending_chunks:
    yield pending_chunks_pipe_name, ''.join(pending_chunks)