Chromium Code Reviews
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Monkeypatch IMapIterator so that Ctrl-C can kill everything properly.
# Derived from https://gist.github.com/aljungberg/626518
import multiprocessing.pool
from multiprocessing.pool import IMapIterator
def wrapper(func):
  def wrap(self, timeout=None):
    return func(self, timeout=timeout or 1e100)
  return wrap
IMapIterator.next = wrapper(IMapIterator.next)
IMapIterator.__next__ = IMapIterator.next


import binascii
import contextlib
import functools
import logging
import signal
import subprocess
import sys
import tempfile
import threading


GIT_EXE = 'git.bat' if sys.platform.startswith('win') else 'git'


class CalledProcessError(Exception):
  def __init__(self, returncode, cmd):
    super(CalledProcessError, self).__init__()
    self.returncode = returncode
    self.cmd = cmd

  def __str__(self):
    return (
        'Command "%s" returned non-zero exit status %d' %
        (self.cmd, self.returncode))


def memoize_one(**kwargs):
  """Memoizes a single-argument pure function.

  Values of None are not cached.

  Kwargs:
    threadsafe (bool) - REQUIRED. Specifies whether to use locking around
      cache manipulation functions. This is a kwarg so that users of
      memoize_one are forced to explicitly and verbosely pick True or False.

  Adds four methods to the decorated function:
    * get(key, default=None) - Gets the value for this key from the cache.
    * set(key, value) - Sets the value for this key in the cache.
    * clear() - Drops the entire contents of the cache. Useful for unittests.
    * update(other) - Updates the contents of the cache from another dict.
  """
  assert 'threadsafe' in kwargs, "Must specify threadsafe={True,False}"
  threadsafe = kwargs['threadsafe']

  if threadsafe:
    def withlock(lock, f):
      def inner(*args, **kwargs):
        with lock:
          return f(*args, **kwargs)
      return inner
  else:
    def withlock(_lock, f):
      return f

  def decorator(f):
    # Instantiate the lock in decorator, in case users of memoize_one do:
    #
    # memoizer = memoize_one(threadsafe=True)
    #
    # @memoizer
    # def fn1(val): ...
    #
    # @memoizer
    # def fn2(val): ...

    lock = threading.Lock() if threadsafe else None
    cache = {}
    _get = withlock(lock, cache.get)
    _set = withlock(lock, cache.__setitem__)

    @functools.wraps(f)
    def inner(arg):
      ret = _get(arg)
      if ret is None:
        ret = f(arg)
        if ret is not None:
          _set(arg, ret)
      return ret
    inner.get = _get
    inner.set = _set
    inner.clear = withlock(lock, cache.clear)
    inner.update = withlock(lock, cache.update)
    return inner
  return decorator
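
For context, a minimal usage sketch of the decorator (illustrative only, not part of the patch under review; expensive_lookup is a made-up stand-in for a slow pure function):

    @memoize_one(threadsafe=True)
    def expensive_lookup(key):
      # Stand-in for an expensive, pure computation. None results are not
      # cached, per the docstring above.
      return key.upper()

    expensive_lookup('abc')           # computed once, then cached
    expensive_lookup.set('abc', 'x')  # pre-seed / override the cache
    expensive_lookup.clear()          # drop the whole cache (handy in tests)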

def _ScopedPool_initer(orig, orig_args):  # pragma: no cover
  """Initializer method for ScopedPool's subprocesses.

  This helps ScopedPool handle Ctrl-C's correctly.
  """
  signal.signal(signal.SIGINT, signal.SIG_IGN)
  if orig:
    orig(*orig_args)


@contextlib.contextmanager
def ScopedPool(*args, **kwargs):
  """Context Manager which returns a multiprocessing.pool instance and
  correctly deals with thrown exceptions.

  *args - Arguments to multiprocessing.pool

  Kwargs:
    kind ('threads', 'procs') - The type of underlying coprocess to use.
    **etc - Arguments to multiprocessing.pool
  """
  if kwargs.pop('kind', None) == 'threads':
    pool = multiprocessing.pool.ThreadPool(*args, **kwargs)
  else:
    orig, orig_args = kwargs.get('initializer'), kwargs.get('initargs', ())
    kwargs['initializer'] = _ScopedPool_initer
    kwargs['initargs'] = orig, orig_args
    pool = multiprocessing.pool.Pool(*args, **kwargs)

  try:
    yield pool
    pool.close()
  except:
    pool.terminate()
    raise
  finally:
    pool.join()
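
A quick usage sketch (illustrative only, not part of the patch): the pool is always terminated and joined, even if the body raises or is interrupted with Ctrl-C.

    with ScopedPool(kind='threads') as pool:
      # Thread pool; any other kind (or omitting it) yields a process Pool.
      lengths = list(pool.imap(len, ['a', 'bb', 'ccc']))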


class ProgressPrinter(object):

M-A Ruel  2013/11/13 19:55:00
FTR, I prefer https://code.google.com/p/swarming/s

iannucci  2013/11/14 07:06:29
Nice :D.
Yeah we should have a standard library o

  """Threaded single-stat status message printer."""
  def __init__(self, fmt, enabled=None, stream=sys.stderr, period=0.5):
    """Create a ProgressPrinter.

    Use it as a context manager which produces a simple 'increment' method:

      with ProgressPrinter('(%%(count)d/%d)' % 1000) as inc:
        for i in xrange(1000):
          # do stuff
          if i % 10 == 0:
            inc(10)

    Args:
      fmt - String format with a single '%(count)d' where the counter value
        should go.
      enabled (bool) - If this is None, will default to True if
        logging.getLogger() is set to INFO or more verbose.
      stream (file-like) - The stream to print status messages to.
      period (float) - The time in seconds for the printer thread to wait
        between printing.
    """
    self.fmt = fmt
    if enabled is None:  # pragma: no cover
      self.enabled = logging.getLogger().isEnabledFor(logging.INFO)
    else:
      self.enabled = enabled

    self._count = 0
    self._dead = False
    self._dead_cond = threading.Condition()
    self._stream = stream
    self._thread = threading.Thread(target=self._run)
    self._period = period

  def _emit(self, s):
    if self.enabled:
      self._stream.write('\r'+s)

M-A Ruel  2013/11/13 19:55:00
self._stream.write('\r' + s)

iannucci  2013/11/14 07:06:29
d'oh. Why doesn't our presubmit barf on this?

M-A Ruel  2013/11/14 17:27:24
No idea.

      self._stream.flush()

  def _run(self):
    with self._dead_cond:
      while not self._dead:
        self._emit(self.fmt % {'count': self._count})
        self._dead_cond.wait(self._period)
      self._emit((self.fmt+'\n') % {'count': self._count})

  def inc(self, amount=1):
    self._count += amount

  def __enter__(self):
    self._thread.start()
    return self.inc

  def __exit__(self, _exc_type, _exc_value, _traceback):
    self._dead = True
    with self._dead_cond:
      self._dead_cond.notifyAll()
    self._thread.join()
    del self._thread
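
For context, a sketch of how ProgressPrinter and ScopedPool might be combined (illustrative only, not part of the patch):

    items = range(100)
    with ProgressPrinter('squaring (%%(count)d/%d)' % len(items)) as inc:
      with ScopedPool(kind='threads') as pool:
        for _ in pool.imap(lambda x: x * x, items):
          inc()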


def parse_commitrefs(*commitrefs):
  """Returns binary encoded commit hashes for one or more commitrefs.

  A commitref is anything which can resolve to a commit. Popular examples:
    * "HEAD"
    * "origin/master"
    * "cool_branch~2"
  """
  try:
    return map(binascii.unhexlify, hashes(*commitrefs))
  except CalledProcessError:
    raise Exception('one of %s does not seem to be a valid commitref.' %
                    str(commitrefs))

M-A Ruel  2013/11/13 19:55:00
str() is not needed by definition.

iannucci  2013/11/14 07:06:29
Well... it is because commitrefs is a tuple. I don
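
A usage sketch (illustrative only, not part of the patch; assumes the current directory is a git checkout with an origin/master ref):

    head, master = parse_commitrefs('HEAD', 'origin/master')
    # Each value is the raw 20-byte SHA-1, not the 40-character hex string.
    assert len(head) == 20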

def _check_output(*popenargs, **kwargs):

M-A Ruel  2013/11/13 19:55:00
Why not subprocess2.check_output() ? It works on 2

iannucci  2013/11/14 07:06:29
Good question. I'll use that.

  """Runs a Popen command, and returns the stdout as a string.

  Throws CalledProcessError if the command returns non-zero.

  kwargs:
    indata (str) - Data to provide to the command on stdin. Mutually exclusive
      with the Popen kwarg 'stdin'.

  Other than that, popenargs is *args to Popen, and **kwargs is... **kwargs to
  Popen.
  """
  kwargs.setdefault('stdout', subprocess.PIPE)
  kwargs.setdefault('stderr', subprocess.PIPE)
  indata = kwargs.pop('indata', None)
  if indata is not None:
    kwargs['stdin'] = subprocess.PIPE
  process = subprocess.Popen(*popenargs, **kwargs)
  output, _ = process.communicate(indata)
  if process.returncode:
    cmd = kwargs.get('args')
    if cmd is None:
      cmd = popenargs[0]
    raise CalledProcessError(process.returncode, cmd)
  return output


def run(*cmd, **kwargs):
  """Runs a git command. Returns stdout as a string.

  If logging is DEBUG, we'll print the command before we run it.

  kwargs:
    autostrip (bool) - Strip leading/trailing whitespace from the output.
      Defaults to True.
  """
  autostrip = kwargs.pop('autostrip', True)
  cmd = (GIT_EXE,) + cmd
  logging.debug('running: %s', " ".join(repr(tok) for tok in cmd))
  ret = _check_output(cmd, **kwargs)
  if autostrip:
    ret = (ret or '').strip()
  return ret
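
A usage sketch (illustrative only, not part of the patch; assumes git is on PATH and the current directory is a repository):

    print run('rev-parse', '--git-dir')                 # e.g. '.git'
    raw = run('status', '--porcelain', autostrip=False)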

def hashes(*reflike):
  return run('rev-parse', *reflike).splitlines()


def intern_f(f, kind='blob'):
  """Interns a file object into the git object store.

  Args:
    f (file-like object) - The file-like object to intern.
    kind (git object type) - One of 'blob', 'commit', 'tree', 'tag'.

  Returns the git hash of the interned object (hex encoded).
  """
  ret = run('hash-object', '-t', kind, '-w', '--stdin', stdin=f)
  f.close()
  return ret
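
A usage sketch (illustrative only, not part of the patch). The file object ends up as Popen's stdin, so it needs a real file descriptor; an in-memory StringIO would not work here:

    f = tempfile.TemporaryFile()
    f.write('hello world\n')
    f.seek(0)
    blob_hash = intern_f(f)  # f is closed by intern_f; returns 40 hex chars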

def tree(treeref, recurse=False):
  """Returns a dict representation of a git tree object.

  Args:
    treeref (str) - a git ref which resolves to a tree (commits count as
      trees).
    recurse (bool) - include all of the tree's descendants too. File names
      will take the form of 'some/path/to/file'.

  Return format:
    { 'file_name': (mode, type, ref) }

    mode is an integer where:
      * 0040000 - Directory
      * 0100644 - Regular non-executable file
      * 0100664 - Regular non-executable group-writeable file
      * 0100755 - Regular executable file
      * 0120000 - Symbolic link
      * 0160000 - Gitlink

    type is a string: one of 'blob', 'commit', 'tree', 'tag'.

    ref is the hex encoded hash of the entry.
  """
  ret = {}
  opts = ['ls-tree', '--full-tree']
  if recurse:
    opts += ['-r']
  opts.append(treeref)
  try:
    for line in run(*opts).splitlines():
      mode, typ, ref, name = line.split(None, 3)
      ret[name] = (mode, typ, ref)
  except CalledProcessError:
    return None
  return ret


def mktree(treedict):
  """Makes a git tree object and returns its hash.

  See |tree()| for the values of mode, type, and ref.

  Args:
    treedict - { name: (mode, type, ref) }
  """
  with tempfile.TemporaryFile() as f:
    for name, (mode, typ, ref) in treedict.iteritems():
      f.write('%s %s %s\t%s\0' % (mode, typ, ref, name))
    f.seek(0)
    return run('mktree', '-z', stdin=f)