Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(539)

Side by Side Diff: client/common_lib/utils.py

Issue 6124004: Revert "Merge remote branch 'cros/upstream' into autotest-rebase" (Closed) Base URL: ssh://git@gitrw.chromium.org:9222/autotest.git@master
Patch Set: Created 9 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « client/common_lib/software_manager.py ('k') | client/profilers/cpistat/cpistat.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 """ 1 #
2 Convenience functions for use by tests or whomever. 2 # Copyright 2008 Google Inc. Released under the GPL v2
3 3
4 NOTE: this is a mixin library that pulls in functions from several places 4 import os, pickle, random, re, resource, select, shutil, signal, StringIO
5 Note carefully what the precedence order is 5 import socket, struct, subprocess, sys, time, textwrap, urlparse
6 6 import warnings, smtplib, logging, urllib2
7 There's no really good way to do this, as this isn't a class we can do 7 from threading import Thread, Event
8 inheritance with, just a collection of static methods. 8 try:
9 """ 9 import hashlib
10 10 except ImportError:
11 from autotest_lib.client.common_lib.base_utils import * 11 import md5, sha
12 if os.path.exists(os.path.join(os.path.dirname(__file__), 'site_utils.py')): 12 from autotest_lib.client.common_lib import error, logging_manager
13 from autotest_lib.client.common_lib.site_utils import * 13
def deprecated(func):
    """Decorator that marks a function as deprecated.

    Calling the decorated function emits a DeprecationWarning before
    delegating to the original implementation.
    """
    def wrapper(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    # Manually propagate metadata (pre-functools.wraps style, kept for
    # compatibility with old python versions this file supports).
    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    wrapper.__dict__.update(func.__dict__)
    return wrapper
25
26
27 class _NullStream(object):
28 def write(self, data):
29 pass
30
31
32 def flush(self):
33 pass
34
35
# Sentinel value: pass as stdout_tee/stderr_tee to route command output
# into the logging system instead of a real stream.
TEE_TO_LOGS = object()
# Shared sink used when a caller passes None for a tee stream.
_the_null_stream = _NullStream()

# Default logging levels used for a command's stdout and stderr.
DEFAULT_STDOUT_LEVEL = logging.DEBUG
DEFAULT_STDERR_LEVEL = logging.ERROR

# prefixes for logging stdout/stderr of commands
STDOUT_PREFIX = '[stdout] '
STDERR_PREFIX = '[stderr] '
45
46
def get_stream_tee_file(stream, level, prefix=''):
    """Map a tee specification to an actual writable stream.

    @param stream: None (discard), TEE_TO_LOGS (route to logging), or a
            file-like object (used as-is).
    @param level: Logging level used when routing to the logging system.
    @param prefix: Prefix prepended to each logged line.
    @return: A file-like object with write()/flush().
    """
    if stream is TEE_TO_LOGS:
        return logging_manager.LoggingFile(level=level, prefix=prefix)
    if stream is None:
        return _the_null_stream
    return stream
53
54
class BgJob(object):
    """A command run asynchronously as a subprocess.

    The process is spawned immediately on construction (shell=True).
    Output is collected incrementally via process_output() and finalized
    by cleanup(); both are normally driven by join_bg_jobs().
    """
    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None, stderr_level=DEFAULT_STDERR_LEVEL):
        """
        @param command: Shell command line string to execute.
        @param stdout_tee: Stream, TEE_TO_LOGS or None; receives a live copy
                of stdout (data is still stored in self.result.stdout).
        @param stderr_tee: Likewise for stderr.
        @param verbose: If True, log the command being run.
        @param stdin: File descriptor, file object, or a string whose bytes
                are fed to the child's stdin from the wait loop.
        @param stderr_level: Logging level used when stderr is teed to logs.
        """
        self.command = command
        self.stdout_tee = get_stream_tee_file(stdout_tee, DEFAULT_STDOUT_LEVEL,
                                              prefix=STDOUT_PREFIX)
        self.stderr_tee = get_stream_tee_file(stderr_tee, stderr_level,
                                              prefix=STDERR_PREFIX)
        self.result = CmdResult(command)

        # allow for easy stdin input by string, we'll let subprocess create
        # a pipe for stdin input and we'll write to it in the wait loop
        if isinstance(stdin, basestring):
            self.string_stdin = stdin
            stdin = subprocess.PIPE
        else:
            self.string_stdin = None

        if verbose:
            # Lazy %-args: the string is only formatted if DEBUG is enabled
            # (the original eagerly formatted with "..." % command).
            logging.debug("Running '%s'", command)
        self.sp = subprocess.Popen(command, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   preexec_fn=self._reset_sigpipe, shell=True,

                                   # Default shell in ChromeOS test image is
                                   # already bash. We're seeing shell-init
                                   # errors if this value is set.

                                   #executable="/bin/bash",
                                   stdin=stdin)


    def output_prepare(self, stdout_file=None, stderr_file=None):
        """Attach buffers (usually StringIO) that accumulate full output."""
        self.stdout_file = stdout_file
        self.stderr_file = stderr_file


    def process_output(self, stdout=True, final_read=False):
        """Read available output from one pipe into buffer and tee.

        output_prepare must be called prior to calling this.

        @param stdout: True to service stdout, False for stderr.
        @param final_read: If True, drain everything currently readable.
        """
        if stdout:
            pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee
        else:
            pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            while select.select([pipe], [], [], 0)[0]:
                data.append(os.read(pipe.fileno(), 1024))
                if len(data[-1]) == 0:
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = os.read(pipe.fileno(), 1024)
        buf.write(data)
        tee.write(data)


    def cleanup(self):
        """Flush tees, close the child's pipes, and capture final output."""
        self.stdout_tee.flush()
        self.stderr_tee.flush()
        self.sp.stdout.close()
        self.sp.stderr.close()
        self.result.stdout = self.stdout_file.getvalue()
        self.result.stderr = self.stderr_file.getvalue()


    def _reset_sigpipe(self):
        # Child pre-exec hook: python ignores SIGPIPE process-wide, so
        # restore the default disposition for the spawned command.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
125
126
def ip_to_long(ip):
    """Convert a dotted-quad IPv4 string to its 32-bit integer value."""
    packed = socket.inet_aton(ip)
    # '!L' = unsigned 32-bit integer in network (big-endian) byte order.
    (value,) = struct.unpack('!L', packed)
    return value
130
131
def long_to_ip(number):
    """Convert a 32-bit integer to a dotted-quad IPv4 string."""
    packed = struct.pack('!L', number)  # network byte order, see ip_to_long
    return socket.inet_ntoa(packed)
135
136
def create_subnet_mask(bits):
    """Return the 32-bit netmask integer with the top `bits` bits set."""
    # All-ones with the low (32 - bits) host bits cleared.
    return 0xFFFFFFFF ^ ((1 << (32 - bits)) - 1)
139
140
def format_ip_with_mask(ip, mask_bits):
    """Return `ip` masked down to its network address, in CIDR notation."""
    network = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(network), mask_bits)
144
145
def normalize_hostname(alias):
    """Resolve an alias to its canonical hostname.

    Performs a forward DNS lookup followed by a reverse lookup.
    """
    addr = socket.gethostbyname(alias)
    hostname, _aliases, _addresses = socket.gethostbyaddr(addr)
    return hostname
149
150
def get_ip_local_port_range():
    """Return the kernel's ephemeral port range as an (int, int) tuple."""
    contents = read_one_line('/proc/sys/net/ipv4/ip_local_port_range')
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$', contents)
    lower, upper = match.groups()
    return (int(lower), int(upper))
155
156
def set_ip_local_port_range(lower, upper):
    """Write the kernel's ephemeral port range (requires root)."""
    value = '%d %d\n' % (lower, upper)
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range', value)
160
161
162
def send_email(mail_from, mail_to, subject, body):
    """
    Sends an email via smtp (localhost relay).

    @param mail_from: string with email address of sender
    @param mail_to: string or list with email address(es) of recipients
    @param subject: string with subject of email
    @param body: (multi-line) string with body of email
    """
    if isinstance(mail_to, str):
        mail_to = [mail_to]
    msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mail_from, ','.join(mail_to),
                                                   subject, body)
    try:
        mailer = smtplib.SMTP('localhost')
        try:
            mailer.sendmail(mail_from, mail_to, msg)
        finally:
            mailer.quit()
    # 'as' form replaces the python-2-only "except Exception, e" syntax.
    except Exception as e:
        # Emails are non-critical, not errors, but don't raise them
        print("Sending email failed. Reason: %s" % repr(e))
185
186
def read_one_line(filename):
    """Return the first line of filename, without its trailing newline.

    The original leaked the file handle; close it deterministically.
    """
    f = open(filename, 'r')
    try:
        return f.readline().rstrip('\n')
    finally:
        f.close()
189
190
def read_file(filename):
    """Return the entire contents of filename as a single string."""
    with open(filename) as f:
        return f.read()
197
198
def get_field(data, param, linestart="", sep=" "):
    """
    Parse one field out of multi-line string data.

    example:
        data:
            cpu   324 345 34  5 345
            cpu0  34  11  34 34  33
            ^^^^
            start of line
            params 0   1   2  3   4

    @param data: Data to parse.
    @param param: Position of parameter after linestart marker.
    @param linestart: String with which the wanted line starts.
    @param sep: Separator between parameters (regular expression).
    @return: The selected field as a string, or None if no line in data
             starts with linestart.
    """
    # Fixed-width lookbehind anchored at line start finds the first line
    # beginning with linestart; group(1) is the remainder of that line.
    pattern = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
    match = pattern.search(data)
    if match is None:
        # print() call form replaces the python-2-only print statement.
        print("There is no line which starts with %s in data." % linestart)
        return None
    return re.split(sep, match.group(1))[param]
221
222
def write_one_line(filename, line):
    """Overwrite filename with line, normalized to one trailing newline."""
    normalized = line.rstrip('\n') + '\n'
    open_write_close(filename, normalized)
225
226
def open_write_close(filename, data):
    """Write data to filename, replacing any existing contents."""
    with open(filename, 'w') as f:
        f.write(data)
233
234
def matrix_to_string(matrix, header=None):
    """
    Return a pretty, aligned string representation of a nxm matrix.

    This representation can be used to print any tabular data, such as
    database results. It works by scanning the lengths of each element
    in each column, and determining the format string dynamically.

    @param matrix: Matrix representation (list with n rows of m elements).
    @param header: Optional tuple or list with header elements to be displayed.
    @return: Aligned string, one newline-terminated line per row.
    """
    if type(header) is list:
        header = tuple(header)
    lengths = []
    if header:
        for column in header:
            lengths.append(len(column))
    for row in matrix:
        # enumerate() yields the true column index; the original used
        # row.index(column), which returns the FIRST occurrence and so
        # mis-tracked widths whenever a row contained duplicate values.
        for i, column in enumerate(row):
            cl = len(column)
            if i < len(lengths):
                lengths[i] = max(lengths[i], cl)
            else:
                lengths.append(cl)

    format_string = ""
    for length in lengths:
        format_string += "%-" + str(length) + "s "
    format_string += "\n"

    matrix_str = ""
    if header:
        matrix_str += format_string % header
    for row in matrix:
        matrix_str += format_string % tuple(row)

    return matrix_str
276
277
def read_keyval(path):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.

    Values that look like integers or floats are converted accordingly;
    everything else stays a string. '#' starts a comment.

    @param path: File or directory to read from.
    @return: dict of parsed key/value pairs ({} if the file is absent).
    @raise ValueError: On a line that is not of the form key=value.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = {}
    if os.path.exists(path):
        # Close the handle deterministically; the original leaked it.
        f = open(path)
        try:
            for line in f:
                line = re.sub('#.*', '', line).rstrip()
                if not re.search(r'^[-\.\w]+=', line):
                    raise ValueError('Invalid format line: %s' % line)
                key, value = line.split('=', 1)
                if re.search(r'^\d+$', value):
                    value = int(value)
                elif re.search(r'^(\d+\.)?\d+$', value):
                    value = float(value)
                keyval[key] = value
        finally:
            f.close()
    return keyval
299
300
def write_keyval(path, dictionary, type_tag=None):
    """
    Append a dictionary to a key-value pair format file.

    The file is opened in append mode, so existing text will not be
    overwritten or reparsed. If path is a directory, the file inside it
    named 'keyval' is used.

    If type_tag is None, then each key must be composed of alphanumeric
    characters (or dashes+underscores). Otherwise type_tag must be 'attr'
    or 'perf' and every key must carry "{type_tag}" as a suffix.

    @raise ValueError: On an invalid type_tag or a key that fails validation.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = open(path, 'a')

    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    else:
        if type_tag not in ('attr', 'perf'):
            raise ValueError('Invalid type tag: %s' % type_tag)
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % re.escape(type_tag))
    try:
        # Deterministic output order: keys are written sorted.
        for key in sorted(dictionary.keys()):
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, dictionary[key]))
    finally:
        keyval.close()
330
331
class FileFieldMonitor(object):
    """
    Monitors the information from the file and reports its values.

    It gathers the information at start and stop of the measurement or
    continuously during the measurement.
    """
    class Monitor(Thread):
        """
        Internal monitor class to ensure continuous monitoring of the
        monitored file.
        """
        def __init__(self, master):
            """
            @param master: Master class which controls the Monitor.
            """
            Thread.__init__(self)
            self.master = master

        def run(self):
            """
            Start monitor in thread mode.
            """
            # Sample the file every time_step until the master's stop()
            # sets end_event.
            while not self.master.end_event.isSet():
                self.master._get_value(self.master.logging)
                time.sleep(self.master.time_step)


    def __init__(self, status_file, data_to_read, mode_diff, continuously=False,
                 contlogging=False, separator=" +", time_step=0.1):
        """
        Initialize variables.
        @param status_file: File containing the status to be sampled.
        @param data_to_read: List of tuples with data position.
                             format: [(start_of_line, position in params)]
                             example:
                             data:
                                cpu   324 345 34  5 345
                                cpu0  34  11  34 34  33
                                ^^^^
                                start of line
                                params 0   1   2  3   4
        @param mode_diff: True to subtract old value from new value,
                          False to make an average of the values.
        @param continuously: Start the monitoring thread using the time_step
                             as the measurement period.
        @param contlogging: Log data in continuous run.
        @param separator: Regular expression of separator.
        @param time_step: Time period of the monitoring value.
        """
        self.end_event = Event()  # signals the Monitor thread to stop
        self.start_time = 0
        self.end_time = 0
        self.test_time = 0

        self.status_file = status_file
        self.separator = separator
        self.data_to_read = data_to_read
        self.num_of_params = len(self.data_to_read)
        self.mode_diff = mode_diff
        self.continuously = continuously
        self.time_step = time_step

        self.value = [0 for i in range(self.num_of_params)]
        self.old_value = [0 for i in range(self.num_of_params)]
        self.log = []  # history of sampled values (only when logging)
        self.logging = contlogging

        self.started = False
        self.num_of_get_value = 0  # number of samples taken so far
        self.monitor = None  # Monitor thread, when running continuously


    def _get_value(self, logging=True):
        """
        Return current values.
        @param logging: If true log value in memory. There can be problems
                        with very long runs (memory growth). NOTE: this
                        parameter shadows the `logging` module inside this
                        method; the module is not used here.
        """
        data = read_file(self.status_file)
        value = []
        for i in range(self.num_of_params):
            value.append(int(get_field(data,
                             self.data_to_read[i][1],
                             self.data_to_read[i][0],
                             self.separator)))

        if logging:
            self.log.append(value)
        if not self.mode_diff:
            # Averaging mode accumulates a running sum; stop() divides
            # by num_of_get_value. (map returns a list on python 2.)
            value = map(lambda x, y: x + y, value, self.old_value)

        self.old_value = value
        self.num_of_get_value += 1
        return value


    def start(self):
        """
        Start value monitor.

        Resets all accumulated state; if a measurement is already running
        it is stopped first.
        """
        if self.started:
            self.stop()
        self.old_value = [0 for i in range(self.num_of_params)]
        self.num_of_get_value = 0
        self.log = []
        self.end_event.clear()
        self.start_time = time.time()
        self._get_value()
        self.started = True
        if (self.continuously):
            self.monitor = FileFieldMonitor.Monitor(self)
            self.monitor.start()


    def stop(self):
        """
        Stop value monitor.

        Takes a final sample, joins the continuous-monitor thread if one
        is running, and computes the final value (difference or average,
        depending on mode_diff).
        """
        if self.started:
            self.started = False
            self.end_time = time.time()
            self.test_time = self.end_time - self.start_time
            self.value = self._get_value()
            if (self.continuously):
                self.end_event.set()
                self.monitor.join()
            if (self.mode_diff):
                # Difference between the last and first logged samples.
                self.value = map(lambda x, y: x - y, self.log[-1], self.log[0])
            else:
                # Average: the accumulated sum divided by the sample count.
                self.value = map(lambda x: x / self.num_of_get_value,
                                 self.value)


    def get_status(self):
        """
        @return: Status of monitored process average value,
                 time of test and array of monitored values and time step of
                 continuous run.
        """
        if self.started:
            self.stop()
        if self.mode_diff:
            # Convert the log of absolute samples into per-step deltas.
            for i in range(len(self.log) - 1):
                self.log[i] = (map(lambda x, y: x - y,
                                   self.log[i + 1], self.log[i]))
            self.log.pop()
        return (self.value, self.test_time, self.log, self.time_step)
480
481
def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    scheme = urlparse.urlparse(path)[0]
    return scheme in ('http', 'ftp')
487
488
def urlopen(url, data=None, timeout=5):
    """Wrapper to urllib2.urlopen with timeout addition."""
    # This urllib2 has no per-call timeout, so temporarily swap the
    # process-wide socket default and restore it afterwards.
    saved_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib2.urlopen(url, data=data)
    finally:
        socket.setdefaulttimeout(saved_timeout)
499
500
def urlretrieve(url, filename, data=None, timeout=300):
    """Retrieve a file from given url."""
    logging.debug('Fetching %s -> %s', url, filename)

    src_file = urlopen(url, data=data, timeout=timeout)
    try:
        with open(filename, 'wb') as dest_file:
            shutil.copyfileobj(src_file, dest_file)
    finally:
        src_file.close()
514
515
def hash(type, input=None):
    """
    Returns a hash object of type md5 or sha1. This function is implemented in
    order to encapsulate hash objects in a way that is compatible with python
    2.4 and python 2.6 without warnings.

    Note that even though python 2.6 hashlib supports hash types other than
    md5 and sha1, we are artificially limiting the input values in order to
    make the function to behave exactly the same among both python
    implementations.

    @param type: Hash type; only 'md5' and 'sha1' are accepted.
    @param input: Optional input string that will be used to update the hash.
    @raise ValueError: If an unsupported hash type is requested.
    @return: The (optionally updated) hash object.
    """
    if type not in ['md5', 'sha1']:
        raise ValueError("Unsupported hash type: %s" % type)

    try:
        digest = hashlib.new(type)
    except NameError:
        # hashlib failed to import (python 2.4); use the legacy modules.
        if type == 'md5':
            digest = md5.new()
        elif type == 'sha1':
            digest = sha.new()

    if input:
        digest.update(input)

    return digest
544
545
def get_file(src, dest, permissions=None):
    """Get a file from src, which can be local or a remote URL.

    @param src: Local path or http/ftp URL of the source.
    @param dest: Local destination path.
    @param permissions: Optional mode bits applied to dest via os.chmod.
    @return: dest, in every case (the original inconsistently returned
             None when src == dest but dest otherwise).
    """
    if src == dest:
        # Nothing to copy; return dest for a consistent interface.
        return dest

    if is_url(src):
        urlretrieve(src, dest)
    else:
        shutil.copyfile(src, dest)

    if permissions:
        os.chmod(dest, permissions)
    return dest
559
560
def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    returns either the path to the local file, or the fetched URL

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                            = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                            = '/tmp/file'
                            (after retrieving it)
    """
    if not is_url(src):
        # Plain local file relative to srcdir.
        return os.path.join(srcdir, src)
    # URL: fetch it into destdir under its basename.
    filename = os.path.basename(urlparse.urlparse(src)[2])
    return get_file(src, os.path.join(destdir, filename))
579
580
def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir

    @param srcdir: Directory whose version is tracked in srcdir/.version.
    @param preserve_srcdir: If True, never delete srcdir before reinstall.
    @param new_version: Version value to compare/record (pickled).
    @param install: Callable invoked with *args/**dargs to (re)install.
    """
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile):
        # Binary mode is required for pickle; the original opened the
        # file in text mode and leaked the handle.
        f = open(versionfile, 'rb')
        try:
            old_version = pickle.load(f)
        finally:
            f.close()
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)
        if os.path.exists(srcdir):
            f = open(versionfile, 'wb')
            try:
                pickle.dump(new_version, f)
            finally:
                f.close()
605
606
def get_stderr_level(stderr_is_expected):
    """Pick the logging level for a command's stderr stream.

    Expected stderr is logged at the (quieter) stdout level; unexpected
    stderr is logged at the error level.
    """
    if not stderr_is_expected:
        return DEFAULT_STDERR_LEVEL
    return DEFAULT_STDOUT_LEVEL
611
612
def run(command, timeout=None, ignore_status=False,
        stdout_tee=None, stderr_tee=None, verbose=True, stdin=None,
        stderr_is_expected=None, args=()):
    """
    Run a command on the host.

    @param command: the command line string.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The run() function will take a few seconds
            longer than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param stdout_tee: optional file-like object to which stdout data
            will be written as it is generated (data will still be stored
            in result.stdout).
    @param stderr_tee: likewise for stderr.
    @param verbose: if True, log the command being run.
    @param stdin: stdin to pass to the executed process (can be a file
            descriptor, a file object of a real file or a string).
    @param stderr_is_expected: if True stderr is logged at stdout's level;
            defaults to the value of ignore_status.
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a CmdResult object

    @raise CmdError: the exit code of the command execution was not 0
    """
    if isinstance(args, basestring):
        raise TypeError('Got a string for the "args" keyword argument, '
                        'need a sequence.')

    for extra_arg in args:
        command += ' "%s"' % sh_escape(extra_arg)
    if stderr_is_expected is None:
        stderr_is_expected = ignore_status

    job = BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
                stderr_level=get_stderr_level(stderr_is_expected))
    bg_job = join_bg_jobs((job,), timeout)[0]
    if not ignore_status and bg_job.result.exit_status:
        raise error.CmdError(command, bg_job.result,
                             "Command returned non-zero exit status")

    return bg_job.result
659
660
def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None):
    """
    Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    @return: a list of CmdResult objects
    """
    bg_jobs = []
    for command in commands:
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
                             stderr_level=get_stderr_level(ignore_status)))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            # Report the command of the job that actually failed; the
            # original raised with the loop-leftover `command` variable,
            # which was always the last command in the list.
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]
686
687
@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    job = BgJob(command)
    return job.sp, job.result
693
694
def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Waits for every job to finish (or for timeout to expire), collects
    remaining output, and closes our ends of the pipes.

    @param bg_jobs: sequence of BgJob objects to wait on.
    @param timeout: overall time limit in seconds, or None for no limit.
    @raise CmdError: if the jobs did not all complete within timeout.
    @return: the same list of bg_jobs objects that was passed in.
    """
    # (the original also initialized an unused local `ret` here)
    timeout_error = False
    for bg_job in bg_jobs:
        bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO())

    try:
        # We are holding ends to stdin, stdout pipes
        # hence we need to be sure to close those fds no matter what
        start_time = time.time()
        timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)

        for bg_job in bg_jobs:
            # Process stdout and stderr
            bg_job.process_output(stdout=True, final_read=True)
            bg_job.process_output(stdout=False, final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timeout_error:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatable, so it will
        # do for the time being.
        raise error.CmdError(bg_jobs[0].command, bg_jobs[0].result,
                             "Command(s) did not complete within %d seconds"
                             % timeout)

    return bg_jobs
729
730
def _wait_for_commands(bg_jobs, start_time, timeout):
    """Service the stdin/stdout/stderr pipes of bg_jobs until they all
    exit or timeout expires; kill any stragglers on timeout.

    @param bg_jobs: BgJob objects whose subprocesses are running.
    @param start_time: time.time() when the jobs were started.
    @param timeout: overall limit in seconds, or a falsy value for none.
    @return: True if it had to stop due to a timeout, otherwise False.
    """
    # This returns True if it must return due to a timeout, otherwise False.

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    read_list = []
    write_list = []
    reverse_dict = {}   # maps each pipe back to (bg_job, is_stdout) / bg_job

    for bg_job in bg_jobs:
        read_list.append(bg_job.sp.stdout)
        read_list.append(bg_job.sp.stderr)
        reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        reverse_dict[bg_job.sp.stderr] = (bg_job, False)
        if bg_job.string_stdin is not None:
            write_list.append(bg_job.sp.stdin)
            reverse_dict[bg_job.sp.stdin] = bg_job

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None # so that select never times out

    while not timeout or time_left > 0:
        # select will return when we may write to stdin or when there is
        # stdout/stderr output we can read (including when it is
        # EOF, that is the process has terminated).
        read_ready, write_ready, _ = select.select(read_list, write_list, [],
                                                   SELECT_TIMEOUT)

        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for file_obj in read_ready:
            bg_job, is_stdout = reverse_dict[file_obj]
            bg_job.process_output(is_stdout)

        for file_obj in write_ready:
            # we can write PIPE_BUF bytes without blocking
            # POSIX requires PIPE_BUF is >= 512
            bg_job = reverse_dict[file_obj]
            file_obj.write(bg_job.string_stdin[:512])
            bg_job.string_stdin = bg_job.string_stdin[512:]
            # no more input data, close stdin, remove it from the select set
            if not bg_job.string_stdin:
                file_obj.close()
                write_list.remove(file_obj)
                del reverse_dict[file_obj]

        all_jobs_finished = True
        for bg_job in bg_jobs:
            if bg_job.result.exit_status is not None:
                continue

            bg_job.result.exit_status = bg_job.sp.poll()
            if bg_job.result.exit_status is not None:
                # process exited, remove its stdout/stdin from the select set
                bg_job.result.duration = time.time() - start_time
                read_list.remove(bg_job.sp.stdout)
                read_list.remove(bg_job.sp.stderr)
                del reverse_dict[bg_job.sp.stdout]
                del reverse_dict[bg_job.sp.stderr]
            else:
                all_jobs_finished = False

        if all_jobs_finished:
            return False

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in bg_jobs:
        if bg_job.result.exit_status is not None:
            continue

        logging.warn('run process timeout (%s) fired on: %s', timeout,
                     bg_job.command)
        nuke_subprocess(bg_job.sp)
        bg_job.result.exit_status = bg_job.sp.poll()
        bg_job.result.duration = time.time() - start_time

    return True
816
817
def pid_is_alive(pid):
    """
    True if process pid exists and is not yet stuck in Zombie state.
    Zombies are impossible to move between cgroups, etc.
    pid can be integer, or text of integer.
    """
    stat_path = '/proc/%s/stat' % pid

    try:
        stat_line = read_one_line(stat_path)
    except IOError:
        if os.path.exists(stat_path):
            # some other I/O problem; propagate it
            raise
        # file went away: process is gone
        return False

    # Field 3 of /proc/<pid>/stat is the state character; 'Z' == zombie.
    return stat_line.split()[2] != 'Z'
835
836
def signal_pid(pid, sig):
    """
    Sends a signal to a process id. Returns True if the process terminated
    successfully, False otherwise.
    """
    try:
        os.kill(pid, sig)
    except OSError:
        # The process may have died before we could kill it.
        pass

    # Poll for up to ~5 seconds for the process to go away.
    for _ in range(5):
        if not pid_is_alive(pid):
            return True
        time.sleep(1)

    # The process is still alive
    return False
855
856
def nuke_subprocess(subproc):
    """Terminate a subprocess.Popen, escalating SIGTERM -> SIGKILL.

    @return: the process exit status, or None if it refused to die.
    """
    # check if the subprocess is still alive, first
    status = subproc.poll()
    if status is not None:
        return status

    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    for sig in (signal.SIGTERM, signal.SIGKILL):
        signal_pid(subproc.pid, sig)
        status = subproc.poll()
        if status is not None:
            return status
869
870
def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
    """Kill pid via an escalating series of signals.

    @raise AutoservRunError: if no signal terminated the process.
    """
    for sig in signal_queue:
        if signal_pid(pid, sig):
            return

    # no signal successfully terminated the process
    raise error.AutoservRunError('Could not kill %d' % pid, None)
880
881
def system(command, timeout=None, ignore_status=False):
    """
    Run a command, teeing its output to the logging system.

    @param timeout: timeout in seconds
    @param ignore_status: if ignore_status=False, throw an exception if the
            command's exit code is non-zero
            if ignore_status=True, return the exit code.

    @return exit status of command
            (note, this will always be zero unless ignore_status=True)
    """
    result = run(command, timeout=timeout, ignore_status=ignore_status,
                 stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return result.exit_status
896
897
def system_parallel(commands, timeout=None, ignore_status=False):
    """Run commands in parallel; return their exit statuses, in order."""
    results = run_parallel(commands, timeout=timeout,
                           ignore_status=ignore_status,
                           stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return [result.exit_status for result in results]
904
905
def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False, args=()):
    """
    Run a command and return the stdout output.

    @param command: command string to execute.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The function will take a few seconds longer
            than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param retain_output: set to True to make stdout/stderr of the command
            output to be also sent to the logging system
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a string with the stdout output of the command.
    """
    if retain_output:
        result = run(command, timeout=timeout, ignore_status=ignore_status,
                     stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS,
                     args=args)
    else:
        result = run(command, timeout=timeout, ignore_status=ignore_status,
                     args=args)
    out = result.stdout
    # Strip at most one trailing newline.
    if out.endswith('\n'):
        out = out[:-1]
    return out
936
937
def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    """Run commands in parallel and return their stdout outputs.

    Mirrors system_output(): each output has at most one trailing
    newline stripped.

    @param commands: list of command strings to run in parallel.
    @param timeout: overall time limit in seconds.
    @param ignore_status: do not raise on non-zero exit status.
    @param retain_output: also tee stdout/stderr to the logging system.
    @return: list of stdout strings, one per command, in order.
    """
    if retain_output:
        results = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status,
                               stdout_tee=TEE_TO_LOGS,
                               stderr_tee=TEE_TO_LOGS)
    else:
        results = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status)

    # The original loop compared out[-1:] (a slice of the LIST) against
    # '\n', which can never be equal, so it never stripped anything.
    # Strip each element's trailing newline, like system_output() does.
    outputs = []
    for result in results:
        out = result.stdout
        if out[-1:] == '\n':
            out = out[:-1]
        outputs.append(out)
    return outputs
951
952
def strip_unicode(input):
    """Recursively convert unicode strings (and dict keys) to byte strings.

    Lists and dicts are rebuilt with converted contents; any other type
    is returned unchanged.
    """
    if type(input) == list:
        return [strip_unicode(element) for element in input]
    if type(input) == dict:
        return dict((str(key), strip_unicode(val))
                    for key, val in input.items())
    if type(input) == unicode:
        return str(input)
    return input
965
966
def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.

    @param function: Callable to invoke and measure.
    @return: (cpu_percent, function's return value).
    """
    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_pre = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    to_return = function(*args, **dargs)
    elapsed = time.time() - start
    self_post = resource.getrusage(resource.RUSAGE_SELF)
    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)

    # Calculate CPU Percentage: the first two rusage fields are ru_utime
    # and ru_stime. list() makes the [:2] slice valid on python 3 too,
    # where zip() returns an iterator (the original zip(...)[:2] breaks).
    s_user, s_system = [a - b for a, b in list(zip(self_post, self_pre))[:2]]
    c_user, c_system = [a - b for a, b in list(zip(child_post, child_pre))[:2]]
    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed

    return cpu_percent, to_return
987
988
class SystemLoad(object):
    """
    Get system and/or process values and return average value of load.
    """
    def __init__(self, pids, advanced=False, time_step=0.1, cpu_cont=False,
                 use_log=False):
        """
        @param pids: List of pids to be monitored. If pid == 0 the whole
                system will be monitored. An entry may also be a
                (pid, name) tuple.
        @param advanced: Additionally monitor system irq and softirq counts
                and, per process, minor and major page faults.
        @param time_step: Time step for continuous monitoring.
        @param cpu_cont: If True monitor CPU load continuously.
        @param use_log: If True every measurement is logged for dump.
        """
        self.pids = []
        self.stats = {}
        for pid in pids:
            if pid == 0:
                # pid 0 means "whole system": read aggregate counters from
                # /proc/stat and /proc/meminfo.
                cpu = FileFieldMonitor("/proc/stat",
                                       [("cpu", 0), # User Time
                                        ("cpu", 2), # System Time
                                        ("intr", 0), # IRQ Count
                                        ("softirq", 0)], # Soft IRQ Count
                                       True,
                                       cpu_cont,
                                       use_log,
                                       " +",
                                       time_step)
                mem = FileFieldMonitor("/proc/meminfo",
                                       [("MemTotal:", 0), # Mem Total
                                        ("MemFree:", 0), # Mem Free
                                        ("Buffers:", 0), # Buffers
                                        ("Cached:", 0)], # Cached
                                       False,
                                       True,
                                       use_log,
                                       " +",
                                       time_step)
                self.stats[pid] = ["TOTAL", cpu, mem]
                self.pids.append(pid)
            else:
                name = ""
                if (type(pid) is int):
                    self.pids.append(pid)
                    name = get_process_name(pid)
                else:
                    # A (pid, name) tuple was supplied instead of a bare pid.
                    self.pids.append(pid[0])
                    name = pid[1]

                cpu = FileFieldMonitor("/proc/%d/stat" %
                                       self.pids[-1],
                                       [("", 13), # User Time
                                        ("", 14), # System Time
                                        ("", 9), # Minor Page Fault
                                        ("", 11)], # Major Page Fault
                                       True,
                                       cpu_cont,
                                       use_log,
                                       " +",
                                       time_step)
                mem = FileFieldMonitor("/proc/%d/status" %
                                       self.pids[-1],
                                       [("VmSize:", 0), # Virtual Memory Size
                                        ("VmRSS:", 0), # Resident Set Size
                                        ("VmPeak:", 0), # Peak VM Size
                                        ("VmSwap:", 0)], # VM in Swap
                                       False,
                                       True,
                                       use_log,
                                       " +",
                                       time_step)
                self.stats[self.pids[-1]] = [name, cpu, mem]

        self.advanced = advanced


    def __str__(self):
        """
        Render every monitor's current status, one monitor per line.
        """
        out = ""
        for pid in self.pids:
            for stat in self.stats[pid][1:]:
                out += str(stat.get_status()) + "\n"
        return out


    def start(self, pids=None):
        """
        Start monitoring of the process system usage.
        @param pids: List of PIDs you intend to control. Use pids=None (or
                an empty list) to control all defined PIDs.
        """
        # The former mutable default argument ([]) was replaced with None
        # to avoid the shared-default pitfall; [] still means "all".
        if not pids:
            pids = self.pids

        for pid in pids:
            for stat in self.stats[pid][1:]:
                stat.start()


    def stop(self, pids=None):
        """
        Stop monitoring of the process system usage.
        @param pids: List of PIDs you intend to control. Use pids=None (or
                an empty list) to control all defined PIDs.
        """
        if not pids:
            pids = self.pids

        for pid in pids:
            for stat in self.stats[pid][1:]:
                stat.stop()


    def dump(self, pids=None):
        """
        Get the status of monitoring.
        @param pids: List of PIDs you intend to control. Use pids=None (or
                an empty list) to control all defined PIDs.
        @return:
            tuple([cpu load], [memory load]):
                ([(PID1, (PID1_cpu_meas)), (PID2, (PID2_cpu_meas)), ...],
                 [(PID1, (PID1_mem_meas)), (PID2, (PID2_mem_meas)), ...])

            PID1_cpu_meas:
                average_values[], test_time, cont_meas_values[[]], time_step
            PID1_mem_meas:
                average_values[], test_time, cont_meas_values[[]], time_step
            where average_values[] are the measured values (mem_free,swap,...)
            which are described in SystemLoad.__init__()-FileFieldMonitor.
            cont_meas_values[[]] is a list of average_values in the sampling
            times.
        """
        if not pids:
            pids = self.pids

        cpus = [(pid, self.stats[pid][1].get_status()) for pid in pids]
        memory = [(pid, self.stats[pid][2].get_status()) for pid in pids]

        return (cpus, memory)


    def get_cpu_status_string(self, pids=None):
        """
        Convert status to string array.
        @param pids: List of PIDs you intend to control. Use pids=None (or
                an empty list) to control all defined PIDs.
        @return: String format to table.
        """
        if not pids:
            pids = self.pids

        headers = ["NAME",
                   ("%7s") % "PID",
                   ("%5s") % "USER",
                   ("%5s") % "SYS",
                   ("%5s") % "SUM"]
        if self.advanced:
            headers.extend(["MINFLT/IRQC",
                            "MAJFLT/SOFTIRQ"])
        headers.append(("%11s") % "TIME")
        textstatus = []
        for pid in pids:
            stat = self.stats[pid][1].get_status()
            # get_status() returns (average_values, test_time, ...);
            # "elapsed" replaces a local that shadowed the time module.
            elapsed = stat[1]
            stat = stat[0]
            textstatus.append(["%s" % self.stats[pid][0],
                               "%7s" % pid,
                               "%4.0f%%" % (stat[0] / elapsed),
                               "%4.0f%%" % (stat[1] / elapsed),
                               "%4.0f%%" % ((stat[0] + stat[1]) / elapsed),
                               "%10.3fs" % elapsed])
            if self.advanced:
                textstatus[-1].insert(-1, "%11d" % stat[2])
                textstatus[-1].insert(-1, "%14d" % stat[3])

        return matrix_to_string(textstatus, tuple(headers))


    def get_mem_status_string(self, pids=None):
        """
        Convert status to string array.
        @param pids: List of PIDs you intend to control. Use pids=None (or
                an empty list) to control all defined PIDs.
        @return: String format to table.
        """
        if not pids:
            pids = self.pids

        headers = ["NAME",
                   ("%7s") % "PID",
                   ("%8s") % "TOTAL/VMSIZE",
                   ("%8s") % "FREE/VMRSS",
                   ("%8s") % "BUFFERS/VMPEAK",
                   ("%8s") % "CACHED/VMSWAP",
                   ("%11s") % "TIME"]
        textstatus = []
        for pid in pids:
            stat = self.stats[pid][2].get_status()
            elapsed = stat[1]
            stat = stat[0]
            # Values from /proc are in KB; convert to MB for display.
            textstatus.append(["%s" % self.stats[pid][0],
                               "%7s" % pid,
                               "%10dMB" % (stat[0] / 1024),
                               "%8dMB" % (stat[1] / 1024),
                               "%12dMB" % (stat[2] / 1024),
                               "%11dMB" % (stat[3] / 1024),
                               "%10.3fs" % elapsed])

        return matrix_to_string(textstatus, tuple(headers))
1207
1208
def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.

    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    machine = run_function('/bin/uname -m').stdout.rstrip()
    # Normalize all ia32 variants (i386/i486/i586/i686) to "i386".
    if re.match(r'i\d86$', machine):
        return 'i386'
    return machine
1221
1222
def get_num_logical_cpus_per_socket(run_function=run):
    """
    Get the number of cores (including hyperthreading) per cpu.

    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    output = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
    sibling_counts = [int(value) for value in
                      re.findall(r'^siblings\s*:\s*(\d+)\s*$', output, re.M)]
    if not sibling_counts:
        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
    # All sockets are expected to report the same sibling count.
    if min(sibling_counts) != max(sibling_counts):
        raise error.TestError('Number of siblings differ %r' %
                              sibling_counts)
    return sibling_counts[0]
1241
1242
def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees than the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).
    """
    if not os.path.exists(src):
        # Path exists only in dest; nothing to merge.
        return
    if not os.path.exists(dest):
        # Path exists only in src; copy it over wholesale.
        if os.path.isfile(src):
            shutil.copy2(src, dest)
        else:
            shutil.copytree(src, dest, symlinks=True)
        return
    if os.path.isfile(src) and os.path.isfile(dest):
        # Both are files: append the source file to the destination one.
        destfile = open(dest, "a")
        try:
            srcfile = open(src)
            try:
                destfile.write(srcfile.read())
            finally:
                srcfile.close()
        finally:
            destfile.close()
    elif os.path.isdir(src) and os.path.isdir(dest):
        # Both are directories: merge them entry by entry.
        for entry in os.listdir(src):
            merge_trees(os.path.join(src, entry), os.path.join(dest, entry))
    # Both exist but have incompatible types: skip silently.
1278
1279
class CmdResult(object):
    """
    Command execution result.

    command: String containing the command line itself
    exit_status: Integer exit code of the process
    stdout: String containing stdout of the process
    stderr: String containing stderr of the process
    duration: Elapsed wall clock time running the process
    """


    def __init__(self, command="", stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __repr__(self):
        # Wrap long command lines at 78 columns, indenting continuations.
        wrapper = textwrap.TextWrapper(width=78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        out = self.stdout.rstrip()
        if out:
            out = "\nstdout:\n%s" % out

        err = self.stderr.rstrip()
        if err:
            err = "\nstderr:\n%s" % err

        header = ("* Command: %s\n"
                  "Exit status: %s\n"
                  "Duration: %s\n"
                  % (wrapper.fill(self.command), self.exit_status,
                     self.duration))
        return header + out + err
1321
1322
class run_randomly:
    """
    Collects (args, kwargs) entries via add() and invokes a callback on
    each of them in random order.
    """
    def __init__(self, run_sequentially=False):
        # run_sequentially is for debugging control files: it makes run()
        # consume entries in insertion order instead of randomly.
        self.test_list = []
        self.run_sequentially = run_sequentially


    def add(self, *args, **dargs):
        self.test_list.append((args, dargs))


    def run(self, fn):
        while self.test_list:
            chosen = random.randint(0, len(self.test_list) - 1)
            if self.run_sequentially:
                chosen = 0
            args, dargs = self.test_list.pop(chosen)
            fn(*args, **dargs)
1342
1343
def import_site_module(path, module, dummy=None, modulefile=None):
    """
    Try to import the site specific module if it exists.

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific module or dummy

    @raises ImportError if the site file exists but imports fails
    """
    # Last dotted component of the module name, e.g. "site_utils" from
    # "autotest_lib.client.common_lib.site_utils".
    short_module = module[module.rfind(".") + 1:]

    if not modulefile:
        modulefile = short_module + ".py"

    site_path = os.path.join(os.path.dirname(path), modulefile)
    if os.path.exists(site_path):
        return __import__(module, {}, {}, [short_module])
    return dummy
1365
1366
def import_site_symbol(path, module, name, dummy=None, modulefile=None):
    """
    Try to import site specific symbol from site specific file if it exists

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or dummy

    @raises ImportError if the site file exists but imports fails
    """
    site_module = import_site_module(path, module, modulefile=modulefile)
    if not site_module:
        return dummy

    # Unique sentinel so we can distinguish "attribute missing" from an
    # attribute that is legitimately None/False.
    sentinel = object()

    symbol = getattr(site_module, name, sentinel)
    if symbol is sentinel:
        logging.debug("unable to import site symbol '%s', using non-site "
                      "implementation", name)
        return dummy

    return symbol
1395
1396
def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import site specific class from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to return when no site file present or
            to mixin when site class exists but is not inherited from baseclass
        modulefile: module filename

    Returns: baseclass if site specific class does not exist, the site specific
        class if it exists and is inherited from baseclass or a mixin of the
        site specific class and baseclass when the site specific class exists
        and is not inherited from baseclass

    Raises: ImportError if the site file exists but imports fails
    """
    site_class = import_site_symbol(path, module, classname, None, modulefile)
    if not site_class:
        return baseclass
    if issubclass(site_class, baseclass):
        return site_class
    # Site class is not derived from baseclass: synthesize a mixin type
    # combining the two, with the site class taking precedence in the MRO.
    return type(classname, (site_class, baseclass), {})
1427
1428
def import_site_function(path, module, funcname, dummy, modulefile=None):
    """
    Try to import site specific function from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        funcname: function name to be imported from site file
        dummy: dummy function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or dummy

    Raises: ImportError if the site file exists but imports fails
    """
    # Functions are just module attributes, so delegate to the generic
    # symbol importer.
    site_function = import_site_symbol(path, module, funcname, dummy,
                                       modulefile)
    return site_function
1446
1447
1448 def _get_pid_path(program_name):
1449 my_path = os.path.dirname(__file__)
1450 return os.path.abspath(os.path.join(my_path, "..", "..",
1451 "%s.pid" % program_name))
1452
1453
def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
        program_name: prefix for file name
    """
    pid_file = open(_get_pid_path(program_name), "w")
    try:
        # Trailing newline so the file reads cleanly with shell tools.
        pid_file.write("%s\n" % os.getpid())
    finally:
        pid_file.close()
1466
1467
def delete_pid_file_if_exists(program_name):
    """
    Tries to remove <program_name>.pid from the main autotest directory.
    """
    path = _get_pid_path(program_name)
    try:
        os.remove(path)
    except OSError:
        # Removal can race with another process; only re-raise when the
        # file is genuinely still there.
        if os.path.exists(path):
            raise
1480
1481
def get_pid_from_file(program_name):
    """
    Reads the pid from <program_name>.pid in the autotest directory.

    @param program_name the name of the program
    @return the pid if the file exists, None otherwise.
    """
    pidfile_path = _get_pid_path(program_name)
    if not os.path.exists(pidfile_path):
        return None

    # Reuse the already-computed path instead of calling _get_pid_path()
    # a second time (the original recomputed it here).
    pidfile = open(pidfile_path, 'r')

    try:
        try:
            pid = int(pidfile.readline())
        except IOError:
            # The file may have been deleted between the exists() check
            # and the read; treat that the same as "no pid file".
            if not os.path.exists(pidfile_path):
                return None
            raise
    finally:
        pidfile.close()

    return pid
1506
1507
def get_process_name(pid):
    """
    Get process name from PID.
    @param pid: PID of process.
    """
    stat_contents = read_file("/proc/%d/stat" % pid)
    # Field 1 of /proc/<pid>/stat is the command name wrapped in
    # parentheses; the slice strips them.
    return get_field(stat_contents, 1)[1:-1]
1514
1515
def program_is_alive(program_name):
    """
    Checks if the process is alive and not in Zombie state.

    @param program_name the name of the program
    @return True if still alive, False otherwise
    """
    pid = get_pid_from_file(program_name)
    return pid is not None and pid_is_alive(pid)
1527
1528
def signal_program(program_name, sig=signal.SIGTERM):
    """
    Sends a signal to the process listed in <program_name>.pid

    @param program_name the name of the program
    @param sig signal to send
    """
    pid = get_pid_from_file(program_name)
    if not pid:
        # No pid file (or empty pid); nothing to signal.
        return
    signal_pid(pid, sig)
1539
1540
def get_relative_path(path, reference):
    """Given 2 absolute paths "path" and "reference", compute the path of
    "path" as relative to the directory "reference".

    @param path the absolute path to convert to a relative path
    @param reference an absolute directory path to which the relative
        path will be computed
    @return the relative path; os.curdir when path equals reference
        (the original implementation raised TypeError in that case)
    """
    # normalize the paths (remove double slashes, etc)
    assert(os.path.isabs(path))
    assert(os.path.isabs(reference))

    path = os.path.normpath(path)
    reference = os.path.normpath(reference)

    # we could use os.path.split() but it splits from the end
    path_list = path.split(os.path.sep)[1:]
    ref_list = reference.split(os.path.sep)[1:]

    # count the longest common leading components (works on python 2 and
    # 3, unlike the previous xrange-based index juggling)
    common = 0
    for path_part, ref_part in zip(path_list, ref_list):
        if path_part != ref_part:
            break
        common += 1

    # one ".." per uncommon reference component, then the uncommon tail
    # of path
    relative = ['..'] * (len(ref_list) - common) + path_list[common:]
    if not relative:
        # path and reference are the same directory; os.path.join(*[])
        # would raise, so return the conventional "current dir" marker
        return os.curdir

    return os.path.join(*relative)
1575
1576
def sh_escape(command):
    """
    Escape special characters from a command so that it can be passed
    as a double quoted (" ") string in a (ba)sh command.

    Args:
        command: the command string to escape.

    Returns:
        The escaped command string. The required englobing double
        quotes are NOT added and so should be added at some point by
        the caller.

    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
    """
    # Backslash must be escaped first so the escapes added for the other
    # characters are not themselves doubled.
    for char in ('\\', '$', '"', '`'):
        command = command.replace(char, '\\' + char)
    return command
1597
1598
def configure(extra=None, configure='./configure'):
    """
    Run configure passing in the correct host, build, and target options.

    @param extra: extra command line arguments to pass to configure
    @param configure: which configure script to use
    """
    args = []
    # Map the cross-compile environment variables onto configure flags.
    for env_var, flag in (('CHOST', '--host'),
                          ('CBUILD', '--build'),
                          ('CTARGET', '--target')):
        if env_var in os.environ:
            args.append('%s=%s' % (flag, os.environ[env_var]))
    if extra:
        args.append(extra)

    system('%s %s' % (configure, ' '.join(args)))
1617
1618
def make(extra='', make='make', timeout=None, ignore_status=False):
    """
    Run make, adding MAKEOPTS to the list of options.

    @param extra: extra command line arguments to pass to make.
    """
    make_opts = os.environ.get('MAKEOPTS', '')
    command = '%s %s %s' % (make, make_opts, extra)
    return system(command, timeout=timeout, ignore_status=ignore_status)
1627
1628
def compare_versions(ver1, ver2):
    """Version number comparison between ver1 and ver2 strings.

    >>> compare_versions("1", "2")
    -1
    >>> compare_versions("foo-1.1", "foo-1.2")
    -1
    >>> compare_versions("1.2", "1.2a")
    -1
    >>> compare_versions("1.2b", "1.2a")
    1
    >>> compare_versions("1.3.5.3a", "1.3.5.3b")
    -1

    Args:
        ver1: version string
        ver2: version string

    Returns:
        int: 1 if ver1 > ver2
             0 if ver1 == ver2
             -1 if ver1 < ver2
    """
    # NOTE: the doctests above previously called a nonexistent
    # "compare_tuple"; they now call this function.
    def three_way(x, y):
        # Equivalent to the python-2-only builtin cmp(); also works on
        # python 3.
        return (x > y) - (x < y)

    ax = re.split('[.-]', ver1)
    ay = re.split('[.-]', ver2)
    while ax and ay:
        cx = ax.pop(0)
        cy = ay.pop(0)
        # Zero-pad the shorter component so "2" < "10" compares
        # numerically rather than lexicographically.
        maxlen = max(len(cx), len(cy))
        c = three_way(cx.zfill(maxlen), cy.zfill(maxlen))
        if c != 0:
            return c
    # All shared components equal: the version with more components wins.
    return three_way(len(ax), len(ay))
1662
1663
def args_to_dict(args):
    """Convert autoserv extra arguments in the form of key=val or key:val to a
    dictionary.  Each argument key is converted to lowercase dictionary key.

    Args:
        args - list of autoserv extra arguments.

    Returns:
        dictionary
    """
    arg_re = re.compile(r'(\w+)[:=](.*)$')
    # "args_dict" replaces a local that shadowed the builtin "dict".
    args_dict = {}
    for arg in args:
        match = arg_re.match(arg)
        if match:
            args_dict[match.group(1).lower()] = match.group(2)
        else:
            # Lazy %-args: the message is only formatted if the warning
            # is actually emitted.
            logging.warning("args_to_dict: argument '%s' doesn't match "
                            "'%s' pattern. Ignored.", arg, arg_re.pattern)
    return args_dict
1684
1685
def get_unused_port():
    """
    Finds a semi-random available port. A race condition is still
    possible after the port number is returned, if another process
    happens to bind it.

    Returns:
        A port number that is unused on both TCP and UDP.
    """

    def try_bind(port, socket_type, socket_proto):
        sock = socket.socket(socket.AF_INET, socket_type, socket_proto)
        try:
            try:
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                sock.bind(('', port))
                return sock.getsockname()[1]
            except socket.error:
                return None
        finally:
            sock.close()

    # On the 2.6 kernel, calling try_bind() on UDP socket returns the
    # same port over and over. So always try TCP first.
    while True:
        # Ask the OS for an unused port.
        port = try_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        # Check if this port is unused on the other protocol.
        if port and try_bind(port, socket.SOCK_DGRAM, socket.IPPROTO_UDP):
            return port
OLDNEW
« no previous file with comments | « client/common_lib/software_manager.py ('k') | client/profilers/cpistat/cpistat.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698