Chromium Code Reviews

Side by Side Diff: client/common_lib/utils.py

Issue 4823005: Merge remote branch 'cros/upstream' into tempbranch (Closed) Base URL: http://git.chromium.org/git/autotest.git@master
Patch Set: patch Created 10 years, 1 month ago
1 # 1 #
2 # Copyright 2008 Google Inc. Released under the GPL v2 2 # Copyright 2008 Google Inc. Released under the GPL v2
3 3
4 import os, pickle, random, re, resource, select, shutil, signal, StringIO 4 import os, pickle, random, re, resource, select, shutil, signal, StringIO
5 import socket, struct, subprocess, sys, time, textwrap, urlparse 5 import socket, struct, subprocess, sys, time, textwrap, urlparse
6 import warnings, smtplib, logging, urllib2 6 import warnings, smtplib, logging, urllib2
7 from threading import Thread, Event
7 try: 8 try:
8 import hashlib 9 import hashlib
9 except ImportError: 10 except ImportError:
10 import md5, sha 11 import md5, sha
11 from autotest_lib.client.common_lib import error, logging_manager 12 from autotest_lib.client.common_lib import error, logging_manager
12 13
13 def deprecated(func): 14 def deprecated(func):
14 """This is a decorator which can be used to mark functions as deprecated. 15 """This is a decorator which can be used to mark functions as deprecated.
15 It will result in a warning being emitted when the function is used.""" 16 It will result in a warning being emitted when the function is used."""
16 def new_func(*args, **dargs): 17 def new_func(*args, **dargs):
(...skipping 166 matching lines...) Expand 10 before | Expand all | Expand 10 after
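The deprecated decorator above wraps a function so that each call emits a warning before delegating to the original (its body is collapsed in this diff). A minimal usage sketch; old_sum is a made-up function and only the decorator comes from this module:

    from autotest_lib.client.common_lib.utils import deprecated

    @deprecated
    def old_sum(a, b):
        return a + b

    old_sum(1, 2)   # emits a deprecation warning, then returns 3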
183 184
184 185
185 def read_file(filename): 186 def read_file(filename):
186 f = open(filename) 187 f = open(filename)
187 try: 188 try:
188 return f.read() 189 return f.read()
189 finally: 190 finally:
190 f.close() 191 f.close()
191 192
192 193
194 def get_field(data, param, linestart="", sep=" "):
195 """
196 Parse data from string.
197 @param data: Data to parse.
198 example:
199 data:
200 cpu 324 345 34 5 345
201 cpu0 34 11 34 34 33
202 ^^^^
203 start of line
204 params 0 1 2 3 4
205 @param param: Position of parameter after linestart marker.
206 @param linestart: String that the target line must start with.
207 @param sep: Regular expression used as the separator between parameters.
208 """
209 search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
210 find = search.search(data)
211 if find != None:
212 return re.split("%s" % sep, find.group(1))[param]
213 else:
214 print "There is no line which starts with %s in data." % linestart
215 return None
216
217
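A usage sketch of get_field() on /proc/stat-style input; the numbers below are made up for illustration:

    stat_data = ("cpu  84752 353 40127 1748449\n"
                 "cpu0 42376 176 20063 874224\n")

    # Field 2 of the line starting with "cpu0", split on runs of spaces -> '20063'
    system_time = get_field(stat_data, 2, linestart="cpu0", sep=" +")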
193 def write_one_line(filename, line): 218 def write_one_line(filename, line):
194 open_write_close(filename, line.rstrip('\n') + '\n') 219 open_write_close(filename, line.rstrip('\n') + '\n')
195 220
196 221
197 def open_write_close(filename, data): 222 def open_write_close(filename, data):
198 f = open(filename, 'w') 223 f = open(filename, 'w')
199 try: 224 try:
200 f.write(data) 225 f.write(data)
201 finally: 226 finally:
202 f.close() 227 f.close()
203 228
204 229
205 def matrix_to_string(matrix, header=None): 230 def matrix_to_string(matrix, header=None):
206 """ 231 """
207 Return a pretty, aligned string representation of a nxm matrix. 232 Return a pretty, aligned string representation of a nxm matrix.
208 233
209 This representation can be used to print any tabular data, such as 234 This representation can be used to print any tabular data, such as
210 database results. It works by scanning the lengths of each element 235 database results. It works by scanning the lengths of each element
211 in each column, and determining the format string dynamically. 236 in each column, and determining the format string dynamically.
212 237
213 @param matrix: Matrix representation (list with n rows of m elements). 238 @param matrix: Matrix representation (list with n rows of m elements).
214 @param header: Optional tuple with header elements to be displayed. 239 @param header: Optional tuple or list with header elements to be displayed.
215 """ 240 """
241 if type(header) is list:
242 header = tuple(header)
216 lengths = [] 243 lengths = []
244 if header:
245 for column in header:
246 lengths.append(len(column))
217 for row in matrix: 247 for row in matrix:
218 for column in row: 248 for column in row:
219 i = row.index(column) 249 i = row.index(column)
220 cl = len(column) 250 cl = len(column)
221 try: 251 try:
222 ml = lengths[i] 252 ml = lengths[i]
223 if cl > ml: 253 if cl > ml:
224 lengths[i] = cl 254 lengths[i] = cl
225 except IndexError: 255 except IndexError:
226 lengths.append(cl) 256 lengths.append(cl)
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after
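The remainder of matrix_to_string() is collapsed above. A usage sketch of the call shape, including the list header that this patch starts accepting (row contents are made up):

    rows = [["dbench", "124", "PASS"],
            ["bonnie", "38", "PASS"]]
    print matrix_to_string(rows, header=["test", "runtime", "status"])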
287 key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag) 317 key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
288 try: 318 try:
289 for key in sorted(dictionary.keys()): 319 for key in sorted(dictionary.keys()):
290 if not key_regex.search(key): 320 if not key_regex.search(key):
291 raise ValueError('Invalid key: %s' % key) 321 raise ValueError('Invalid key: %s' % key)
292 keyval.write('%s=%s\n' % (key, dictionary[key])) 322 keyval.write('%s=%s\n' % (key, dictionary[key]))
293 finally: 323 finally:
294 keyval.close() 324 keyval.close()
295 325
296 326
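Most of write_keyval() is collapsed above; the visible key_regex only accepts keys carrying the expected type tag. A small illustration, assuming the collapsed code set the tag to 'perf':

    key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % re.escape('perf'))
    key_regex.search('throughput{perf}')    # matches: the key is written out
    key_regex.search('through put{perf}')   # None: the loop above raises ValueError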
327 class FileFieldMonitor(object):
328 """
329 Monitors the information from a file and reports its values.
330
331 It gathers the information at the start and stop of the measurement, or
332 continuously during the measurement.
333 """
334 class Monitor(Thread):
335 """
336 Internal monitor class that continuously samples the monitored file.
337 """
338 def __init__(self, master):
339 """
340 @param master: Master class which controls the Monitor.
341 """
342 Thread.__init__(self)
343 self.master = master
344
345 def run(self):
346 """
347 Run the monitoring loop in a separate thread.
348 """
349 while not self.master.end_event.isSet():
350 self.master._get_value(self.master.logging)
351 time.sleep(self.master.time_step)
352
353
354 def __init__(self, status_file, data_to_read, mode_diff, continuously=False,
355 contlogging=False, separator=" +", time_step=0.1):
356 """
357 Initialize variables.
358 @param status_file: File containing the status to monitor.
360 @param data_to_read: List of tuples with data position.
361 format: [(start_of_line,position in params)]
362 example:
363 data:
364 cpu 324 345 34 5 345
365 cpu0 34 11 34 34 33
366 ^^^^
367 start of line
368 params 0 1 2 3 4
369 @param mode_diff: If True, report the difference between the last and
370 first measured values; if False, report the average of the values.
371 @param continuously: Start the monitoring thread using the time_step
372 as the measurement period.
373 @param contlogging: If True, log every value gathered during the continuous run.
374 @param separator: Regular expression used as the field separator.
375 @param time_step: Sampling period of the continuous monitoring.
376 """
377 self.end_event = Event()
378 self.start_time = 0
379 self.end_time = 0
380 self.test_time = 0
381
382 self.status_file = status_file
383 self.separator = separator
384 self.data_to_read = data_to_read
385 self.num_of_params = len(self.data_to_read)
386 self.mode_diff = mode_diff
387 self.continuously = continuously
388 self.time_step = time_step
389
390 self.value = [0 for i in range(self.num_of_params)]
391 self.old_value = [0 for i in range(self.num_of_params)]
392 self.log = []
393 self.logging = contlogging
394
395 self.started = False
396 self.num_of_get_value = 0
397 self.monitor = None
398
399
400 def _get_value(self, logging=True):
401 """
402 Return current values.
403 @param logging: If True, log the value in memory; this can be a problem
404 for very long runs.
405 """
406 data = read_file(self.status_file)
407 value = []
408 for i in range(self.num_of_params):
409 value.append(int(get_field(data,
410 self.data_to_read[i][1],
411 self.data_to_read[i][0],
412 self.separator)))
413
414 if logging:
415 self.log.append(value)
416 if not self.mode_diff:
417 value = map(lambda x, y: x + y, value, self.old_value)
418
419 self.old_value = value
420 self.num_of_get_value += 1
421 return value
422
423
424 def start(self):
425 """
426 Start value monitor.
427 """
428 if self.started:
429 self.stop()
430 self.old_value = [0 for i in range(self.num_of_params)]
431 self.num_of_get_value = 0
432 self.log = []
433 self.end_event.clear()
434 self.start_time = time.time()
435 self._get_value()
436 self.started = True
437 if (self.continuously):
438 self.monitor = FileFieldMonitor.Monitor(self)
439 self.monitor.start()
440
441
442 def stop(self):
443 """
444 Stop value monitor.
445 """
446 if self.started:
447 self.started = False
448 self.end_time = time.time()
449 self.test_time = self.end_time - self.start_time
450 self.value = self._get_value()
451 if (self.continuously):
452 self.end_event.set()
453 self.monitor.join()
454 if (self.mode_diff):
455 self.value = map(lambda x, y: x - y, self.log[-1], self.log[0])
456 else:
457 self.value = map(lambda x: x / self.num_of_get_value,
458 self.value)
459
460
461 def get_status(self):
462 """
463 @return: Tuple with the monitored status: the average (or difference)
464 value, the test time, the list of logged values and the time step
465 of the continuous run.
466 """
467 if self.started:
468 self.stop()
469 if self.mode_diff:
470 for i in range(len(self.log) - 1):
471 self.log[i] = (map(lambda x, y: x - y,
472 self.log[i + 1], self.log[i]))
473 self.log.pop()
474 return (self.value, self.test_time, self.log, self.time_step)
475
476
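A usage sketch of FileFieldMonitor reading user and system time from /proc/stat; the parameter values are chosen only for illustration:

    monitor = FileFieldMonitor("/proc/stat",
                               [("cpu", 0),   # user time
                                ("cpu", 2)],  # system time
                               mode_diff=True,       # report end value minus start value
                               continuously=True,    # sample in a background thread
                               contlogging=True,
                               separator=" +",
                               time_step=0.5)
    monitor.start()
    time.sleep(5)             # the workload being measured would run here
    monitor.stop()
    value, test_time, log, time_step = monitor.get_status()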
297 def is_url(path): 477 def is_url(path):
298 """Return true if path looks like a URL""" 478 """Return true if path looks like a URL"""
299 # for now, just handle http and ftp 479 # for now, just handle http and ftp
300 url_parts = urlparse.urlparse(path) 480 url_parts = urlparse.urlparse(path)
301 return (url_parts[0] in ('http', 'ftp')) 481 return (url_parts[0] in ('http', 'ftp'))
302 482
303 483
304 def urlopen(url, data=None, timeout=5): 484 def urlopen(url, data=None, timeout=5):
305 """Wrapper to urllib2.urlopen with timeout addition.""" 485 """Wrapper to urllib2.urlopen with timeout addition."""
306 486
(...skipping 487 matching lines...) Expand 10 before | Expand all | Expand 10 after
794 child_post = resource.getrusage(resource.RUSAGE_CHILDREN) 974 child_post = resource.getrusage(resource.RUSAGE_CHILDREN)
795 975
796 # Calculate CPU Percentage 976 # Calculate CPU Percentage
797 s_user, s_system = [a - b for a, b in zip(self_post, self_pre)[:2]] 977 s_user, s_system = [a - b for a, b in zip(self_post, self_pre)[:2]]
798 c_user, c_system = [a - b for a, b in zip(child_post, child_pre)[:2]] 978 c_user, c_system = [a - b for a, b in zip(child_post, child_pre)[:2]]
799 cpu_percent = (s_user + c_user + s_system + c_system) / elapsed 979 cpu_percent = (s_user + c_user + s_system + c_system) / elapsed
800 980
801 return cpu_percent, to_return 981 return cpu_percent, to_return
802 982
803 983
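The collapsed code above takes resource.getrusage() snapshots before and after running the measured function; the visible lines then turn the user/system time deltas into a CPU utilization figure (a fraction of one CPU). A self-contained sketch of the same calculation, ignoring child processes for brevity; busy_work is a made-up workload:

    import resource, time

    def cpu_percent_of(func, *args):
        start = time.time()
        pre = resource.getrusage(resource.RUSAGE_SELF)
        result = func(*args)
        post = resource.getrusage(resource.RUSAGE_SELF)
        elapsed = time.time() - start
        # ru_utime and ru_stime are the first two fields of the rusage struct
        user, system = [a - b for a, b in zip(post, pre)[:2]]
        return (user + system) / elapsed, result

    def busy_work():
        return sum(i * i for i in xrange(10 ** 6))

    cpu_fraction, _ = cpu_percent_of(busy_work)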
984 class SystemLoad(object):
985 """
986 Gather system and/or per-process statistics and report their average load.
987 """
988 def __init__(self, pids, advanced=False, time_step=0.1, cpu_cont=False,
989 use_log=False):
990 """
991 @param pids: List of PIDs to be monitored. A PID of 0 means the whole
992 system is monitored.
993 @param advanced: If True, additionally monitor IRQ and softirq counts
994 for the system, and minor and major page faults for processes.
995 @param time_step: Time step for continuous monitoring.
996 @param cpu_cont: If True monitor CPU load continuously.
997 @param use_log: If True, every measurement is logged for a later dump.
998 """
999 self.pids = []
1000 self.stats = {}
1001 for pid in pids:
1002 if pid == 0:
1003 cpu = FileFieldMonitor("/proc/stat",
1004 [("cpu", 0), # User Time
1005 ("cpu", 2), # System Time
1006 ("intr", 0), # IRQ Count
1007 ("softirq", 0)], # Soft IRQ Count
1008 True,
1009 cpu_cont,
1010 use_log,
1011 " +",
1012 time_step)
1013 mem = FileFieldMonitor("/proc/meminfo",
1014 [("MemTotal:", 0), # Mem Total
1015 ("MemFree:", 0), # Mem Free
1016 ("Buffers:", 0), # Buffers
1017 ("Cached:", 0)], # Cached
1018 False,
1019 True,
1020 use_log,
1021 " +",
1022 time_step)
1023 self.stats[pid] = ["TOTAL", cpu, mem]
1024 self.pids.append(pid)
1025 else:
1026 name = ""
1027 if (type(pid) is int):
1028 self.pids.append(pid)
1029 name = get_process_name(pid)
1030 else:
1031 self.pids.append(pid[0])
1032 name = pid[1]
1033
1034 cpu = FileFieldMonitor("/proc/%d/stat" %
1035 self.pids[-1],
1036 [("", 13), # User Time
1037 ("", 14), # System Time
1038 ("", 9), # Minor Page Faults
1039 ("", 11)], # Major Page Faults
1040 True,
1041 cpu_cont,
1042 use_log,
1043 " +",
1044 time_step)
1045 mem = FileFieldMonitor("/proc/%d/status" %
1046 self.pids[-1],
1047 [("VmSize:", 0), # Virtual Memory Size
1048 ("VmRSS:", 0), # Resident Set Size
1049 ("VmPeak:", 0), # Peak VM Size
1050 ("VmSwap:", 0)], # VM in Swap
1051 False,
1052 True,
1053 use_log,
1054 " +",
1055 time_step)
1056 self.stats[self.pids[-1]] = [name, cpu, mem]
1057
1058 self.advanced = advanced
1059
1060
1061 def __str__(self):
1062 """
1063 Define how the monitored status is printed.
1064 """
1065 out = ""
1066 for pid in self.pids:
1067 for stat in self.stats[pid][1:]:
1068 out += str(stat.get_status()) + "\n"
1069 return out
1070
1071
1072 def start(self, pids=[]):
1073 """
1074 Start monitoring of the process system usage.
1075 @param pids: List of PIDs you intend to control. Use pids=[] to control
1076 all defined PIDs.
1077 """
1078 if pids == []:
1079 pids = self.pids
1080
1081 for pid in pids:
1082 for stat in self.stats[pid][1:]:
1083 stat.start()
1084
1085
1086 def stop(self, pids=[]):
1087 """
1088 Stop monitoring of the process system usage.
1089 @param pids: List of PIDs you intend to control. Use pids=[] to control
1090 all defined PIDs.
1091 """
1092 if pids == []:
1093 pids = self.pids
1094
1095 for pid in pids:
1096 for stat in self.stats[pid][1:]:
1097 stat.stop()
1098
1099
1100 def dump(self, pids=[]):
1101 """
1102 Get the status of monitoring.
1103 @param pids: List of PIDs you intend to control. Use pids=[] to control
1104 all defined PIDs.
1105 @return:
1106 tuple([cpu load], [memory load]):
1107 ([(PID1, (PID1_cpu_meas)), (PID2, (PID2_cpu_meas)), ...],
1108 [(PID1, (PID1_mem_meas)), (PID2, (PID2_mem_meas)), ...])
1109
1110 PID1_cpu_meas:
1111 average_values[], test_time, cont_meas_values[[]], time_step
1112 PID1_mem_meas:
1113 average_values[], test_time, cont_meas_values[[]], time_step
1114 where average_values[] are the measured values (mem_free,swap,...)
1115 which are described in SystemLoad.__init__()-FileFieldMonitor.
1116 cont_meas_values[[]] is a list of average_values in the sampling
1117 times.
1118 """
1119 if pids == []:
1120 pids = self.pids
1121
1122 cpus = []
1123 memory = []
1124 for pid in pids:
1125 stat = (pid, self.stats[pid][1].get_status())
1126 cpus.append(stat)
1127 for pid in pids:
1128 stat = (pid, self.stats[pid][2].get_status())
1129 memory.append(stat)
1130
1131 return (cpus, memory)
1132
1133
1134 def get_cpu_status_string(self, pids=[]):
1135 """
1136 Convert status to string array.
1137 @param pids: List of PIDs you intend to control. Use pids=[] to control
1138 all defined PIDs.
1139 @return: CPU status formatted as a table string.
1140 """
1141 if pids == []:
1142 pids = self.pids
1143
1144 headers = ["NAME",
1145 ("%7s") % "PID",
1146 ("%5s") % "USER",
1147 ("%5s") % "SYS",
1148 ("%5s") % "SUM"]
1149 if self.advanced:
1150 headers.extend(["MINFLT/IRQC",
1151 "MAJFLT/SOFTIRQ"])
1152 headers.append(("%11s") % "TIME")
1153 textstatus = []
1154 for pid in pids:
1155 stat = self.stats[pid][1].get_status()
1156 time = stat[1]
1157 stat = stat[0]
1158 textstatus.append(["%s" % self.stats[pid][0],
1159 "%7s" % pid,
1160 "%4.0f%%" % (stat[0] / time),
1161 "%4.0f%%" % (stat[1] / time),
1162 "%4.0f%%" % ((stat[0] + stat[1]) / time),
1163 "%10.3fs" % time])
1164 if self.advanced:
1165 textstatus[-1].insert(-1, "%11d" % stat[2])
1166 textstatus[-1].insert(-1, "%14d" % stat[3])
1167
1168 return matrix_to_string(textstatus, tuple(headers))
1169
1170
1171 def get_mem_status_string(self, pids=[]):
1172 """
1173 Convert status to string array.
1174 @param pids: List of PIDs you intend to control. Use pids=[] to control
1175 all defined PIDs.
1176 @return: Memory status formatted as a table string.
1177 """
1178 if pids == []:
1179 pids = self.pids
1180
1181 headers = ["NAME",
1182 ("%7s") % "PID",
1183 ("%8s") % "TOTAL/VMSIZE",
1184 ("%8s") % "FREE/VMRSS",
1185 ("%8s") % "BUFFERS/VMPEAK",
1186 ("%8s") % "CACHED/VMSWAP",
1187 ("%11s") % "TIME"]
1188 textstatus = []
1189 for pid in pids:
1190 stat = self.stats[pid][2].get_status()
1191 time = stat[1]
1192 stat = stat[0]
1193 textstatus.append(["%s" % self.stats[pid][0],
1194 "%7s" % pid,
1195 "%10dMB" % (stat[0] / 1024),
1196 "%8dMB" % (stat[1] / 1024),
1197 "%12dMB" % (stat[2] / 1024),
1198 "%11dMB" % (stat[3] / 1024),
1199 "%10.3fs" % time])
1200
1201 return matrix_to_string(textstatus, tuple(headers))
1202
1203
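A usage sketch of SystemLoad monitoring the whole system plus a single process; PID 1234 is illustrative and must belong to a running process:

    load = SystemLoad([0, 1234], advanced=False, time_step=0.5, cpu_cont=True)
    load.start()
    time.sleep(10)                        # the workload being measured would run here
    load.stop()
    print load.get_cpu_status_string()    # CPU table for all monitored PIDs
    print load.get_mem_status_string()    # memory table for all monitored PIDs
    cpu_stats, mem_stats = load.dump()    # raw per-PID measurement tuples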
804 def get_arch(run_function=run): 1204 def get_arch(run_function=run):
805 """ 1205 """
806 Get the hardware architecture of the machine. 1206 Get the hardware architecture of the machine.
807 run_function is used to execute the commands. It defaults to 1207 run_function is used to execute the commands. It defaults to
808 utils.run() but a custom method (if provided) should be of the 1208 utils.run() but a custom method (if provided) should be of the
809 same schema as utils.run. It should return a CmdResult object and 1209 same schema as utils.run. It should return a CmdResult object and
810 throw a CmdError exception. 1210 throw a CmdError exception.
811 """ 1211 """
812 arch = run_function('/bin/uname -m').stdout.rstrip() 1212 arch = run_function('/bin/uname -m').stdout.rstrip()
813 if re.match(r'i\d86$', arch): 1213 if re.match(r'i\d86$', arch):
(...skipping 279 matching lines...) Expand 10 before | Expand all | Expand 10 after
1093 except IOError: 1493 except IOError:
1094 if not os.path.exists(pidfile_path): 1494 if not os.path.exists(pidfile_path):
1095 return None 1495 return None
1096 raise 1496 raise
1097 finally: 1497 finally:
1098 pidfile.close() 1498 pidfile.close()
1099 1499
1100 return pid 1500 return pid
1101 1501
1102 1502
1503 def get_process_name(pid):
1504 """
1505 Get process name from PID.
1506 @param pid: PID of process.
1507 """
1508 return get_field(read_file("/proc/%d/stat" % pid), 1)[1:-1]
1509
1510
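For example, reading the name of PID 1 (the result depends on the system's init process):

    name = get_process_name(1)   # e.g. 'init'; field 1 of /proc/1/stat with the parentheses stripped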
1103 def program_is_alive(program_name): 1511 def program_is_alive(program_name):
1104 """ 1512 """
1105 Checks if the process is alive and not in Zombie state. 1513 Checks if the process is alive and not in Zombie state.
1106 1514
1107 @param program_name the name of the program 1515 @param program_name the name of the program
1108 @return True if still alive, False otherwise 1516 @return True if still alive, False otherwise
1109 """ 1517 """
1110 pid = get_pid_from_file(program_name) 1518 pid = get_pid_from_file(program_name)
1111 if pid is None: 1519 if pid is None:
1112 return False 1520 return False
(...skipping 180 matching lines...) Expand 10 before | Expand all | Expand 10 after
1293 s.close() 1701 s.close()
1294 1702
1295 # On the 2.6 kernel, calling try_bind() on UDP socket returns the 1703 # On the 2.6 kernel, calling try_bind() on UDP socket returns the
1296 # same port over and over. So always try TCP first. 1704 # same port over and over. So always try TCP first.
1297 while True: 1705 while True:
1298 # Ask the OS for an unused port. 1706 # Ask the OS for an unused port.
1299 port = try_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP) 1707 port = try_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
1300 # Check if this port is unused on the other protocol. 1708 # Check if this port is unused on the other protocol.
1301 if port and try_bind(port, socket.SOCK_DGRAM, socket.IPPROTO_UDP): 1709 if port and try_bind(port, socket.SOCK_DGRAM, socket.IPPROTO_UDP):
1302 return port 1710 return port
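try_bind() itself is defined in the collapsed region above. A sketch consistent with how the visible loop calls it: bind the requested port (0 lets the OS choose) and return the bound port number, or None on failure:

    def try_bind(port, socket_type, protocol):
        s = socket.socket(socket.AF_INET, socket_type, protocol)
        try:
            try:
                s.bind(('', port))
                # with port 0 the OS picks a free port; report the one actually bound
                return s.getsockname()[1]
            except socket.error:
                return None
        finally:
            s.close()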