OLD | NEW |
1 #!/usr/bin/python | 1 #!/usr/bin/python |
2 """ | 2 """ |
3 KVM configuration file utility functions. | 3 KVM test configuration file parser |
4 | 4 |
5 @copyright: Red Hat 2008-2010 | 5 @copyright: Red Hat 2008-2011 |
6 """ | 6 """ |
7 | 7 |
8 import logging, re, os, sys, optparse, array, traceback, cPickle | 8 import re, os, sys, optparse, collections, string |
9 import common | |
10 import kvm_utils | |
11 from autotest_lib.client.common_lib import error | |
12 from autotest_lib.client.common_lib import logging_manager | |
13 | 9 |
14 | 10 |
15 class config: | 11 # Filter syntax: |
| 12 # , means OR |
| 13 # .. means AND |
| 14 # . means IMMEDIATELY-FOLLOWED-BY |
| 15 |
| 16 # Example: |
| 17 # qcow2..Fedora.14, RHEL.6..raw..boot, smp2..qcow2..migrate..ide |
| 18 # means match all dicts whose names have: |
| 19 # (qcow2 AND (Fedora IMMEDIATELY-FOLLOWED-BY 14)) OR |
| 20 # ((RHEL IMMEDIATELY-FOLLOWED-BY 6) AND raw AND boot) OR |
| 21 # (smp2 AND qcow2 AND migrate AND ide) |
| 22 |
| 23 # Note: |
| 24 # 'qcow2..Fedora.14' is equivalent to 'Fedora.14..qcow2'. |
| 25 # 'qcow2..Fedora.14' is not equivalent to 'qcow2..14.Fedora'. |
| 26 # 'ide, scsi' is equivalent to 'scsi, ide'. |
| 27 |
| 28 # Filters can be used in 3 ways: |
| 29 # only <filter> |
| 30 # no <filter> |
| 31 # <filter>: |
| 32 # The last one starts a conditional block. |
| 33 |
| 34 |
class ParserError(Exception):
    """
    Error raised for syntax/filter problems found while parsing.

    Carries the offending line and its location (filename:linenum) so that
    str() produces a useful diagnostic.

    Note: the original declared this as a plain old-style class; deriving
    from Exception is required for 'raise' on modern Python and lets generic
    'except Exception' handlers catch it, while remaining backward
    compatible with existing 'except ParserError' callers.
    """
    def __init__(self, msg, line=None, filename=None, linenum=None):
        Exception.__init__(self, msg)
        self.msg = msg
        self.line = line
        self.filename = filename
        self.linenum = linenum

    def __str__(self):
        if self.line:
            return "%s: %r (%s:%s)" % (self.msg, self.line,
                                       self.filename, self.linenum)
        else:
            return "%s (%s:%s)" % (self.msg, self.filename, self.linenum)
| 49 |
# Maximum number of failed filter cases each Node remembers (a bounded memo
# used by Parser.get_dicts() to prune subtrees that already failed filtering).
num_failed_cases = 5
| 52 |
class Node(object):
    """A node of the configuration tree built by Parser._parse()."""
    def __init__(self):
        self.name = []       # variant name parts (name.lstrip("@").split("."))
        self.dep = []        # dependency names declared after ':' in a variant
        self.content = []    # list of (filename, linenum, obj) tuples
        self.children = []   # child Node objects
        self.labels = set()  # all name parts appearing in this subtree
        # False for variants starting with '@' (not added to the shortname)
        self.append_to_shortname = False
        # Bounded memo of previously failed filter cases (see num_failed_cases)
        self.failed_cases = collections.deque()
| 63 |
def _match_adjacent(block, ctx, ctx_set):
    """
    Return how many leading elements of block appear in ctx as an adjacent
    (immediately-following) run, i.e. the longest prefix of block found as a
    contiguous subsequence of ctx.

    @param block: List of name elements (e.g. ["Fedora", "14"]).
    @param ctx: Context: a dict name as a list of elements.
    @param ctx_set: set(ctx), passed in for fast membership tests.
    """
    if block[0] not in ctx_set:
        return 0
    if len(block) == 1:
        return 1
    if block[1] not in ctx_set:
        # Only the first element can possibly match here; count it only if it
        # is the last element of ctx (so the rest of the block may still
        # match in a descendant context)
        return int(ctx[-1] == block[0])
    k = 0  # length of the current adjacent run matched so far
    i = ctx.index(block[0])
    while i < len(ctx):
        if k > 0 and ctx[i] != block[k]:
            # Run broken: rewind to just after where the failed run started
            i -= k - 1
            k = 0
        if ctx[i] == block[k]:
            k += 1
            if k >= len(block):
                break
            if block[k] not in ctx_set:
                # Next needed element appears nowhere in ctx; stop early
                break
        i += 1
    return k
| 87 |
def _might_match_adjacent(block, ctx, ctx_set, descendant_labels):
    """
    Check whether block could still fully match in some descendant context.

    The prefix of block already matched adjacently in ctx is skipped; every
    remaining element must at least appear among the descendant labels,
    otherwise no descendant can complete the match.
    """
    already_matched = _match_adjacent(block, ctx, ctx_set)
    return all(elem in descendant_labels for elem in block[already_matched:])
| 95 |
# Filter must inherit from object (otherwise type() won't work)
class Filter(object):
    """
    A parsed filter expression.

    The expression is stored in self.filter as a list of words; each word is
    a list of blocks and each block is a list of elements:
    ',' separates words (OR), '..' separates blocks (AND) and '.' separates
    elements (IMMEDIATELY-FOLLOWED-BY).
    """
    def __init__(self, s):
        self.filter = []
        # Validate characters before parsing
        for char in s:
            if not (char.isalnum() or char.isspace() or char in ".,_-"):
                raise ParserError("Illegal characters in filter")
        for word in s.replace(",", " ").split():
            word = [block.split(".") for block in word.split("..")]
            for block in word:
                for elem in block:
                    if not elem:
                        # e.g. 'a..', '.b' or 'a...b' produce empty elements
                        raise ParserError("Syntax error")
            self.filter += [word]


    def match(self, ctx, ctx_set):
        # The filter matches if some word matches, i.e. if every block of
        # that word matches adjacently in full
        for word in self.filter:
            for block in word:
                if _match_adjacent(block, ctx, ctx_set) != len(block):
                    break
            else:
                return True
        return False


    def might_match(self, ctx, ctx_set, descendant_labels):
        # The filter might match if some word might match, i.e. if every
        # block of that word could still match in a descendant context
        for word in self.filter:
            for block in word:
                if not _might_match_adjacent(block, ctx, ctx_set,
                                             descendant_labels):
                    break
            else:
                return True
        return False
| 131 |
| 132 |
class NoOnlyFilter(Filter):
    """Base class for filters built from 'only <filter>' / 'no <filter>' lines."""
    def __init__(self, line):
        # The filter expression is everything after the first word
        Filter.__init__(self, line.split(None, 1)[1])
        # Keep the original line for debugging/error messages
        self.line = line
| 138 |
class OnlyFilter(NoOnlyFilter):
    """Filter built from an 'only <filter>' line."""
    def might_pass(self, failed_ctx, failed_ctx_set, ctx, ctx_set,
                   descendant_labels):
        # This filter, which failed in failed_ctx, might pass in ctx if some
        # block now matches further than it did in failed_ctx and the whole
        # filter could still match in a descendant context
        for word in self.filter:
            for block in word:
                if (_match_adjacent(block, ctx, ctx_set) >
                    _match_adjacent(block, failed_ctx, failed_ctx_set)):
                    return self.might_match(ctx, ctx_set, descendant_labels)
        return False
| 149 |
class NoFilter(NoOnlyFilter):
    """Filter built from a 'no <filter>' line."""
    def might_pass(self, failed_ctx, failed_ctx_set, ctx, ctx_set,
                   descendant_labels):
        # This filter, which failed in failed_ctx, might pass in ctx if some
        # block now matches less than it did in failed_ctx and the filter
        # does not match ctx
        for word in self.filter:
            for block in word:
                if (_match_adjacent(block, ctx, ctx_set) <
                    _match_adjacent(block, failed_ctx, failed_ctx_set)):
                    return not self.match(ctx, ctx_set)
        return False
| 160 |
class Condition(NoFilter):
    """A conditional block ('<filter>:') together with its parsed content."""
    def __init__(self, line):
        # Call Filter.__init__ directly (not NoOnlyFilter.__init__) because a
        # condition line has no leading 'only'/'no' keyword -- just strip the
        # trailing ':'
        Filter.__init__(self, line.rstrip(":"))
        self.line = line
        # (filename, linenum, obj) tuples parsed inside this block
        self.content = []
| 167 |
class Parser(object):
    """
    Parse an input file or string that follows the KVM Test Config File format
    and generate a list of dicts that will be later used as configuration
    parameters by the KVM tests.

    @see: http://www.linux-kvm.org/page/KVM-Autotest/Test_Config_File
    """

    def __init__(self, filename=None, debug=False):
        """
        Initialize the parser and optionally parse a file.

        @param filename: Path of the file to parse.
        @param debug: Whether to turn on debugging output.
        """
        self.node = Node()
        self.debug = debug
        if filename:
            self.parse_file(filename)


    def parse_file(self, filename):
        """
        Parse a file.

        @param filename: Path of the configuration file.
        """
        # FileReader is defined elsewhere in this file
        self.node = self._parse(FileReader(filename), self.node)


    def parse_string(self, s):
        """
        Parse a string.

        @param s: String to parse.
        """
        # StrReader is defined elsewhere in this file
        self.node = self._parse(StrReader(s), self.node)


    def get_dicts(self, node=None, ctx=[], content=[], shortname=[], dep=[]):
        """
        Generate dictionaries from the code parsed so far.  This should
        be called after parsing something.

        @return: A dict generator.
        """
        # Note: the mutable default arguments are safe here because they are
        # only ever rebound (e.g. ctx = ctx + node.name), never mutated in
        # place.
        def process_content(content, failed_filters):
            # 1. Check that the filters in content are OK with the current
            # context (ctx).
            # 2. Move the parts of content that are still relevant into
            # new_content and unpack conditional blocks if appropriate.
            # For example, if an 'only' statement fully matches ctx, it
            # becomes irrelevant and is not appended to new_content.
            # If a conditional block fully matches, its contents are
            # unpacked into new_content.
            # 3. Move failed filters into failed_filters, so that next time we
            # reach this node or one of its ancestors, we'll check those
            # filters first.
            for t in content:
                filename, linenum, obj = t
                if type(obj) is str:
                    new_content.append(t)
                    continue
                elif type(obj) is OnlyFilter:
                    if not obj.might_match(ctx, ctx_set, labels):
                        self._debug("    filter did not pass: %r (%s:%s)",
                                    obj.line, filename, linenum)
                        failed_filters.append(t)
                        return False
                    elif obj.match(ctx, ctx_set):
                        # Fully satisfied; drop it from new_content
                        continue
                elif type(obj) is NoFilter:
                    if obj.match(ctx, ctx_set):
                        self._debug("    filter did not pass: %r (%s:%s)",
                                    obj.line, filename, linenum)
                        failed_filters.append(t)
                        return False
                    elif not obj.might_match(ctx, ctx_set, labels):
                        # Can never match again; drop it from new_content
                        continue
                elif type(obj) is Condition:
                    if obj.match(ctx, ctx_set):
                        self._debug("    conditional block matches: %r (%s:%s)",
                                    obj.line, filename, linenum)
                        # Check and unpack the content inside this Condition
                        # object (note: the failed filters should go into
                        # new_internal_filters because we don't expect them to
                        # come from outside this node, even if the Condition
                        # itself was external)
                        if not process_content(obj.content,
                                               new_internal_filters):
                            failed_filters.append(t)
                            return False
                        continue
                    elif not obj.might_match(ctx, ctx_set, labels):
                        continue
                # Still potentially relevant; keep it for descendants
                new_content.append(t)
            return True

        def might_pass(failed_ctx,
                       failed_ctx_set,
                       failed_external_filters,
                       failed_internal_filters):
            # Check whether a previously failed case could pass in the
            # current context (used to decide if re-processing is worthwhile)
            for t in failed_external_filters:
                if t not in content:
                    # The external content changed since the failure
                    return True
                filename, linenum, filter = t
                if filter.might_pass(failed_ctx, failed_ctx_set, ctx, ctx_set,
                                     labels):
                    return True
            for t in failed_internal_filters:
                filename, linenum, filter = t
                if filter.might_pass(failed_ctx, failed_ctx_set, ctx, ctx_set,
                                     labels):
                    return True
            return False

        def add_failed_case():
            # Remember this failure (bounded by num_failed_cases)
            node.failed_cases.appendleft((ctx, ctx_set,
                                          new_external_filters,
                                          new_internal_filters))
            if len(node.failed_cases) > num_failed_cases:
                node.failed_cases.pop()

        node = node or self.node
        # Update dep
        for d in node.dep:
            dep = dep + [".".join(ctx + [d])]
        # Update ctx
        ctx = ctx + node.name
        ctx_set = set(ctx)
        labels = node.labels
        # Get the current name
        name = ".".join(ctx)
        if node.name:
            self._debug("checking out %r", name)
        # Check previously failed filters
        for i, failed_case in enumerate(node.failed_cases):
            if not might_pass(*failed_case):
                self._debug("    this subtree has failed before")
                # Move the matching failed case to the front of the memo
                del node.failed_cases[i]
                node.failed_cases.appendleft(failed_case)
                return
        # Check content and unpack it into new_content
        new_content = []
        new_external_filters = []
        new_internal_filters = []
        if (not process_content(node.content, new_internal_filters) or
            not process_content(content, new_external_filters)):
            add_failed_case()
            return
        # Update shortname
        if node.append_to_shortname:
            shortname = shortname + node.name
        # Recurse into children
        count = 0
        for n in node.children:
            for d in self.get_dicts(n, ctx, new_content, shortname, dep):
                count += 1
                yield d
        # Reached leaf?
        if not node.children:
            self._debug("    reached leaf, returning it")
            d = {"name": name, "dep": dep, "shortname": ".".join(shortname)}
            for filename, linenum, op in new_content:
                # op is an Op object (defined elsewhere in this file)
                op.apply_to_dict(d, ctx, ctx_set)
            yield d
        # If this node did not produce any dicts, remember the failed filters
        # of its descendants
        elif not count:
            new_external_filters = []
            new_internal_filters = []
            for n in node.children:
                (failed_ctx,
                 failed_ctx_set,
                 failed_external_filters,
                 failed_internal_filters) = n.failed_cases[0]
                for obj in failed_internal_filters:
                    if obj not in new_internal_filters:
                        new_internal_filters.append(obj)
                for obj in failed_external_filters:
                    if obj in content:
                        if obj not in new_external_filters:
                            new_external_filters.append(obj)
                    else:
                        if obj not in new_internal_filters:
                            new_internal_filters.append(obj)
            add_failed_case()


    def _debug(self, s, *args):
        # Print a debug message only when debugging is enabled
        if self.debug:
            s = "DEBUG: %s" % s
            print s % args


    def _warn(self, s, *args):
        # Warnings are always printed
        s = "WARNING: %s" % s
        print s % args


    def _parse_variants(self, cr, node, prev_indent=-1):
        """
        Read and parse lines from a FileReader object until a line with an
        indent level lower than or equal to prev_indent is encountered.

        @param cr: A FileReader/StrReader object.
        @param node: A node to operate on.
        @param prev_indent: The indent level of the "parent" block.
        @return: A node object.
        """
        # node4 collects all the variants of this block as children
        node4 = Node()

        while True:
            line, indent, linenum = cr.get_next_line(prev_indent)
            if not line:
                break

            # A variant line looks like '- name: dep1, dep2'
            name, dep = map(str.strip, line.lstrip("- ").split(":", 1))
            for char in name:
                if not (char.isalnum() or char in "@._-"):
                    raise ParserError("Illegal characters in variant name",
                                      line, cr.filename, linenum)
            for char in dep:
                if not (char.isalnum() or char.isspace() or char in ".,_-"):
                    raise ParserError("Illegal characters in dependencies",
                                      line, cr.filename, linenum)

            # node2 wraps the parent node so the variant's content applies
            # on top of everything parsed so far
            node2 = Node()
            node2.children = [node]
            node2.labels = node.labels

            node3 = self._parse(cr, node2, prev_indent=indent)
            node3.name = name.lstrip("@").split(".")
            node3.dep = dep.replace(",", " ").split()
            # '@' prefix means the variant name is excluded from the shortname
            node3.append_to_shortname = not name.startswith("@")

            node4.children += [node3]
            node4.labels.update(node3.labels)
            node4.labels.update(node3.name)

        return node4


    def _parse(self, cr, node, prev_indent=-1):
        """
        Read and parse lines from a StrReader object until a line with an
        indent level lower than or equal to prev_indent is encountered.

        @param cr: A FileReader/StrReader object.
        @param node: A Node or a Condition object to operate on.
        @param prev_indent: The indent level of the "parent" block.
        @return: A node object.
        """
        while True:
            line, indent, linenum = cr.get_next_line(prev_indent)
            if not line:
                break

            words = line.split(None, 1)

            # Parse 'variants'
            if line == "variants:":
                # 'variants' is not allowed inside a conditional block
                if isinstance(node, Condition):
                    raise ParserError("'variants' is not allowed inside a "
                                      "conditional block",
                                      None, cr.filename, linenum)
                node = self._parse_variants(cr, node, prev_indent=indent)
                continue

            # Parse 'include' statements
            if words[0] == "include":
                if len(words) < 2:
                    raise ParserError("Syntax error: missing parameter",
                                      line, cr.filename, linenum)
                if not isinstance(cr, FileReader):
                    # 'include' paths are relative to the current file, so a
                    # string parse has nothing to resolve them against
                    raise ParserError("Cannot include because no file is "
                                      "currently open",
                                      line, cr.filename, linenum)
                filename = os.path.join(os.path.dirname(cr.filename), words[1])
                if not os.path.isfile(filename):
                    self._warn("%r (%s:%s): file doesn't exist or is not a "
                               "regular file", line, cr.filename, linenum)
                    continue
                node = self._parse(FileReader(filename), node)
                continue

            # Parse 'only' and 'no' filters
            if words[0] in ("only", "no"):
                if len(words) < 2:
                    raise ParserError("Syntax error: missing parameter",
                                      line, cr.filename, linenum)
                try:
                    if words[0] == "only":
                        f = OnlyFilter(line)
                    elif words[0] == "no":
                        f = NoFilter(line)
                except ParserError, e:
                    # Fill in location info missing from the inner error
                    e.line = line
                    e.filename = cr.filename
                    e.linenum = linenum
                    raise
                node.content += [(cr.filename, linenum, f)]
                continue

            # Parse conditional blocks
            if line.endswith(":"):
                try:
                    cond = Condition(line)
                except ParserError, e:
                    e.line = line
                    e.filename = cr.filename
                    e.linenum = linenum
                    raise
                # The block's content is parsed into the Condition object
                self._parse(cr, cond, prev_indent=indent)
                node.content += [(cr.filename, linenum, cond)]
                continue

            # Parse regular operators (Op is defined elsewhere in this file)
            try:
                op = Op(line)
            except ParserError, e:
                e.line = line
                e.filename = cr.filename
                e.linenum = linenum
                raise
            node.content += [(cr.filename, linenum, op)]

        return node
470 | 499 |
# Assignment operators

# Keys maintained by the parser itself; user-supplied assignment
# operations must never overwrite them.
_reserved_keys = set(("name", "shortname", "dep"))


def _op_set(d, key, value):
    """Assign value to d[key], unless key is reserved."""
    if key not in _reserved_keys:
        d[key] = value


def _op_append(d, key, value):
    """Append value to d[key] (a missing key is treated as ""), unless reserved."""
    if key not in _reserved_keys:
        d[key] = d.get(key, "") + value


def _op_prepend(d, key, value):
    """Prepend value to d[key] (a missing key is treated as ""), unless reserved."""
    if key not in _reserved_keys:
        d[key] = value + d.get(key, "")


def _op_regex_set(d, exp, value):
    """
    Assign value to every non-reserved key matching regex exp.

    The pattern is tested with match() and suffixed with '$'.
    NOTE(review): '%s$' is not grouped, so for patterns containing a
    top-level '|' only the last alternative is end-anchored -- confirm
    this is intended (the previous code used '^(%s)$').
    """
    exp = re.compile("%s$" % exp)
    for key in d:
        if key not in _reserved_keys and exp.match(key):
            d[key] = value


def _op_regex_append(d, exp, value):
    """Append value to every non-reserved key matching regex exp."""
    exp = re.compile("%s$" % exp)
    for key in d:
        if key not in _reserved_keys and exp.match(key):
            d[key] += value


def _op_regex_prepend(d, exp, value):
    """Prepend value to every non-reserved key matching regex exp."""
    exp = re.compile("%s$" % exp)
    for key in d:
        if key not in _reserved_keys and exp.match(key):
            d[key] = value + d[key]


def _op_regex_del(d, empty, exp):
    """
    Delete every non-reserved key matching regex exp.

    The unused 'empty' parameter preserves the common (d, key, value)
    operator signature; for 'del' lines the regex arrives in the value slot.
    """
    exp = re.compile("%s$" % exp)
    # Snapshot the keys before mutating the dict: deleting while iterating
    # a dict is an error (d.keys() only happened to be a copy on Python 2).
    for key in list(d.keys()):
        if key not in _reserved_keys and exp.match(key):
            del d[key]


# Operator token -> (regex fragment that matches the token, handler).
_ops = {"=": (r"\=", _op_set),
        "+=": (r"\+\=", _op_append),
        "<=": (r"\<\=", _op_prepend),
        "?=": (r"\?\=", _op_regex_set),
        "?+=": (r"\?\+\=", _op_regex_append),
        "?<=": (r"\?\<\=", _op_regex_prepend),
        "del": (r"^del\b", _op_regex_del)}

# Matches the first operator occurrence in an assignment line.
_ops_exp = re.compile("|".join([op[0] for op in _ops.values()]))
| 558 |
class Op(object):
    """
    One assignment operation parsed from a config file line.

    A line has the form: [<filter>: [<filter>: ...]] <key> <op> <value>
    where <op> is one of the tokens in _ops ('=', '+=', '<=', '?=',
    '?+=', '?<=' or 'del').
    """
    def __init__(self, line):
        """
        Parse an assignment line.

        @param line: The line to parse.
        @raise ParserError: If no operator is found in the line.
        """
        match = _ops_exp.search(line)
        if match is None:
            raise ParserError("Syntax error: missing operator")
        lhs = line[:match.start()].strip()
        rhs = line[match.end():].strip()
        # Strip one pair of enclosing quotes, if both ends agree.
        if rhs and rhs[0] == rhs[-1] and rhs[0] in "\"'":
            rhs = rhs[1:-1]
        parts = [p.strip() for p in lhs.split(":")]
        # Everything before the last ':' is a filter; the remainder is
        # the key being assigned.
        self.filters = [Filter(f) for f in parts[:-1]]
        self.key = parts[-1]
        self.value = rhs
        self.func = _ops[match.group()][1]


    def apply_to_dict(self, d, ctx, ctx_set):
        """
        Apply the operation to d, but only if every filter matches.

        @param d: The dict to modify.
        @param ctx: Context passed to each filter's match().
        @param ctx_set: Context set passed to each filter's match().
        """
        # all() short-circuits exactly like the original early-return loop.
        if all(f.match(ctx, ctx_set) for f in self.filters):
            self.func(d, self.key, self.value)
| 581 |
| 582 |
# StrReader and FileReader

class StrReader(object):
    """
    Preprocess an input string for easy reading.
    """
    def __init__(self, s):
        """
        Initialize the reader.

        @param s: The string to parse.
        """
        self.filename = "<string>"
        self._lines = []
        self._line_index = 0
        for linenum, raw in enumerate(s.splitlines()):
            expanded = raw.rstrip().expandtabs()
            stripped = expanded.lstrip()
            # Skip blank lines and '#'/'//' comment lines entirely.
            if not stripped or stripped.startswith(("#", "//")):
                continue
            indent = len(expanded) - len(stripped)
            # Keep 1-based line numbers for error reporting.
            self._lines.append((stripped, indent, linenum + 1))


    def get_next_line(self, prev_indent):
        """
        Get the next non-empty, non-comment line in the string, whose
        indentation level is higher than prev_indent.

        @param prev_indent: The indentation level of the previous block.
        @return: (line, indent, linenum), where indent is the line's
            indentation level.  If no line is available, (None, -1, -1) is
            returned.
        """
        try:
            line, indent, linenum = self._lines[self._line_index]
        except IndexError:
            return None, -1, -1
        # A line that does not out-indent the previous block ends it;
        # leave the cursor in place so the caller's parent can consume it.
        if indent <= prev_indent:
            return None, -1, -1
        self._line_index += 1
        return line, indent, linenum
582 try: | 626 |
583 if self.line_index < len(self.lines): | 627 |
class FileReader(StrReader):
    """
    Preprocess an input file for easy reading.
    """
    def __init__(self, filename):
        """
        Initialize the reader.

        @param filename: The name of the input file.
        """
        # Read the whole file up front and close the descriptor promptly
        # instead of leaking it to the garbage collector.
        f = open(filename)
        try:
            StrReader.__init__(self, f.read())
        finally:
            f.close()
        self.filename = filename
596 | 640 |
597 | 641 |
598 def seek(self, index): | 642 if __name__ == "__main__": |
599 """ | 643 parser = optparse.OptionParser("usage: %prog [options] <filename>") |
600 Set the current line index. | 644 parser.add_option("-v", "--verbose", dest="debug", action="store_true", |
601 """ | 645 help="include debug messages in console output") |
602 self.line_index = index | 646 parser.add_option("-f", "--fullname", dest="fullname", action="store_true", |
603 | 647 help="show full dict names instead of short names") |
604 def raise_error(self, msg): | 648 parser.add_option("-c", "--contents", dest="contents", action="store_true", |
605 """Raise an error related to the last line returned by get_next_line() | 649 help="show dict contents") |
606 """ | 650 |
607 if self.line_index == 0: # nothing was read. shouldn't happen, but... | 651 options, args = parser.parse_args() |
608 line_id = 'BEGIN' | 652 if not args: |
609 elif self.line_index >= len(self.lines): # past EOF | 653 parser.error("filename required") |
610 line_id = 'EOF' | 654 |
| 655 c = Parser(args[0], debug=options.debug) |
| 656 for i, d in enumerate(c.get_dicts()): |
| 657 if options.fullname: |
| 658 print "dict %4d: %s" % (i + 1, d["name"]) |
611 else: | 659 else: |
612 # line_index is the _next_ line. get the previous one | 660 print "dict %4d: %s" % (i + 1, d["shortname"]) |
613 line_id = str(self.real_number[self.line_index-1]) | 661 if options.contents: |
614 raise error.AutotestError("%s:%s: %s" % (self.filename, line_id, msg)) | 662 keys = d.keys() |
615 | 663 keys.sort() |
616 | 664 for key in keys: |
617 # Array structure: | 665 print " %s = %s" % (key, d[key]) |
618 # ---------------- | |
619 # The first 4 elements contain the indices of the 4 segments. | |
620 # a[0] -- Index of beginning of 'name' segment (always 4). | |
621 # a[1] -- Index of beginning of 'shortname' segment. | |
622 # a[2] -- Index of beginning of 'depend' segment. | |
623 # a[3] -- Index of beginning of 'content' segment. | |
624 # The next elements in the array comprise the aforementioned segments: | |
625 # The 'name' segment begins with a[a[0]] and ends with a[a[1]-1]. | |
626 # The 'shortname' segment begins with a[a[1]] and ends with a[a[2]-1]. | |
627 # The 'depend' segment begins with a[a[2]] and ends with a[a[3]-1]. | |
628 # The 'content' segment begins with a[a[3]] and ends at the end of the array. | |
629 | |
630 # The following functions append/prepend to various segments of an array. | |
631 | |
632 def _array_append_to_name_shortname_depend(a, name, depend): | |
633 a.insert(a[1], name) | |
634 a.insert(a[2] + 1, name) | |
635 a.insert(a[3] + 2, depend) | |
636 a[1] += 1 | |
637 a[2] += 2 | |
638 a[3] += 3 | |
639 | |
640 | |
641 def _array_prepend_to_name_shortname_depend(a, name, depend): | |
642 a[1] += 1 | |
643 a[2] += 2 | |
644 a[3] += 3 | |
645 a.insert(a[0], name) | |
646 a.insert(a[1], name) | |
647 a.insert(a[2], depend) | |
648 | |
649 | |
650 def _array_append_to_name_depend(a, name, depend): | |
651 a.insert(a[1], name) | |
652 a.insert(a[3] + 1, depend) | |
653 a[1] += 1 | |
654 a[2] += 1 | |
655 a[3] += 2 | |
656 | |
657 | |
658 def _array_prepend_to_name_depend(a, name, depend): | |
659 a[1] += 1 | |
660 a[2] += 1 | |
661 a[3] += 2 | |
662 a.insert(a[0], name) | |
663 a.insert(a[2], depend) | |
664 | |
665 | |
666 def _array_append_to_content(a, content): | |
667 a.append(content) | |
668 | |
669 | |
670 def _array_get_name(a, object_cache): | |
671 """ | |
672 Return the name of a dictionary represented by a given array. | |
673 | |
674 @param a: Array representing a dictionary. | |
675 @param object_cache: A list of strings referenced by elements in the array. | |
676 """ | |
677 return ".".join([object_cache[i] for i in a[a[0]:a[1]]]) | |
678 | |
679 | |
680 def _array_get_all(a, object_cache): | |
681 """ | |
682 Return a 4-tuple containing all the data stored in a given array, in a | |
683 format that is easy to turn into an actual dictionary. | |
684 | |
685 @param a: Array representing a dictionary. | |
686 @param object_cache: A list of strings referenced by elements in the array. | |
687 @return: A 4-tuple: (name, shortname, depend, content), in which all | |
688 members are strings except depend which is a list of strings. | |
689 """ | |
690 name = ".".join([object_cache[i] for i in a[a[0]:a[1]]]) | |
691 shortname = ".".join([object_cache[i] for i in a[a[1]:a[2]]]) | |
692 content = "".join([object_cache[i] for i in a[a[3]:]]) | |
693 depend = [] | |
694 prefix = "" | |
695 for n, d in zip(a[a[0]:a[1]], a[a[2]:a[3]]): | |
696 for dep in object_cache[d].split(): | |
697 depend.append(prefix + dep) | |
698 prefix += object_cache[n] + "." | |
699 return name, shortname, depend, content | |
700 | |
701 | |
702 if __name__ == "__main__": | |
703 parser = optparse.OptionParser("usage: %prog [options] [filename]") | |
704 parser.add_option('--verbose', dest="debug", action='store_true', | |
705 help='include debug messages in console output') | |
706 | |
707 options, args = parser.parse_args() | |
708 debug = options.debug | |
709 if args: | |
710 filenames = args | |
711 else: | |
712 filenames = [os.path.join(os.path.dirname(sys.argv[0]), "tests.cfg")] | |
713 | |
714 # Here we configure the stand alone program to use the autotest | |
715 # logging system. | |
716 logging_manager.configure_logging(kvm_utils.KvmLoggingConfig(), | |
717 verbose=debug) | |
718 cfg = config(debug=debug) | |
719 for fn in filenames: | |
720 cfg.parse_file(fn) | |
721 dicts = cfg.get_generator() | |
722 for i, dict in enumerate(dicts): | |
723 print "Dictionary #%d:" % (i) | |
724 keys = dict.keys() | |
725 keys.sort() | |
726 for key in keys: | |
727 print " %s = %s" % (key, dict[key]) | |
OLD | NEW |