Chromium Code Reviews

Side by Side Diff: third_party/google-endpoints/ply/cpp.py

Issue 2666783008: Add google-endpoints to third_party/. (Closed)
Patch Set: Created 3 years, 10 months ago
1 # -----------------------------------------------------------------------------
2 # cpp.py
3 #
4 # Author: David Beazley (http://www.dabeaz.com)
5 # Copyright (C) 2007
6 # All rights reserved
7 #
8 # This module implements an ANSI-C style lexical preprocessor for PLY.
9 # -----------------------------------------------------------------------------
10 from __future__ import generators
11
12 import sys
13
14 # Some Python 3 compatibility shims
15 if sys.version_info.major < 3:
16 STRING_TYPES = (str, unicode)
17 else:
18 STRING_TYPES = str
19 xrange = range
20
21 # -----------------------------------------------------------------------------
22 # Default preprocessor lexer definitions. These tokens are enough to get
23 # a basic preprocessor working. Other modules may import these if they want
24 # -----------------------------------------------------------------------------
25
26 tokens = (
27 'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND'
28 )
29
30 literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
31
32 # Whitespace
33 def t_CPP_WS(t):
34 r'\s+'
35 t.lexer.lineno += t.value.count("\n")
36 return t
37
38 t_CPP_POUND = r'\#'
39 t_CPP_DPOUND = r'\#\#'
40
41 # Identifier
42 t_CPP_ID = r'[A-Za-z_][\w_]*'
43
44 # Integer literal
45 def CPP_INTEGER(t):
46 r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
47 return t
48
49 t_CPP_INTEGER = CPP_INTEGER
50
51 # Floating literal
52 t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
53
54 # String literal
55 def t_CPP_STRING(t):
56 r'\"([^\\\n]|(\\(.|\n)))*?\"'
57 t.lexer.lineno += t.value.count("\n")
58 return t
59
60 # Character constant 'c' or L'c'
61 def t_CPP_CHAR(t):
62 r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
63 t.lexer.lineno += t.value.count("\n")
64 return t
65
66 # Comment
67 def t_CPP_COMMENT1(t):
68 r'(/\*(.|\n)*?\*/)'
69 ncr = t.value.count("\n")
70 t.lexer.lineno += ncr
71 # replace with one space or a number of '\n'
72 t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' '
73 return t
74
75 # Line comment
76 def t_CPP_COMMENT2(t):
77 r'(//.*?(\n|$))'
78 # replace with '\n'
79 t.type = 'CPP_WS'; t.value = '\n'; return t  # return the token so the comment's newline is not dropped (otherwise adjacent lines merge)
80
81 def t_error(t):
82 t.type = t.value[0]
83 t.value = t.value[0]
84 t.lexer.skip(1)
85 return t
86
87 import re
88 import copy
89 import time
90 import os.path
91
92 # -----------------------------------------------------------------------------
93 # trigraph()
94 #
95 # Given an input string, this function replaces all trigraph sequences.
96 # The following mapping is used:
97 #
98 # ??= #
99 # ??/ \
100 # ??' ^
101 # ??( [
102 # ??) ]
103 # ??! |
104 # ??< {
105 # ??> }
106 # ??- ~
107 # -----------------------------------------------------------------------------
108
109 _trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
110 _trigraph_rep = {
111 '=':'#',
112 '/':'\\',
113 "'":'^',
114 '(':'[',
115 ')':']',
116 '!':'|',
117 '<':'{',
118 '>':'}',
119 '-':'~'
120 }
121
122 def trigraph(input):
123 return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
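
A quick illustration of the mapping above; a hypothetical check, not part of the module (trigraph() is pure string rewriting, so it needs no lexer):

    # each '??X' pair is rewritten according to the table above
    assert trigraph("??=include ??<??>") == "#include {}"
    assert trigraph("a ??' b") == "a ^ b"
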
124
125 # ------------------------------------------------------------------
126 # Macro object
127 #
128 # This object holds information about preprocessor macros
129 #
130 # .name - Macro name (string)
131 # .value - Macro value (a list of tokens)
132 # .arglist - List of argument names
133 # .variadic - Boolean indicating whether or not variadic macro
134 # .vararg - Name of the variadic parameter
135 #
136 # When a macro is created, the macro replacement token sequence is
137 # pre-scanned and used to create patch lists that are later used
138 # during macro expansion
139 # ------------------------------------------------------------------
140
141 class Macro(object):
142 def __init__(self,name,value,arglist=None,variadic=False):
143 self.name = name
144 self.value = value
145 self.arglist = arglist
146 self.variadic = variadic
147 if variadic:
148 self.vararg = arglist[-1]
149 self.source = None
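
For illustration, this is roughly what define() (further down) builds for a variadic directive such as #define LOG(fmt, ...); a hypothetical sketch with the replacement token list left empty for brevity:

    # arglist holds plain argument-name strings; define() renames '...' to __VA_ARGS__
    m = Macro("LOG", value=[], arglist=["fmt", "__VA_ARGS__"], variadic=True)
    assert m.vararg == "__VA_ARGS__"
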
150
151 # ------------------------------------------------------------------
152 # Preprocessor object
153 #
154 # Object representing a preprocessor. Contains macro definitions,
155 # include directories, and other information
156 # ------------------------------------------------------------------
157
158 class Preprocessor(object):
159 def __init__(self,lexer=None):
160 if lexer is None:
161 import ply.lex as lex; lexer = lex.lexer  # lazy import: 'lex' is not bound at module level
162 self.lexer = lexer
163 self.macros = { }
164 self.path = []
165 self.temp_path = []
166
167 # Probe the lexer for selected tokens
168 self.lexprobe()
169
170 tm = time.localtime()
171 self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
172 self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
173 self.parser = None
174
175 # ----------------------------------------------------------------------
176 # tokenize()
177 #
178 # Utility function. Given a string of text, tokenize into a list of tokens
179 # ----------------------------------------------------------------------
180
181 def tokenize(self,text):
182 tokens = []
183 self.lexer.input(text)
184 while True:
185 tok = self.lexer.token()
186 if not tok: break
187 tokens.append(tok)
188 return tokens
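
A minimal usage sketch, assuming ply is installed and the lexer is built from this module's token rules, as in the __main__ block at the bottom of this file (later sketches reuse this `p`):

    import ply.lex as lex
    lexer = lex.lex()              # picks up the t_CPP_* rules defined in this module
    p = Preprocessor(lexer)
    toks = p.tokenize("x + 42")
    # token types: CPP_ID, CPP_WS, '+', CPP_WS, CPP_INTEGER
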
189
190 # ---------------------------------------------------------------------
191 # error()
192 #
193 # Report a preprocessor error/warning of some kind
194 # ----------------------------------------------------------------------
195
196 def error(self,file,line,msg):
197 print("%s:%d %s" % (file,line,msg))
198
199 # ----------------------------------------------------------------------
200 # lexprobe()
201 #
202 # This method probes the preprocessor lexer object to discover
203 # the token types of symbols that are important to the preprocessor.
204 # If this works right, the preprocessor will simply "work"
205 # with any suitable lexer regardless of how tokens have been named.
206 # ----------------------------------------------------------------------
207
208 def lexprobe(self):
209
210 # Determine the token type for identifiers
211 self.lexer.input("identifier")
212 tok = self.lexer.token()
213 if not tok or tok.value != "identifier":
214 print("Couldn't determine identifier type")
215 else:
216 self.t_ID = tok.type
217
218 # Determine the token type for integers
219 self.lexer.input("12345")
220 tok = self.lexer.token()
221 if not tok or int(tok.value) != 12345:
222 print("Couldn't determine integer type")
223 else:
224 self.t_INTEGER = tok.type
225 self.t_INTEGER_TYPE = type(tok.value)
226
227 # Determine the token type for strings enclosed in double quotes
228 self.lexer.input("\"filename\"")
229 tok = self.lexer.token()
230 if not tok or tok.value != "\"filename\"":
231 print("Couldn't determine string type")
232 else:
233 self.t_STRING = tok.type
234
235 # Determine the token type for whitespace--if any
236 self.lexer.input(" ")
237 tok = self.lexer.token()
238 if not tok or tok.value != " ":
239 self.t_SPACE = None
240 else:
241 self.t_SPACE = tok.type
242
243 # Determine the token type for newlines
244 self.lexer.input("\n")
245 tok = self.lexer.token()
246 if not tok or tok.value != "\n":
247 self.t_NEWLINE = None
248 print("Couldn't determine token for newlines")
249 else:
250 self.t_NEWLINE = tok.type
251
252 self.t_WS = (self.t_SPACE, self.t_NEWLINE)
253
254 # Check for other characters used by the preprocessor
255 chars = [ '<','>','#','##','\\','(',')',',','.']
256 for c in chars:
257 self.lexer.input(c)
258 tok = self.lexer.token()
259 if not tok or tok.value != c:
260 print("Unable to lex '%s' required for preprocessor" % c)
261
262 # ----------------------------------------------------------------------
263 # add_path()
264 #
265 # Adds a search path to the preprocessor.
266 # ----------------------------------------------------------------------
267
268 def add_path(self,path):
269 self.path.append(path)
270
271 # ----------------------------------------------------------------------
272 # group_lines()
273 #
274 # Given an input string, this function splits it into lines. Trailing whitespace
275 # is removed. Any line ending with \ is grouped with the next line. This
276 # function forms the lowest level of the preprocessor---grouping input text into
277 # a line-by-line format.
278 # ----------------------------------------------------------------------
279
280 def group_lines(self,input):
281 lex = self.lexer.clone()
282 lines = [x.rstrip() for x in input.splitlines()]
283 for i in xrange(len(lines)):
284 j = i+1
285 while lines[i].endswith('\\') and (j < len(lines)):
286 lines[i] = lines[i][:-1]+lines[j]
287 lines[j] = ""
288 j += 1
289
290 input = "\n".join(lines)
291 lex.input(input)
292 lex.lineno = 1
293
294 current_line = []
295 while True:
296 tok = lex.token()
297 if not tok:
298 break
299 current_line.append(tok)
300 if tok.type in self.t_WS and '\n' in tok.value:
301 yield current_line
302 current_line = []
303
304 if current_line:
305 yield current_line
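
For example, a trailing backslash folds two physical lines into one logical line before tokens are grouped; a sketch reusing `p` from the tokenize() example above:

    src = "#define TWO 1 + \\\n1\nint x;\n"
    logical = list(p.group_lines(src))
    # logical[0] is the token list for the joined '#define TWO 1 + 1' line;
    # the last group holds the tokens for 'int x;'
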
306
307 # ----------------------------------------------------------------------
308 # tokenstrip()
309 #
310 # Remove leading/trailing whitespace tokens from a token list
311 # ----------------------------------------------------------------------
312
313 def tokenstrip(self,tokens):
314 i = 0
315 while i < len(tokens) and tokens[i].type in self.t_WS:
316 i += 1
317 del tokens[:i]
318 i = len(tokens)-1
319 while i >= 0 and tokens[i].type in self.t_WS:
320 i -= 1
321 del tokens[i+1:]
322 return tokens
323
324
325 # ----------------------------------------------------------------------
326 # collect_args()
327 #
328 # Collects comma-separated arguments from a list of tokens. The arguments
329 # must be enclosed in parentheses. Returns a tuple (tokencount,args,positions)
330 # where tokencount is the number of tokens consumed, args is a list of arguments,
331 # and positions is a list of integers containing the starting index of each
332 # argument. Each argument is represented by a list of tokens.
333 #
334 # When collecting arguments, leading and trailing whitespace is removed
335 # from each argument.
336 #
337 # This function properly handles nested parentheses and commas---these do not
338 # define new arguments.
339 # ----------------------------------------------------------------------
340
341 def collect_args(self,tokenlist):
342 args = []
343 positions = []
344 current_arg = []
345 nesting = 1
346 tokenlen = len(tokenlist)
347
348 # Search for the opening '('.
349 i = 0
350 while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
351 i += 1
352
353 if (i < tokenlen) and (tokenlist[i].value == '('):
354 positions.append(i+1)
355 else:
356 self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
357 return 0, [], []
358
359 i += 1
360
361 while i < tokenlen:
362 t = tokenlist[i]
363 if t.value == '(':
364 current_arg.append(t)
365 nesting += 1
366 elif t.value == ')':
367 nesting -= 1
368 if nesting == 0:
369 if current_arg:
370 args.append(self.tokenstrip(current_arg))
371 positions.append(i)
372 return i+1,args,positions
373 current_arg.append(t)
374 elif t.value == ',' and nesting == 1:
375 args.append(self.tokenstrip(current_arg))
376 positions.append(i+1)
377 current_arg = []
378 else:
379 current_arg.append(t)
380 i += 1
381
382 # Missing end argument
383 self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
384 return 0, [],[]
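
A sketch of the contract, reusing `p` from above: nested parentheses, and the commas inside them, do not split arguments.

    count, args, positions = p.collect_args(p.tokenize("(a, f(b, c), d)"))
    # count is the number of tokens consumed through the closing ')'
    assert len(args) == 3          # 'a', 'f(b, c)', 'd'
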
385
386 # ----------------------------------------------------------------------
387 # macro_prescan()
388 #
389 # Examine the macro value (token sequence) and identify patch points
390 # This is used to speed up macro expansion later on---we'll know
391 # right away where to apply patches to the value to form the expansion
392 # ----------------------------------------------------------------------
393
394 def macro_prescan(self,macro):
395 macro.patch = [] # Standard macro arguments
396 macro.str_patch = [] # String conversion expansion
397 macro.var_comma_patch = [] # Variadic macro comma patch
398 i = 0
399 while i < len(macro.value):
400 if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
401 argnum = macro.arglist.index(macro.value[i].value)
402 # Conversion of argument to a string
403 if i > 0 and macro.value[i-1].value == '#':
404 macro.value[i] = copy.copy(macro.value[i])
405 macro.value[i].type = self.t_STRING
406 del macro.value[i-1]
407 macro.str_patch.append((argnum,i-1))
408 continue
409 # Concatenation
410 elif (i > 0 and macro.value[i-1].value == '##'):
411 macro.patch.append(('c',argnum,i-1))
412 del macro.value[i-1]
413 continue
414 elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
415 macro.patch.append(('c',argnum,i))
416 i += 1
417 continue
418 # Standard expansion
419 else:
420 macro.patch.append(('e',argnum,i))
421 elif macro.value[i].value == '##':
422 if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
423 ((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
424 (macro.value[i+1].value == macro.vararg):
425 macro.var_comma_patch.append(i-1)
426 i += 1
427 macro.patch.sort(key=lambda x: x[2],reverse=True)
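
For instance, #define STR(x) #x leaves a single value token that the prescan rewrites to a string token and records in str_patch; a sketch reusing `p` (define() is implemented further down in this class):

    p.define("STR(x) #x")
    m = p.macros["STR"]
    assert m.str_patch == [(0, 0)]   # argument 0 is stringized at value position 0
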
428
429 # ----------------------------------------------------------------------
430 # macro_expand_args()
431 #
432 # Given a Macro and list of arguments (each a token list), this method
433 # returns an expanded version of a macro. The return value is a token sequence
434 # representing the replacement macro tokens
435 # ----------------------------------------------------------------------
436
437 def macro_expand_args(self,macro,args):
438 # Make a copy of the macro token sequence
439 rep = [copy.copy(_x) for _x in macro.value]
440
441 # Make string expansion patches. These do not alter the length of the replacement sequence
442
443 str_expansion = {}
444 for argnum, i in macro.str_patch:
445 if argnum not in str_expansion:
446 str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
447 rep[i] = copy.copy(rep[i])
448 rep[i].value = str_expansion[argnum]
449
450 # Make the variadic macro comma patch. If the variadic macro argument is empty, we get rid of the comma that precedes it
451 comma_patch = False
452 if macro.variadic and not args[-1]:
453 for i in macro.var_comma_patch:
454 rep[i] = None
455 comma_patch = True
456
457 # Make all other patches. The order of these matters. It is assumed that the patch list
458 # has been sorted in reverse order of patch location since replacements will cause the
459 # size of the replacement sequence to expand from the patch point.
460
461 expanded = { }
462 for ptype, argnum, i in macro.patch:
463 # Concatenation. Argument is left unexpanded
464 if ptype == 'c':
465 rep[i:i+1] = args[argnum]
466 # Normal expansion. Argument is macro expanded first
467 elif ptype == 'e':
468 if argnum not in expanded:
469 expanded[argnum] = self.expand_macros(args[argnum])
470 rep[i:i+1] = expanded[argnum]
471
472 # Get rid of removed comma if necessary
473 if comma_patch:
474 rep = [_i for _i in rep if _i]
475
476 return rep
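
Continuing the STR sketch above: applying the string patch to one collected argument yields the quoted spelling of the argument's tokens.

    rep = p.macro_expand_args(p.macros["STR"], [p.tokenize("a+b")])
    assert "".join(t.value for t in rep) == '"a+b"'
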
477
478
479 # ----------------------------------------------------------------------
480 # expand_macros()
481 #
482 # Given a list of tokens, this function performs macro expansion.
483 # The expanded argument is a dictionary that contains macros already
484 # expanded. This is used to prevent infinite recursion.
485 # ----------------------------------------------------------------------
486
487 def expand_macros(self,tokens,expanded=None):
488 if expanded is None:
489 expanded = {}
490 i = 0
491 while i < len(tokens):
492 t = tokens[i]
493 if t.type == self.t_ID:
494 if t.value in self.macros and t.value not in expanded:
495 # Yes, we found a macro match
496 expanded[t.value] = True
497
498 m = self.macros[t.value]
499 if not m.arglist:
500 # A simple macro
501 ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
502 for e in ex:
503 e.lineno = t.lineno
504 tokens[i:i+1] = ex
505 i += len(ex)
506 else:
507 # A macro with arguments
508 j = i + 1
509 while j < len(tokens) and tokens[j].type in self.t_WS:
510 j += 1
511 if tokens[j].value == '(':
512 tokcount,args,positions = self.collect_args(tokens[j:])
513 if not m.variadic and len(args) != len(m.arglist):
514 self.error(self.source,t.lineno,"Macro %s requir es %d arguments" % (t.value,len(m.arglist)))
515 i = j + tokcount
516 elif m.variadic and len(args) < len(m.arglist)-1:
517 if len(m.arglist) > 2:
518 self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
519 else:
520 self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
521 i = j + tokcount
522 else:
523 if m.variadic:
524 if len(args) == len(m.arglist)-1:
525 args.append([])
526 else:
527 args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
528 del args[len(m.arglist):]
529
530 # Get macro replacement text
531 rep = self.macro_expand_args(m,args)
532 rep = self.expand_macros(rep,expanded)
533 for r in rep:
534 r.lineno = t.lineno
535 tokens[i:j+tokcount] = rep
536 i += len(rep)
537 del expanded[t.value]
538 continue
539 elif t.value == '__LINE__':
540 t.type = self.t_INTEGER
541 t.value = self.t_INTEGER_TYPE(t.lineno)
542
543 i += 1
544 return tokens
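
A usage sketch, reusing `p` from above:

    p.define("SQUARE(x) ((x)*(x))")
    out = p.expand_macros(p.tokenize("SQUARE(n)"))
    assert "".join(t.value for t in out) == "((n)*(n))"
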
545
546 # ----------------------------------------------------------------------
547 # evalexpr()
548 #
549 # Evaluate an expression token sequence for the purposes of evaluating
550 # integral expressions.
551 # ----------------------------------------------------------------------
552
553 def evalexpr(self,tokens):
554 # tokens = tokenize(line)
555 # Search for defined macros
556 i = 0
557 while i < len(tokens):
558 if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
559 j = i + 1
560 needparen = False
561 result = "0L"
562 while j < len(tokens):
563 if tokens[j].type in self.t_WS:
564 j += 1
565 continue
566 elif tokens[j].type == self.t_ID:
567 if tokens[j].value in self.macros:
568 result = "1L"
569 else:
570 result = "0L"
571 if not needparen: break
572 elif tokens[j].value == '(':
573 needparen = True
574 elif tokens[j].value == ')':
575 break
576 else:
577 self.error(self.source,tokens[i].lineno,"Malformed defined()")
578 j += 1
579 tokens[i].type = self.t_INTEGER
580 tokens[i].value = self.t_INTEGER_TYPE(result)
581 del tokens[i+1:j+1]
582 i += 1
583 tokens = self.expand_macros(tokens)
584 for i,t in enumerate(tokens):
585 if t.type == self.t_ID:
586 tokens[i] = copy.copy(t)
587 tokens[i].type = self.t_INTEGER
588 tokens[i].value = self.t_INTEGER_TYPE("0L")
589 elif t.type == self.t_INTEGER:
590 tokens[i] = copy.copy(t)
591 # Strip off any trailing suffixes
592 tokens[i].value = str(tokens[i].value)
593 while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
594 tokens[i].value = tokens[i].value[:-1]
595
596 expr = "".join([str(x.value) for x in tokens])
597 expr = expr.replace("&&"," and ")
598 expr = expr.replace("||"," or ")
599 expr = expr.replace("!"," not ")
600 try:
601 result = eval(expr)
602 except Exception:
603 self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
604 result = 0
605 return result
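
The operators are mapped onto Python equivalents and the result comes from eval(); a sketch reusing `p` from above:

    p.define("FOO 1")
    assert p.evalexpr(p.tokenize("defined(FOO) && !defined(BAR)"))   # truthy
    # caveat: the blanket '!' rewrite above would also mangle '!=', so such
    # comparisons fall into the "Couldn't evaluate expression" path
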
606
607 # ----------------------------------------------------------------------
608 # parsegen()
609 #
610 # Parse an input string.
611 # ----------------------------------------------------------------------
612 def parsegen(self,input,source=None):
613
614 # Replace trigraph sequences
615 t = trigraph(input)
616 lines = self.group_lines(t)
617
618 if not source:
619 source = ""
620
621 self.define("__FILE__ \"%s\"" % source)
622
623 self.source = source
624 chunk = []
625 enable = True
626 iftrigger = False
627 ifstack = []
628
629 for x in lines:
630 for i,tok in enumerate(x):
631 if tok.type not in self.t_WS: break
632 if tok.value == '#':
633 # Preprocessor directive
634
635 # insert whitespace in place of the directive tokens that were consumed
636 for tok in x:
637 if tok.type in self.t_WS and '\n' in tok.value:
638 chunk.append(tok)
639
640 dirtokens = self.tokenstrip(x[i+1:])
641 if dirtokens:
642 name = dirtokens[0].value
643 args = self.tokenstrip(dirtokens[1:])
644 else:
645 name = ""
646 args = []
647
648 if name == 'define':
649 if enable:
650 for tok in self.expand_macros(chunk):
651 yield tok
652 chunk = []
653 self.define(args)
654 elif name == 'include':
655 if enable:
656 for tok in self.expand_macros(chunk):
657 yield tok
658 chunk = []
659 oldfile = self.macros['__FILE__']
660 for tok in self.include(args):
661 yield tok
662 self.macros['__FILE__'] = oldfile
663 self.source = source
664 elif name == 'undef':
665 if enable:
666 for tok in self.expand_macros(chunk):
667 yield tok
668 chunk = []
669 self.undef(args)
670 elif name == 'ifdef':
671 ifstack.append((enable,iftrigger))
672 if enable:
673 if not args[0].value in self.macros:
674 enable = False
675 iftrigger = False
676 else:
677 iftrigger = True
678 elif name == 'ifndef':
679 ifstack.append((enable,iftrigger))
680 if enable:
681 if args[0].value in self.macros:
682 enable = False
683 iftrigger = False
684 else:
685 iftrigger = True
686 elif name == 'if':
687 ifstack.append((enable,iftrigger))
688 if enable:
689 result = self.evalexpr(args)
690 if not result:
691 enable = False
692 iftrigger = False
693 else:
694 iftrigger = True
695 elif name == 'elif':
696 if ifstack:
697 if ifstack[-1][0]: # We only pay attention if outer "if" allows this
698 if enable: # If already true, we flip enable False
699 enable = False
700 elif not iftrigger: # If False, but not triggered yet, we'll check expression
701 result = self.evalexpr(args)
702 if result:
703 enable = True
704 iftrigger = True
705 else:
706 self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
707
708 elif name == 'else':
709 if ifstack:
710 if ifstack[-1][0]:
711 if enable:
712 enable = False
713 elif not iftrigger:
714 enable = True
715 iftrigger = True
716 else:
717 self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
718
719 elif name == 'endif':
720 if ifstack:
721 enable,iftrigger = ifstack.pop()
722 else:
723 self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
724 else:
725 # Unknown preprocessor directive
726 pass
727
728 else:
729 # Normal text
730 if enable:
731 chunk.extend(x)
732
733 for tok in self.expand_macros(chunk):
734 yield tok
735 chunk = []
736
737 # ----------------------------------------------------------------------
738 # include()
739 #
740 # Implementation of file-inclusion
741 # ----------------------------------------------------------------------
742
743 def include(self,tokens):
744 # Try to extract the filename and then process an include file
745 if not tokens:
746 return
747 if tokens:
748 if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
749 tokens = self.expand_macros(tokens)
750
751 if tokens[0].value == '<':
752 # Include <...>
753 i = 1
754 while i < len(tokens):
755 if tokens[i].value == '>':
756 break
757 i += 1
758 else:
759 print("Malformed #include <...>")
760 return
761 filename = "".join([x.value for x in tokens[1:i]])
762 path = self.path + [""] + self.temp_path
763 elif tokens[0].type == self.t_STRING:
764 filename = tokens[0].value[1:-1]
765 path = self.temp_path + [""] + self.path
766 else:
767 print("Malformed #include statement")
768 return
769 for p in path:
770 iname = os.path.join(p,filename)
771 try:
772 data = open(iname,"r").read()
773 dname = os.path.dirname(iname)
774 if dname:
775 self.temp_path.insert(0,dname)
776 for tok in self.parsegen(data,filename):
777 yield tok
778 if dname:
779 del self.temp_path[0]
780 break
781 except IOError:
782 pass
783 else:
784 print("Couldn't find '%s'" % filename)
785
786 # ----------------------------------------------------------------------
787 # define()
788 #
789 # Define a new macro
790 # ----------------------------------------------------------------------
791
792 def define(self,tokens):
793 if isinstance(tokens,STRING_TYPES):
794 tokens = self.tokenize(tokens)
795
796 linetok = tokens
797 try:
798 name = linetok[0]
799 if len(linetok) > 1:
800 mtype = linetok[1]
801 else:
802 mtype = None
803 if not mtype:
804 m = Macro(name.value,[])
805 self.macros[name.value] = m
806 elif mtype.type in self.t_WS:
807 # A normal macro
808 m = Macro(name.value,self.tokenstrip(linetok[2:]))
809 self.macros[name.value] = m
810 elif mtype.value == '(':
811 # A macro with arguments
812 tokcount, args, positions = self.collect_args(linetok[1:])
813 variadic = False
814 for a in args:
815 if variadic:
816 print("No more arguments may follow a variadic argument" )
817 break
818 astr = "".join([str(_i.value) for _i in a])
819 if astr == "...":
820 variadic = True
821 a[0].type = self.t_ID
822 a[0].value = '__VA_ARGS__'
823 variadic = True
824 del a[1:]
825 continue
826 elif astr[-3:] == "..." and a[0].type == self.t_ID:
827 variadic = True
828 del a[1:]
829 # If, for some reason, "." is part of the identifier, strip the "..." suffix
830 # off the name for the purposes of macro expansion
831 if a[0].value[-3:] == '...':
832 a[0].value = a[0].value[:-3]
833 continue
834 if len(a) > 1 or a[0].type != self.t_ID:
835 print("Invalid macro argument")
836 break
837 else:
838 mvalue = self.tokenstrip(linetok[1+tokcount:])
839 i = 0
840 while i < len(mvalue):
841 if i+1 < len(mvalue):
842 if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
843 del mvalue[i]
844 continue
845 elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
846 del mvalue[i+1]
847 i += 1
848 m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
849 self.macro_prescan(m)
850 self.macros[name.value] = m
851 else:
852 print("Bad macro definition")
853 except LookupError:
854 print("Bad macro definition")
855
856 # ----------------------------------------------------------------------
857 # undef()
858 #
859 # Undefine a macro
860 # ----------------------------------------------------------------------
861
862 def undef(self,tokens):
863 id = tokens[0].value
864 try:
865 del self.macros[id]
866 except LookupError:
867 pass
868
869 # ----------------------------------------------------------------------
870 # parse()
871 #
872 # Parse input text.
873 # ----------------------------------------------------------------------
874 def parse(self,input,source=None,ignore={}):
875 self.ignore = ignore
876 self.parser = self.parsegen(input,source)
877
878 # ----------------------------------------------------------------------
879 # token()
880 #
881 # Method to return individual tokens
882 # ----------------------------------------------------------------------
883 def token(self):
884 try:
885 while True:
886 tok = next(self.parser)
887 if tok.type not in self.ignore: return tok
888 except StopIteration:
889 self.parser = None
890 return None
891
892 if __name__ == '__main__':
893 import ply.lex as lex
894 lexer = lex.lex()
895
896 # Run a preprocessor
897 import sys
898 f = open(sys.argv[1])
899 input = f.read()
900
901 p = Preprocessor(lexer)
902 p.parse(input,sys.argv[1])
903 while True:
904 tok = p.token()
905 if not tok: break
906 print(p.source, tok)
