Chromium Code Reviews

Diff: tools/lexer_generator/generator.py

Issue 69293005: Experimental parser: add catch all rule (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/parser
Patch Set: Created 7 years, 1 month ago
 # Copyright 2013 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
 #
 #     * Redistributions of source code must retain the above copyright
 #       notice, this list of conditions and the following disclaimer.
 #     * Redistributions in binary form must reproduce the above
 #       copyright notice, this list of conditions and the following
 #       disclaimer in the documentation and/or other materials provided
(...skipping 73 matching lines...)
     body = "\n".join(scripts) + (load_outer_template % "\n".join(loads))
     return file_template % body

   def process_rules(self, parser_state):
     rule_map = {}
     builder = NfaBuilder()
     builder.set_character_classes(parser_state.character_classes)
     assert 'default' in parser_state.rules
     def process(k, v):
       graphs = []
+      continues = 0
       for (graph, (precedence, code, transition)) in v['regex']:
         default_code = v['default_action']
         action = code if code else default_code
         if action:
           graph = NfaBuilder.add_action(graph, (precedence, action))
-        if not transition:
+        if not transition or transition == 'break':
           pass
         elif transition == 'continue':
           assert not k == 'default'
+          continues += 1
           graph = NfaBuilder.add_continue(graph)
-        elif transition == 'break':
-          assert code
-          graph = NfaBuilder.add_break(graph)
         elif (transition == 'terminate' or
               transition == 'terminate_illegal'):
           assert not code
           graph = NfaBuilder.add_action(graph, (-1, transition))
         else:
           assert k == 'default'
           subgraph_modifier = '*' if code else None
           graph = NfaBuilder.join_subgraph(
             graph, transition, rule_map[transition], subgraph_modifier)
         graphs.append(graph)
+      if continues == len(graphs):
+        graphs.append(NfaBuilder.epsilon())
+      if v['catch_all']:
+        assert v['catch_all'] == 'continue'
+        graphs.append(NfaBuilder.add_continue(NfaBuilder.catch_all()))
       graph = NfaBuilder.or_graphs(graphs)
       rule_map[k] = graph
     # process first the subgraphs, then the default graph
     for k, v in parser_state.rules.items():
       if k == 'default': continue
       process(k, v)
     process('default', parser_state.rules['default'])
     # build the automata
     for rule_name, graph in rule_map.items():
       nfa = builder.nfa(graph)
(...skipping 39 matching lines...)
   with open(code_file, 'w') as f:
     f.write(code)
   print "wrote code to %s" % code_file

   input_file = args.input
   if input_file:
     with open(input_file, 'r') as f:
       input_text = f.read() + '\0'
     for t in generator.lex(input_text):
       print t
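
For context on what the new catch_all branch is meant to achieve, here is a small, self-contained sketch of the intended semantics, assuming the usual meaning of a catch-all lexer rule: when none of a rule's explicit patterns match at the current position, the lexer consumes a single character and continues instead of aborting. This is only an illustration; it does not use the real NfaBuilder API, and the helper name lex_with_catch_all is made up for this sketch.

import re

def lex_with_catch_all(text, patterns, catch_all_action):
  # Hypothetical helper, not part of generator.py: try each explicit pattern
  # at the current position; if none matches, the catch-all consumes exactly
  # one character and lexing continues rather than failing.
  pos = 0
  tokens = []
  while pos < len(text):
    for name, pattern in patterns:
      m = re.compile(pattern).match(text, pos)
      if m and m.end() > pos:
        tokens.append((name, m.group()))
        pos = m.end()
        break
    else:
      # This branch models the new catch_all == 'continue' behaviour.
      tokens.append((catch_all_action, text[pos]))
      pos += 1
  return tokens

print(lex_with_catch_all('12 abc 34',
                         [('NUMBER', r'[0-9]+'), ('WS', r'[ \t]+')],
                         'continue'))
# [('NUMBER', '12'), ('WS', ' '), ('continue', 'a'), ('continue', 'b'),
#  ('continue', 'c'), ('WS', ' '), ('NUMBER', '34')]

In the patch itself this fallback is expressed at the NFA level: NfaBuilder.add_continue(NfaBuilder.catch_all()) is or'ed into the rule's alternatives whenever the rule declares catch_all.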
