Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(204)

Side by Side Diff: tools/lexer_generator/rule_parser.py

Issue 180213003: Experimental parser: cleanup logging (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/parser
Patch Set: Created 6 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « tools/lexer_generator/regex_parser.py ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # Copyright 2013 the V8 project authors. All rights reserved. 1 # Copyright 2013 the V8 project authors. All rights reserved.
2 # Redistribution and use in source and binary forms, with or without 2 # Redistribution and use in source and binary forms, with or without
3 # modification, are permitted provided that the following conditions are 3 # modification, are permitted provided that the following conditions are
4 # met: 4 # met:
5 # 5 #
6 # * Redistributions of source code must retain the above copyright 6 # * Redistributions of source code must retain the above copyright
7 # notice, this list of conditions and the following disclaimer. 7 # notice, this list of conditions and the following disclaimer.
8 # * Redistributions in binary form must reproduce the above 8 # * Redistributions in binary form must reproduce the above
9 # copyright notice, this list of conditions and the following 9 # copyright notice, this list of conditions and the following
10 # disclaimer in the documentation and/or other materials provided 10 # disclaimer in the documentation and/or other materials provided
(...skipping 372 matching lines...) Expand 10 before | Expand all | Expand 10 after
def dfa(self):
  """Return the DFA for this automata, building and caching it on first use."""
  if self.__dfa:
    return self.__dfa
  (start, dfa_nodes) = self.nfa().compute_dfa()
  built = Dfa(self.encoding(), start, dfa_nodes)
  # Only the 'default' lexer state gets backtracking support wired in.
  if self.name() == 'default':
    built = BacktrackingGenerator.generate(
        built, self.__rule_processor.default_action())
  self.__dfa = built
  return self.__dfa
392 392
def optimize_dfa(self):
  """Replace the cached DFA with an optimized one derived from the minimal DFA.

  Must be called before any DFA has been cached; afterwards the minimal-DFA
  cache is dropped so later lookups see only the optimized automaton.
  """
  assert not self.__dfa
  assert not self.__minimial_dfa
  optimized = DfaOptimizer.optimize(self.minimal_dfa())
  # Clear the intermediate cache; the optimized DFA is authoritative now.
  self.__minimial_dfa = None
  self.__dfa = optimized
398 398
def minimal_dfa(self):
  """Return the minimized DFA, computing and caching it on first access."""
  cached = self.__minimial_dfa
  if not cached:
    cached = DfaMinimizer(self.dfa()).minimize()
    self.__minimial_dfa = cached
  return cached
403 403
def __process_parser_state(self):
  """Build per-lexer-state automata and the default action from parsed rules.

  Validates the parsed state, collapses each state's rule trees into a single
  alternation term, constructs one Automata per state, and records the
  default action.
  """
  parser_state = self.__parser_state
  assert 'default' in parser_state.rules, "default lexer state required"
  # Check that we don't transition to a nonexistent state.
  assert parser_state.transitions <= set(parser_state.rules.keys())
  rule_map = {}
  # Process all subgraphs: each state's trees become one or-term.
  for tree_name, v in parser_state.rules.items():
    assert v['trees'], "lexer state %s is empty" % tree_name
    rule_map[tree_name] = NfaBuilder.or_terms(v['trees'])
  # Build the automata. Only the state name is needed per iteration (the
  # whole rule_map is passed through), so iterate keys rather than items().
  for name in rule_map:
    self.__automata[name] = RuleProcessor.Automata(
        self, parser_state.character_classes, rule_map, name)
  # Process default_action.
  default_action = parser_state.rules['default']['default_action']
  self.__default_action = Action(default_action, 0)
OLDNEW
« no previous file with comments | « tools/lexer_generator/regex_parser.py ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698