Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(254)

Side by Side Diff: tools/lexer_generator/dfa_optimizer.py

Issue 170253007: Experimental parser: always apply default transitions (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/parser
Patch Set: Created 6 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « tools/lexer_generator/dfa.py ('k') | tools/lexer_generator/dot_utilities.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # Copyright 2013 the V8 project authors. All rights reserved. 1 # Copyright 2013 the V8 project authors. All rights reserved.
2 # Redistribution and use in source and binary forms, with or without 2 # Redistribution and use in source and binary forms, with or without
3 # modification, are permitted provided that the following conditions are 3 # modification, are permitted provided that the following conditions are
4 # met: 4 # met:
5 # 5 #
6 # * Redistributions of source code must retain the above copyright 6 # * Redistributions of source code must retain the above copyright
7 # notice, this list of conditions and the following disclaimer. 7 # notice, this list of conditions and the following disclaimer.
8 # * Redistributions in binary form must reproduce the above 8 # * Redistributions in binary form must reproduce the above
9 # copyright notice, this list of conditions and the following 9 # copyright notice, this list of conditions and the following
10 # disclaimer in the documentation and/or other materials provided 10 # disclaimer in the documentation and/or other materials provided
(...skipping 149 matching lines...) Expand 10 before | Expand all | Expand 10 after
160 incoming_transitions = {} 160 incoming_transitions = {}
161 def f(state, visitor_state): 161 def f(state, visitor_state):
162 for key, transition_state in state.key_state_iter(): 162 for key, transition_state in state.key_state_iter():
163 if not transition_state in incoming_transitions: 163 if not transition_state in incoming_transitions:
164 incoming_transitions[transition_state] = [] 164 incoming_transitions[transition_state] = []
165 incoming_transitions[transition_state].append((key, state)) 165 incoming_transitions[transition_state].append((key, state))
166 dfa.visit_all_states(f) 166 dfa.visit_all_states(f)
167 return incoming_transitions 167 return incoming_transitions
168 168
169 @staticmethod 169 @staticmethod
170 def __match_action(state):
171 state = state.omega_transition()
172 state = state if state and state.transition_count() == 0 else None
173 return Action.empty_action() if not state else state.action()
174
175 @staticmethod
176 def __build_replacement_map(dfa): 170 def __build_replacement_map(dfa):
177 replacements = {} 171 replacements = {}
178 encoding = dfa.encoding() 172 encoding = dfa.encoding()
179 incoming_transitions = DfaOptimizer.__build_incoming_transitions(dfa) 173 incoming_transitions = DfaOptimizer.__build_incoming_transitions(dfa)
180 replacement_targets = set([]) 174 replacement_targets = set([])
181 # TODO(dcarney): this should be an ordered walk 175 # TODO(dcarney): this should be an ordered walk
182 for state, incoming in incoming_transitions.items(): 176 for state, incoming in incoming_transitions.items():
183 if len(incoming) < 10: 177 if len(incoming) < 10:
184 continue 178 continue
185 # the states action will be replaced, so we just don't optimize if there 179 # the states action will be replaced, so we just don't optimize if there
186 # is one 180 # is one
187 if state.action(): # TODO(dcarney): allow this via action chaining 181 if state.action(): # TODO(dcarney): allow this via action chaining
188 continue 182 continue
189 # We only perform this optimization on 'token' actions 183 # We only perform this optimization on 'token' actions
190 match_action = DfaOptimizer.__match_action(state) 184 match_action = state.match_action()
191 if not match_action.name() == 'token': 185 if not match_action.name() == 'token':
192 continue 186 continue
193 assert state.omega_transition() in dfa.terminal_set() 187 assert state.omega_transition() in dfa.terminal_set()
194 # we can drop incoming edges for states with a match action 188 # we can drop incoming edges for states with a match action
195 # of either 'token' or 'harmony_token' 189 # of either 'token' or 'harmony_token'
196 def is_replacement_candidate(state): 190 def is_replacement_candidate(state):
197 action = DfaOptimizer.__match_action(state) 191 action = state.match_action()
198 return action.name() == 'token' or action.name() == 'harmony_token' 192 return action.name() == 'token' or action.name() == 'harmony_token'
199 for (incoming_key, incoming_state) in incoming: 193 for (incoming_key, incoming_state) in incoming:
200 # check to see if incoming edges are matched by an outgoing edge 194 # check to see if incoming edges are matched by an outgoing edge
201 if (incoming_state == state or # drop self edges 195 if (incoming_state == state or # drop self edges
202 incoming_key == TransitionKey.omega() or 196 incoming_key == TransitionKey.omega() or
203 not is_replacement_candidate(incoming_state) or 197 not is_replacement_candidate(incoming_state) or
204 not DfaOptimizer.__transistions_match( 198 not DfaOptimizer.__transistions_match(
205 encoding, incoming_key, incoming_state, state)): 199 encoding, incoming_key, incoming_state, state)):
206 continue 200 continue
207 assert (not incoming_state in replacements or 201 assert (not incoming_state in replacements or
(...skipping 21 matching lines...) Expand all
229 'action' : action, 223 'action' : action,
230 } 224 }
231 225
@staticmethod
def __new_state_name(state):
    """Return the stable string name for *state* in the rebuilt Dfa.

    The name is derived from the state's node number so it stays
    consistent across optimization passes.
    """
    return str(state.node_number())
@staticmethod
def __split_target_state(state):
    """Split *state* into a (head, tail, match) chain of new states.

    head stores the token that *state* used to match, tail is a no_op
    hop, and match emits the previously stored token.  Only plain
    'token' match actions are supported for now.
    """
    old_name = DfaOptimizer.__new_state_name(state)
    old_match_action = state.match_action()
    assert old_match_action.name() == 'token', 'unimplemented'
    precedence = old_match_action.precedence()
    stored_token = old_match_action.term().args()[0]
    # All three replacement actions inherit the original precedence.
    head_action = Action(Term('store_token', stored_token), precedence)
    tail_action = Action(Term('no_op', stored_token), precedence)
    match_action = Action(Term('do_stored_token'), precedence)
    head = DfaOptimizer.__new_state(False, head_action)
    tail = DfaOptimizer.__new_state(False, tail_action)
    match = DfaOptimizer.__new_state(True, match_action)
    # Chain head -> tail -> match via omega (default) transitions.
    head['transitions'][TransitionKey.omega()] = old_name + '_tail'
    tail['transitions'][TransitionKey.omega()] = old_name + '_match'
    return (head, tail, match)
@staticmethod
def __replacement_action(state, transition_state):
    """Build the store action that replaces *state*'s match action.

    If *transition_state* already performs an identical match action,
    nothing needs to be stored and the empty action is returned;
    otherwise the original action is wrapped as a 'store_<name>' term
    with the same arguments and precedence.
    """
    old_action = state.match_action()
    assert old_action
    if old_action.term() == transition_state.match_action().term():
        # Identical match actions: no token needs to be stored.
        return Action.empty_action()
    stored_name = 'store_' + old_action.name()
    stored_term = Term(stored_name, *old_action.term().args())
    return Action(stored_term, old_action.precedence())
266 @staticmethod 260 @staticmethod
267 def __apply_jump(counters, state, new_state, target_state): 261 def __apply_jump(counters, state, new_state, target_state):
268 # generate a new action for the new omega transition 262 # generate a new action for the new omega transition
(...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after
374 print 'store_harmony_token %s' % ( 368 print 'store_harmony_token %s' % (
375 counters['store_harmony_token']) 369 counters['store_harmony_token'])
376 print 'transitions removed %s' % counters['removals'] 370 print 'transitions removed %s' % counters['removals']
377 print 'states split %s' % counters['split_state'] 371 print 'states split %s' % counters['split_state']
378 return Dfa(self.__dfa.encoding(), start_name, states) 372 return Dfa(self.__dfa.encoding(), start_name, states)
379 373
@staticmethod
def optimize(dfa, log):
    """Entry point: return *dfa* with redundant token transitions
    rewritten as gotos; *log* enables statistics output."""
    return DfaOptimizer(dfa, log).__replace_tokens_with_gotos()
OLDNEW
« no previous file with comments | « tools/lexer_generator/dfa.py ('k') | tools/lexer_generator/dot_utilities.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698