#!/usr/bin/env python
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
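
"""Parser generator for pgen2.

This module reads a grammar description (such as Grammar.txt), builds an
NFA for each rule, converts each NFA to a DFA (subset construction),
simplifies the DFA, and packages the result as a Grammar object for the
pgen2 driver.
"""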

# Pgen imports
#import grammar, token, tokenize
# NOTE: Need these special versions of token/tokenize for BACKQUOTE and such.
from . import grammar, token, tokenize

class PgenGrammar(grammar.Grammar):
    pass

class ParserGenerator(object):

    def __init__(self, filename, stream=None):
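        """Read and parse the grammar file, then precompute FIRST sets."""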
        close_stream = None
        if stream is None:
            stream = open(filename)
            close_stream = stream.close
        self.filename = filename
        self.stream = stream
        self.generator = tokenize.generate_tokens(stream.readline)
        self.gettoken() # Initialize lookahead
        self.dfas, self.startsymbol = self.parse()
        if close_stream is not None:
            close_stream()
        self.first = {} # map from symbol name to set of tokens
        self.addfirstsets()

    def make_grammar(self):
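        """Convert the parsed rule DFAs into a PgenGrammar object.

        Nonterminals are numbered upward from 256 so their numbers cannot
        collide with token numbers, with the start symbol numbered first.
        """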
        c = PgenGrammar()
        names = list(self.dfas.keys())
        names.sort()
        names.remove(self.startsymbol)
        names.insert(0, self.startsymbol)
        for name in names:
            i = 256 + len(c.symbol2number)
            c.symbol2number[name] = i
            c.number2symbol[i] = name
        for name in names:
            dfa = self.dfas[name]
            states = []
            for state in dfa:
                arcs = []
                for label, next in sorted(state.arcs.items()):
                    arcs.append((self.make_label(c, label), dfa.index(next)))
                if state.isfinal:
                    arcs.append((0, dfa.index(state)))
                states.append(arcs)
            c.states.append(states)
            c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
        c.start = c.symbol2number[self.startsymbol]
        return c

    def make_first(self, c, name):
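        """Turn the FIRST set of *name* into a dict mapping ilabel -> 1."""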
        rawfirst = self.first[name]
        first = {}
        for label in sorted(rawfirst):
            ilabel = self.make_label(c, label)
            ##assert ilabel not in first # XXX failed on <> ... !=
            first[ilabel] = 1
        return first

    def make_label(self, c, label):
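        """Return the ilabel for a grammar label, interning it if new.

        A label is a nonterminal name, a named token type (e.g. NAME,
        NUMBER, STRING), a quoted keyword, or a quoted operator; each kind
        is cached in its own table on the grammar object.
        """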
        # XXX Maybe this should be a method on a subclass of converter?
        ilabel = len(c.labels)
        if label[0].isalpha():
            # Either a symbol name or a named token
            if label in c.symbol2number:
                # A symbol name (a non-terminal)
                if label in c.symbol2label:
                    return c.symbol2label[label]
                else:
                    c.labels.append((c.symbol2number[label], None))
                    c.symbol2label[label] = ilabel
                    return ilabel
            else:
                # A named token (NAME, NUMBER, STRING)
                itoken = getattr(token, label, None)
                assert isinstance(itoken, int), label
                assert itoken in token.tok_name, label
                if itoken in c.tokens:
                    return c.tokens[itoken]
                else:
                    c.labels.append((itoken, None))
                    c.tokens[itoken] = ilabel
                    return ilabel
        else:
            # Either a keyword or an operator
            assert label[0] in ('"', "'"), label
            value = eval(label)
            if value[0].isalpha():
                # A keyword
                if value in c.keywords:
                    return c.keywords[value]
                else:
                    c.labels.append((token.NAME, value))
                    c.keywords[value] = ilabel
                    return ilabel
            else:
                # An operator (any non-numeric token)
                itoken = grammar.opmap[value] # Fails if unknown token
                if itoken in c.tokens:
                    return c.tokens[itoken]
                else:
                    c.labels.append((itoken, None))
                    c.tokens[itoken] = ilabel
                    return ilabel

    def addfirstsets(self):
        names = list(self.dfas.keys())
        names.sort()
        for name in names:
            if name not in self.first:
                self.calcfirst(name)
            #print name, self.first[name].keys()

    def calcfirst(self, name):
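        """Compute the FIRST set of rule *name*, recursing into other rules.

        Raises ValueError on left recursion, or when two alternatives have
        overlapping FIRST sets (one token of lookahead must suffice).
        """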
        dfa = self.dfas[name]
        self.first[name] = None # dummy to detect left recursion
        state = dfa[0]
        totalset = {}
        overlapcheck = {}
        for label, next in state.arcs.items():
            if label in self.dfas:
                if label in self.first:
                    fset = self.first[label]
                    if fset is None:
                        raise ValueError("recursion for rule %r" % name)
                else:
                    self.calcfirst(label)
                    fset = self.first[label]
                totalset.update(fset)
                overlapcheck[label] = fset
            else:
                totalset[label] = 1
                overlapcheck[label] = {label: 1}
        inverse = {}
        for label, itsfirst in overlapcheck.items():
            for symbol in itsfirst:
                if symbol in inverse:
                    raise ValueError("rule %s is ambiguous; %s is in the"
                                     " first sets of %s as well as %s" %
                                     (name, symbol, label, inverse[symbol]))
                inverse[symbol] = label
        self.first[name] = totalset

    def parse(self):
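        """Parse the grammar file into a map from rule name to DFA.

        The meta-grammar accepted here is:

            MSTART: (NEWLINE | RULE)* ENDMARKER
            RULE: NAME ':' RHS NEWLINE
            RHS: ALT ('|' ALT)*
            ALT: ITEM+
            ITEM: '[' RHS ']' | ATOM ['+' | '*']
            ATOM: '(' RHS ')' | NAME | STRING

        Returns (dfas, startsymbol); the start symbol is the first rule.
        """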
        dfas = {}
        startsymbol = None
        # MSTART: (NEWLINE | RULE)* ENDMARKER
        while self.type != token.ENDMARKER:
            while self.type == token.NEWLINE:
                self.gettoken()
            # RULE: NAME ':' RHS NEWLINE
            name = self.expect(token.NAME)
            self.expect(token.OP, ":")
            a, z = self.parse_rhs()
            self.expect(token.NEWLINE)
            #self.dump_nfa(name, a, z)
            dfa = self.make_dfa(a, z)
            #self.dump_dfa(name, dfa)
            oldlen = len(dfa)
            self.simplify_dfa(dfa)
            newlen = len(dfa)
            dfas[name] = dfa
            #print name, oldlen, newlen
            if startsymbol is None:
                startsymbol = name
        return dfas, startsymbol

    def make_dfa(self, start, finish):
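        """Convert the NFA between *start* and *finish* into a DFA.

        This is the classic subset construction; see the comment below.
        """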
        # To turn an NFA into a DFA, we define the states of the DFA
        # to correspond to *sets* of states of the NFA.  Then do some
        # state reduction.  Let's represent sets as dicts with 1 for
        # values.
        assert isinstance(start, NFAState)
        assert isinstance(finish, NFAState)
        def closure(state):
            base = {}
            addclosure(state, base)
            return base
        def addclosure(state, base):
            assert isinstance(state, NFAState)
            if state in base:
                return
            base[state] = 1
            for label, next in state.arcs:
                if label is None:
                    addclosure(next, base)
        states = [DFAState(closure(start), finish)]
        for state in states: # NB states grows while we're iterating
            arcs = {}
            for nfastate in state.nfaset:
                for label, next in nfastate.arcs:
                    if label is not None:
                        addclosure(next, arcs.setdefault(label, {}))
            for label, nfaset in sorted(arcs.items()):
                for st in states:
                    if st.nfaset == nfaset:
                        break
                else:
                    st = DFAState(nfaset, finish)
                    states.append(st)
                state.addarc(st, label)
        return states # List of DFAState instances; first one is start

    def dump_nfa(self, name, start, finish):
        print("Dump of NFA for", name)
        todo = [start]
        for i, state in enumerate(todo):
            print("  State", i, state is finish and "(final)" or "")
            for label, next in state.arcs:
                if next in todo:
                    j = todo.index(next)
                else:
                    j = len(todo)
                    todo.append(next)
                if label is None:
                    print("    -> %d" % j)
                else:
                    print("    %s -> %d" % (label, j))

    def dump_dfa(self, name, dfa):
        print("Dump of DFA for", name)
        for i, state in enumerate(dfa):
            print("  State", i, state.isfinal and "(final)" or "")
            for label, next in sorted(state.arcs.items()):
                print("    %s -> %d" % (label, dfa.index(next)))

    def simplify_dfa(self, dfa):
        # This is not theoretically optimal, but works well enough.
        # Algorithm: repeatedly look for two states that have the same
        # set of arcs (same labels pointing to the same nodes) and
        # unify them, until things stop changing.

        # dfa is a list of DFAState instances
        changes = True
        while changes:
            changes = False
            for i, state_i in enumerate(dfa):
                for j in range(i+1, len(dfa)):
                    state_j = dfa[j]
                    if state_i == state_j:
                        #print "  unify", i, j
                        del dfa[j]
                        for state in dfa:
                            state.unifystate(state_j, state_i)
                        changes = True
                        break

    def parse_rhs(self):
        # RHS: ALT ('|' ALT)*
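        # For alternatives, create new states aa and zz and connect each
        # alternative between them with epsilon arcs:
        #
        #        +--(alt 1)--+
        #   aa --+           +--> zz
        #        +--(alt 2)--+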
        a, z = self.parse_alt()
        if self.value != "|":
            return a, z
        else:
            aa = NFAState()
            zz = NFAState()
            aa.addarc(a)
            z.addarc(zz)
            while self.value == "|":
                self.gettoken()
                a, z = self.parse_alt()
                aa.addarc(a)
                z.addarc(zz)
            return aa, zz

    def parse_alt(self):
        # ALT: ITEM+
        a, b = self.parse_item()
        while (self.value in ("(", "[") or
               self.type in (token.NAME, token.STRING)):
            c, d = self.parse_item()
            b.addarc(c)
            b = d
        return a, b

    def parse_item(self):
        # ITEM: '[' RHS ']' | ATOM ['+' | '*']
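        # '[' RHS ']' adds an epsilon arc a->z, making the item optional.
        # ATOM '+' adds a back arc z->a (one or more repetitions); for
        # ATOM '*' we also return (a, a), so the atom may additionally be
        # skipped entirely (zero or more).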
        if self.value == "[":
            self.gettoken()
            a, z = self.parse_rhs()
            self.expect(token.OP, "]")
            a.addarc(z)
            return a, z
        else:
            a, z = self.parse_atom()
            value = self.value
            if value not in ("+", "*"):
                return a, z
            self.gettoken()
            z.addarc(a)
            if value == "+":
                return a, z
            else:
                return a, a

    def parse_atom(self):
        # ATOM: '(' RHS ')' | NAME | STRING
        if self.value == "(":
            self.gettoken()
            a, z = self.parse_rhs()
            self.expect(token.OP, ")")
            return a, z
        elif self.type in (token.NAME, token.STRING):
            a = NFAState()
            z = NFAState()
            a.addarc(z, self.value)
            self.gettoken()
            return a, z
        else:
            self.raise_error("expected (...) or NAME or STRING, got %s/%s",
                             self.type, self.value)

    def expect(self, type, value=None):
        if self.type != type or (value is not None and self.value != value):
            self.raise_error("expected %s/%s, got %s/%s",
                             type, value, self.type, self.value)
        value = self.value
        self.gettoken()
        return value

    def gettoken(self):
        tup = next(self.generator)
        while tup[0] in (tokenize.COMMENT, tokenize.NL):
            tup = next(self.generator)
        self.type, self.value, self.begin, self.end, self.line = tup
        #print token.tok_name[self.type], repr(self.value)

    def raise_error(self, msg, *args):
        if args:
            try:
                msg = msg % args
            except:
                msg = " ".join([msg] + list(map(str, args)))
        raise SyntaxError(msg, (self.filename, self.end[0],
                                self.end[1], self.line))

class NFAState(object):
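    """A state in the NFA built from one grammar rule.

    Arcs are (label, NFAState) pairs; a label of None is an epsilon
    (free) transition.
    """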

    def __init__(self):
        self.arcs = [] # list of (label, NFAState) pairs

    def addarc(self, next, label=None):
        assert label is None or isinstance(label, str)
        assert isinstance(next, NFAState)
        self.arcs.append((label, next))

class DFAState(object):
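    """A DFA state: the set of NFA states it was built from, plus arcs.

    The state is final iff the NFA's final state belongs to that set.
    """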

    def __init__(self, nfaset, final):
        assert isinstance(nfaset, dict)
        assert isinstance(next(iter(nfaset)), NFAState)
        assert isinstance(final, NFAState)
        self.nfaset = nfaset
        self.isfinal = final in nfaset
        self.arcs = {} # map from label to DFAState

    def addarc(self, next, label):
        assert isinstance(label, str)
        assert label not in self.arcs
        assert isinstance(next, DFAState)
        self.arcs[label] = next

    def unifystate(self, old, new):
        for label, next in self.arcs.items():
            if next is old:
                self.arcs[label] = new

    def __eq__(self, other):
        # Equality test -- ignore the nfaset instance variable
        assert isinstance(other, DFAState)
        if self.isfinal != other.isfinal:
            return False
        # Can't just return self.arcs == other.arcs, because that
        # would invoke this method recursively, with cycles...
        if len(self.arcs) != len(other.arcs):
            return False
        for label, next in self.arcs.items():
            if next is not other.arcs.get(label):
                return False
        return True

    __hash__ = None # For Py3 compatibility.


def generate_grammar(filename="Grammar.txt"):
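    """Build and return a PgenGrammar from the grammar file *filename*."""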
    # NOTE: This builds the dfa/nfa on the fly. It doesn't make an AST.
    # I think I want pgen.asdl, and then I can interpret that.
    p = ParserGenerator(filename)
    return p.make_grammar()


def main():
    print(generate_grammar())


if __name__ == '__main__':
    main()