Ply parser updated to 3.8 from http://www.dabeaz.com/ply/
The Ubuntu-packaged Ply 3.7 combined with Ubuntu 16.04's Python makes ASN.1-based
dissector generation fail. Ply's API changed after 3.5, and a small change to
asn2wrs.py adapts to it. The commit that broke the API in Ply's repository is
the following:

    commit af651673ba6117a0a5405055a92170fffd028106
    Author: David Beazley <dave@dabeaz.com>
    Date:   Tue Apr 21 16:31:32 2015 -0500

        Added optional support for defaulted states

Change-Id: I1db33fdcccf7c39ecdb0e435a5ea9183362471ad
Bug: 12621
Reviewed-on: https://code.wireshark.org/review/16864
Reviewed-by: Balint Reczey <balint@balintreczey.hu>
Petri-Dish: Balint Reczey <balint@balintreczey.hu>
Tested-by: Petri Dish Buildbot <buildbot-no-reply@wireshark.org>
Reviewed-by: João Valverde <j@v6e.pt>
Tested-by: João Valverde <j@v6e.pt>
parent 92eecfd255
commit d04be0149d
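For context, a minimal sketch (not part of the commit) of the calling-convention
change that asn2wrs.py adapts to; the flags `ld`/`yd` mirror eth_main()'s debug
flags, and the commented parse call with `asn1_source` is hypothetical:

    import lex    # Wireshark vendors PLY as tools/lex.py and tools/yacc.py
    import yacc

    ld, yd = 0, 0                 # lexer/parser debug flags, as in eth_main()
    lexer = lex.lex(debug=ld)

    # With the previously vendored Ply the return value of yacc.yacc() could
    # simply be ignored. Since the Ply commit above, built parsers carry a
    # defaulted_states table; keeping the returned parser object and clearing
    # that table opts out of the defaulted-states optimization:
    parser = yacc.yacc(method='LALR', debug=yd)
    parser.defaulted_states = {}

    # ast = parser.parse(asn1_source, lexer=lexer)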
tools/asn2wrs.py

@@ -7949,7 +7949,8 @@ def eth_main():
     if ectx.dbg('y'): yd = 1
     if ectx.dbg('p'): pd = 2
     lexer = lex.lex(debug=ld)
-    yacc.yacc(method='LALR', debug=yd)
+    parser = yacc.yacc(method='LALR', debug=yd)
+    parser.defaulted_states = {}
     g_conform = ectx.conform
     ast = []
     for fn in args:
tools/lex.py (482 lines changed; mode: executable file → normal file)
@@ -1,7 +1,7 @@
 # -----------------------------------------------------------------------------
 # ply: lex.py
 #
-# Copyright (C) 2001-2011,
+# Copyright (C) 2001-2015,
 # David M. Beazley (Dabeaz LLC)
 # All rights reserved.
 #
@@ -31,10 +31,15 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 # -----------------------------------------------------------------------------

-__version__ = "3.5"
-__tabversion__ = "3.5"       # Version of table file used
+__version__ = '3.8'
+__tabversion__ = '3.8'

-import re, sys, types, copy, os, inspect
+import re
+import sys
+import types
+import copy
+import os
+import inspect

 # This tuple contains known string types
 try:
@@ -44,59 +49,55 @@ except AttributeError:
     # Python 3.0
     StringTypes = (str, bytes)

-# Extract the code attribute of a function. Different implementations
-# are for Python 2/3 compatibility.
-
-if sys.version_info[0] < 3:
-    def func_code(f):
-        return f.func_code
-else:
-    def func_code(f):
-        return f.__code__
-
 # This regular expression is used to match valid token names
 _is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')

 # Exception thrown when invalid token encountered and no default error
 # handler is defined.
-
 class LexError(Exception):
     def __init__(self, message, s):
         self.args = (message,)
         self.text = s


 # Token class. This class is used to represent the tokens produced.
 class LexToken(object):
     def __str__(self):
-        return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
+        return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)

     def __repr__(self):
         return str(self)


 # This object is a stand-in for a logging object created by the
 # logging module.
-
 class PlyLogger(object):
     def __init__(self, f):
         self.f = f

     def critical(self, msg, *args, **kwargs):
-        self.f.write((msg % args) + "\n")
+        self.f.write((msg % args) + '\n')

     def warning(self, msg, *args, **kwargs):
-        self.f.write("WARNING: "+ (msg % args) + "\n")
+        self.f.write('WARNING: ' + (msg % args) + '\n')

     def error(self, msg, *args, **kwargs):
-        self.f.write("ERROR: " + (msg % args) + "\n")
+        self.f.write('ERROR: ' + (msg % args) + '\n')

     info = critical
     debug = critical


 # Null logger is used when no output is generated. Does nothing.
 class NullLogger(object):
     def __getattribute__(self, name):
         return self

     def __call__(self, *args, **kwargs):
         return self


 # -----------------------------------------------------------------------------
 # === Lexing Engine ===
 #
@@ -121,22 +122,24 @@ class Lexer:
         self.lexstatere = {}       # Dictionary mapping lexer states to master regexs
         self.lexstateretext = {}   # Dictionary mapping lexer states to regex strings
         self.lexstaterenames = {}  # Dictionary mapping lexer states to symbol names
-        self.lexstate = "INITIAL"  # Current lexer state
+        self.lexstate = 'INITIAL'  # Current lexer state
         self.lexstatestack = []    # Stack of lexer states
         self.lexstateinfo = None   # State information
         self.lexstateignore = {}   # Dictionary of ignored characters for each state
         self.lexstateerrorf = {}   # Dictionary of error functions for each state
         self.lexstateeoff = {}     # Dictionary of eof functions for each state
         self.lexreflags = 0        # Optional re compile flags
         self.lexdata = None        # Actual input data (as a string)
         self.lexpos = 0            # Current position in input text
         self.lexlen = 0            # Length of the input text
         self.lexerrorf = None      # Error rule (if any)
         self.lexeoff = None        # EOF rule (if any)
         self.lextokens = None      # List of valid tokens
-        self.lexignore = ""        # Ignored characters
-        self.lexliterals = ""      # Literal characters that can be passed through
+        self.lexignore = ''        # Ignored characters
+        self.lexliterals = ''      # Literal characters that can be passed through
         self.lexmodule = None      # Module
         self.lineno = 1            # Current line number
-        self.lexoptimize = 0       # Optimized mode
+        self.lexoptimize = False   # Optimized mode

     def clone(self, object=None):
         c = copy.copy(self)
@@ -168,45 +171,39 @@ class Lexer:
     # ------------------------------------------------------------
     # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
-    def writetab(self,tabfile,outputdir=""):
-        if isinstance(tabfile,types.ModuleType):
-            return
-        basetabfilename = tabfile.split(".")[-1]
-        filename = os.path.join(outputdir,basetabfilename)+".py"
-        tf = open(filename,"w")
-        tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
-        tf.write("_tabversion = %s\n" % repr(__tabversion__))
-        tf.write("_lextokens = %s\n" % repr(self.lextokens))
-        tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
-        tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
-        tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
+    def writetab(self, lextab, outputdir=''):
+        if isinstance(lextab, types.ModuleType):
+            raise IOError("Won't overwrite existing lextab module")
+        basetabmodule = lextab.split('.')[-1]
+        filename = os.path.join(outputdir, basetabmodule) + '.py'
+        with open(filename, 'w') as tf:
+            tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
+            tf.write('_tabversion = %s\n' % repr(__tabversion__))
+            tf.write('_lextokens = %s\n' % repr(self.lextokens))
+            tf.write('_lexreflags = %s\n' % repr(self.lexreflags))
+            tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
+            tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))

             # Rewrite the lexstatere table, replacing function objects with function names
             tabre = {}
-            # Collect all functions in the initial state
-            initial = self.lexstatere["INITIAL"]
-            initialfuncs = []
-            for part in initial:
-                for f in part[1]:
-                    if f and f[0]:
-                        initialfuncs.append(f)
-
-            for key, lre in self.lexstatere.items():
+            for statename, lre in self.lexstatere.items():
                 titem = []
-                for i in range(len(lre)):
-                    titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
-                tabre[key] = titem
+                for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
+                    titem.append((retext, _funcs_to_names(func, renames)))
+                tabre[statename] = titem

-            tf.write("_lexstatere = %s\n" % repr(tabre))
-            tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
+            tf.write('_lexstatere = %s\n' % repr(tabre))
+            tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))

             taberr = {}
-            for key, ef in self.lexstateerrorf.items():
-                if ef:
-                    taberr[key] = ef.__name__
-                else:
-                    taberr[key] = None
-            tf.write("_lexstateerrorf = %s\n" % repr(taberr))
-            tf.close()
+            for statename, ef in self.lexstateerrorf.items():
+                taberr[statename] = ef.__name__ if ef else None
+            tf.write('_lexstateerrorf = %s\n' % repr(taberr))

+            tabeof = {}
+            for statename, ef in self.lexstateeoff.items():
+                tabeof[statename] = ef.__name__ if ef else None
+            tf.write('_lexstateeoff = %s\n' % repr(tabeof))

     # ------------------------------------------------------------
     # readtab() - Read lexer information from a tab file
@@ -215,34 +212,37 @@ class Lexer:
         if isinstance(tabfile, types.ModuleType):
             lextab = tabfile
         else:
-            if sys.version_info[0] < 3:
-                exec("import %s as lextab" % tabfile)
-            else:
-                env = { }
-                exec("import %s as lextab" % tabfile, env,env)
-                lextab = env['lextab']
+            exec('import %s' % tabfile)
+            lextab = sys.modules[tabfile]

-        if getattr(lextab,"_tabversion","0.0") != __tabversion__:
-            raise ImportError("Inconsistent PLY version")
+        if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
+            raise ImportError('Inconsistent PLY version')

         self.lextokens = lextab._lextokens
         self.lexreflags = lextab._lexreflags
         self.lexliterals = lextab._lexliterals
+        self.lextokens_all = self.lextokens | set(self.lexliterals)
         self.lexstateinfo = lextab._lexstateinfo
         self.lexstateignore = lextab._lexstateignore
         self.lexstatere = {}
         self.lexstateretext = {}
-        for key,lre in lextab._lexstatere.items():
+        for statename, lre in lextab._lexstatere.items():
             titem = []
             txtitem = []
-            for i in range(len(lre)):
-                titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
-                txtitem.append(lre[i][0])
-            self.lexstatere[key] = titem
-            self.lexstateretext[key] = txtitem
+            for pat, func_name in lre:
+                titem.append((re.compile(pat, lextab._lexreflags | re.VERBOSE), _names_to_funcs(func_name, fdict)))
+
+            self.lexstatere[statename] = titem
+            self.lexstateretext[statename] = txtitem

         self.lexstateerrorf = {}
-        for key,ef in lextab._lexstateerrorf.items():
-            self.lexstateerrorf[key] = fdict[ef]
+        for statename, ef in lextab._lexstateerrorf.items():
+            self.lexstateerrorf[statename] = fdict[ef]

+        self.lexstateeoff = {}
+        for statename, ef in lextab._lexstateeoff.items():
+            self.lexstateeoff[statename] = fdict[ef]
+
         self.begin('INITIAL')

     # ------------------------------------------------------------
@@ -252,7 +252,7 @@ class Lexer:
         # Pull off the first character to see if s looks like a string
         c = s[:1]
         if not isinstance(c, StringTypes):
-            raise ValueError("Expected a string")
+            raise ValueError('Expected a string')
         self.lexdata = s
         self.lexpos = 0
         self.lexlen = len(s)
@@ -261,12 +261,13 @@ class Lexer:
     # begin() - Changes the lexing state
     # ------------------------------------------------------------
     def begin(self, state):
-        if not state in self.lexstatere:
-            raise ValueError("Undefined state")
+        if state not in self.lexstatere:
+            raise ValueError('Undefined state')
         self.lexre = self.lexstatere[state]
         self.lexretext = self.lexstateretext[state]
-        self.lexignore = self.lexstateignore.get(state,"")
+        self.lexignore = self.lexstateignore.get(state, '')
         self.lexerrorf = self.lexstateerrorf.get(state, None)
+        self.lexeoff = self.lexstateeoff.get(state, None)
         self.lexstate = state

     # ------------------------------------------------------------
@@ -317,7 +318,8 @@ class Lexer:
             # Look for a regular expression match
             for lexre, lexindexfunc in self.lexre:
                 m = lexre.match(lexdata, lexpos)
-                if not m: continue
+                if not m:
+                    continue

                 # Create a token for return
                 tok = LexToken()
@@ -355,9 +357,9 @@ class Lexer:

                 # Verify type of the token. If not in the token map, raise an error
                 if not self.lexoptimize:
-                    if not newtok.type in self.lextokens:
+                    if newtok.type not in self.lextokens_all:
                         raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
-                            func_code(func).co_filename, func_code(func).co_firstlineno,
+                            func.__code__.co_filename, func.__code__.co_firstlineno,
                             func.__name__, newtok.type), lexdata[lexpos:])

                 return newtok
@@ -377,7 +379,7 @@ class Lexer:
                 tok = LexToken()
                 tok.value = self.lexdata[lexpos:]
                 tok.lineno = self.lineno
-                tok.type = "error"
+                tok.type = 'error'
                 tok.lexer = self
                 tok.lexpos = lexpos
                 self.lexpos = lexpos
@@ -386,15 +388,27 @@ class Lexer:
                     # Error method didn't change text position at all. This is an error.
                     raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                 lexpos = self.lexpos
-                if not newtok: continue
+                if not newtok:
+                    continue
                 return newtok

             self.lexpos = lexpos
             raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])

+        if self.lexeoff:
+            tok = LexToken()
+            tok.type = 'eof'
+            tok.value = ''
+            tok.lineno = self.lineno
+            tok.lexpos = lexpos
+            tok.lexer = self
+            self.lexpos = lexpos
+            newtok = self.lexeoff(tok)
+            return newtok
+
         self.lexpos = lexpos + 1
         if self.lexdata is None:
-            raise RuntimeError("No input string given with input()")
+            raise RuntimeError('No input string given with input()')
         return None

     # Iterator interface
@@ -422,9 +436,8 @@ class Lexer:
 # Returns the regular expression assigned to a function either as a doc string
 # or as a .regex attribute attached by the @TOKEN decorator.
 # -----------------------------------------------------------------------------
-
 def _get_regex(func):
-    return getattr(func,"regex",func.__doc__)
+    return getattr(func, 'regex', func.__doc__)

 # -----------------------------------------------------------------------------
 # get_caller_module_dict()
@@ -433,20 +446,11 @@ def _get_regex(func):
 # a caller further down the call stack. This is used to get the environment
 # associated with the yacc() call if none was provided.
 # -----------------------------------------------------------------------------
-
 def get_caller_module_dict(levels):
-    try:
-        raise RuntimeError
-    except RuntimeError:
-        e,b,t = sys.exc_info()
-        f = t.tb_frame
-        while levels > 0:
-            f = f.f_back
-            levels -= 1
+    f = sys._getframe(levels)
     ldict = f.f_globals.copy()
     if f.f_globals != f.f_locals:
         ldict.update(f.f_locals)
-
     return ldict

 # -----------------------------------------------------------------------------
@@ -455,7 +459,6 @@ def get_caller_module_dict(levels):
 # Given a list of regular expression functions, this converts it to a list
 # suitable for output to a table file
 # -----------------------------------------------------------------------------
-
 def _funcs_to_names(funclist, namelist):
     result = []
     for f, name in zip(funclist, namelist):
@@ -471,7 +474,6 @@ def _funcs_to_names(funclist,namelist):
 # Given a list of regular expression function names, this converts it back to
 # functions.
 # -----------------------------------------------------------------------------
-
 def _names_to_funcs(namelist, fdict):
     result = []
     for n in namelist:
@@ -488,10 +490,10 @@ def _names_to_funcs(namelist,fdict):
 # form the master regular expression. Given limitations in the Python re
 # module, it may be necessary to break the master regex into separate expressions.
 # -----------------------------------------------------------------------------
-
 def _form_master_re(relist, reflags, ldict, toknames):
-    if not relist: return []
-    regex = "|".join(relist)
+    if not relist:
+        return []
+    regex = '|'.join(relist)
     try:
         lexre = re.compile(regex, re.VERBOSE | reflags)

@@ -506,7 +508,7 @@ def _form_master_re(relist,reflags,ldict,toknames):
                 lexindexnames[i] = f
             elif handle is not None:
                 lexindexnames[i] = f
-                if f.find("ignore_") > 0:
+                if f.find('ignore_') > 0:
                     lexindexfunc[i] = (None, None)
                 else:
                     lexindexfunc[i] = (None, toknames[f])
@@ -514,10 +516,11 @@ def _form_master_re(relist,reflags,ldict,toknames):
         return [(lexre, lexindexfunc)], [regex], [lexindexnames]
     except Exception:
         m = int(len(relist)/2)
-        if m == 0: m = 1
+        if m == 0:
+            m = 1
         llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
         rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
-        return llist+rlist, lre+rre, lnames+rnames
+        return (llist+rlist), (lre+rre), (lnames+rnames)

 # -----------------------------------------------------------------------------
 # def _statetoken(s,names)
@@ -527,12 +530,13 @@ def _form_master_re(relist,reflags,ldict,toknames):
 # is a tuple of state names and tokenname is the name of the token. For example,
 # calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
 # -----------------------------------------------------------------------------
-
 def _statetoken(s, names):
     nonstate = 1
-    parts = s.split("_")
-    for i in range(1,len(parts)):
-        if not parts[i] in names and parts[i] != 'ANY': break
+    parts = s.split('_')
+    for i, part in enumerate(parts[1:], 1):
+        if part not in names and part != 'ANY':
+            break
+
     if i > 1:
         states = tuple(parts[1:i])
     else:
@@ -541,7 +545,7 @@ def _statetoken(s,names):
     if 'ANY' in states:
         states = tuple(names)

-    tokenname = "_".join(parts[i:])
+    tokenname = '_'.join(parts[i:])
     return (states, tokenname)


@@ -558,13 +562,9 @@ class LexerReflect(object):
         self.tokens = []
         self.reflags = reflags
         self.stateinfo = {'INITIAL': 'inclusive'}
-        self.modules = {}
-        self.error = 0
-
-        if log is None:
-            self.log = PlyLogger(sys.stderr)
-        else:
-            self.log = log
+        self.modules = set()
+        self.error = False
+        self.log = PlyLogger(sys.stderr) if log is None else log

     # Get all of the basic information
     def get_all(self):
@@ -582,20 +582,20 @@ class LexerReflect(object):

     # Get the tokens map
     def get_tokens(self):
-        tokens = self.ldict.get("tokens",None)
+        tokens = self.ldict.get('tokens', None)
         if not tokens:
-            self.log.error("No token list is defined")
-            self.error = 1
+            self.log.error('No token list is defined')
+            self.error = True
             return

         if not isinstance(tokens, (list, tuple)):
-            self.log.error("tokens must be a list or tuple")
-            self.error = 1
+            self.log.error('tokens must be a list or tuple')
+            self.error = True
             return

         if not tokens:
-            self.log.error("tokens is empty")
-            self.error = 1
+            self.log.error('tokens is empty')
+            self.error = True
             return

         self.tokens = tokens
@@ -606,54 +606,54 @@ class LexerReflect(object):
         for n in self.tokens:
             if not _is_identifier.match(n):
                 self.log.error("Bad token name '%s'", n)
-                self.error = 1
+                self.error = True
             if n in terminals:
                 self.log.warning("Token '%s' multiply defined", n)
             terminals[n] = 1

     # Get the literals specifier
     def get_literals(self):
-        self.literals = self.ldict.get("literals","")
+        self.literals = self.ldict.get('literals', '')
         if not self.literals:
-            self.literals = ""
+            self.literals = ''

     # Validate literals
     def validate_literals(self):
         try:
             for c in self.literals:
                 if not isinstance(c, StringTypes) or len(c) > 1:
-                    self.log.error("Invalid literal %s. Must be a single character", repr(c))
-                    self.error = 1
+                    self.log.error('Invalid literal %s. Must be a single character', repr(c))
+                    self.error = True

         except TypeError:
-            self.log.error("Invalid literals specification. literals must be a sequence of characters")
-            self.error = 1
+            self.log.error('Invalid literals specification. literals must be a sequence of characters')
+            self.error = True

     def get_states(self):
-        self.states = self.ldict.get("states",None)
+        self.states = self.ldict.get('states', None)
         # Build statemap
         if self.states:
             if not isinstance(self.states, (tuple, list)):
-                self.log.error("states must be defined as a tuple or list")
-                self.error = 1
+                self.log.error('states must be defined as a tuple or list')
+                self.error = True
             else:
                 for s in self.states:
                     if not isinstance(s, tuple) or len(s) != 2:
                         self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
-                        self.error = 1
+                        self.error = True
                         continue
                     name, statetype = s
                     if not isinstance(name, StringTypes):
-                        self.log.error("State name %s must be a string", repr(name))
-                        self.error = 1
+                        self.log.error('State name %s must be a string', repr(name))
+                        self.error = True
                         continue
                     if not (statetype == 'inclusive' or statetype == 'exclusive'):
                         self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
-                        self.error = 1
+                        self.error = True
                         continue
                     if name in self.stateinfo:
                         self.log.error("State '%s' already defined", name)
-                        self.error = 1
+                        self.error = True
                         continue
                     self.stateinfo[name] = statetype

@@ -664,20 +664,20 @@ class LexerReflect(object):
         tsymbols = [f for f in self.ldict if f[:2] == 't_']

         # Now build up a list of functions and a list of strings

         self.toknames = {}    # Mapping of symbols to token names
         self.funcsym = {}     # Symbols defined as functions
         self.strsym = {}      # Symbols defined as strings
         self.ignore = {}      # Ignore strings by state
         self.errorf = {}      # Error functions by state
         self.eoff = {}        # EOF functions by state

         for s in self.stateinfo:
             self.funcsym[s] = []
             self.strsym[s] = []

         if len(tsymbols) == 0:
-            self.log.error("No rules of the form t_rulename are defined")
-            self.error = 1
+            self.log.error('No rules of the form t_rulename are defined')
+            self.error = True
             return

         for f in tsymbols:
@@ -685,15 +685,18 @@ class LexerReflect(object):
             states, tokname = _statetoken(f, self.stateinfo)
             self.toknames[f] = tokname

-            if hasattr(t,"__call__"):
+            if hasattr(t, '__call__'):
                 if tokname == 'error':
                     for s in states:
                         self.errorf[s] = t
+                elif tokname == 'eof':
+                    for s in states:
+                        self.eoff[s] = t
                 elif tokname == 'ignore':
-                    line = func_code(t).co_firstlineno
-                    file = func_code(t).co_filename
+                    line = t.__code__.co_firstlineno
+                    file = t.__code__.co_filename
                     self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
-                    self.error = 1
+                    self.error = True
                 else:
                     for s in states:
                         self.funcsym[s].append((f, t))
@@ -701,33 +704,25 @@ class LexerReflect(object):
                 if tokname == 'ignore':
                     for s in states:
                         self.ignore[s] = t
-                    if "\\" in t:
+                    if '\\' in t:
                         self.log.warning("%s contains a literal backslash '\\'", f)

                 elif tokname == 'error':
                     self.log.error("Rule '%s' must be defined as a function", f)
-                    self.error = 1
+                    self.error = True
                 else:
                     for s in states:
                         self.strsym[s].append((f, t))
             else:
-                self.log.error("%s not defined as a function or string", f)
-                self.error = 1
+                self.log.error('%s not defined as a function or string', f)
+                self.error = True

         # Sort the functions by line number
         for f in self.funcsym.values():
-            if sys.version_info[0] < 3:
-                f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
-            else:
-                # Python 3.0
-                f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
+            f.sort(key=lambda x: x[1].__code__.co_firstlineno)

         # Sort the strings by regular expression length
         for s in self.strsym.values():
-            if sys.version_info[0] < 3:
-                s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
-            else:
-                # Python 3.0
             s.sort(key=lambda x: len(x[1]), reverse=True)

         # Validate all of the t_rules collected
@@ -735,102 +730,97 @@ class LexerReflect(object):
         for state in self.stateinfo:
             # Validate all rules defined by functions

             for fname, f in self.funcsym[state]:
-                line = func_code(f).co_firstlineno
-                file = func_code(f).co_filename
+                line = f.__code__.co_firstlineno
+                file = f.__code__.co_filename
                 module = inspect.getmodule(f)
-                self.modules[module] = 1
+                self.modules.add(module)

                 tokname = self.toknames[fname]
                 if isinstance(f, types.MethodType):
                     reqargs = 2
                 else:
                     reqargs = 1
-                nargs = func_code(f).co_argcount
+                nargs = f.__code__.co_argcount
                 if nargs > reqargs:
                     self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
-                    self.error = 1
+                    self.error = True
                     continue

                 if nargs < reqargs:
                     self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
-                    self.error = 1
+                    self.error = True
                     continue

                 if not _get_regex(f):
                     self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
-                    self.error = 1
+                    self.error = True
                     continue

                 try:
-                    c = re.compile("(?P<%s>%s)" % (fname, _get_regex(f)), re.VERBOSE | self.reflags)
-                    if c.match(""):
+                    c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), re.VERBOSE | self.reflags)
+                    if c.match(''):
                         self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
-                        self.error = 1
-                except re.error:
-                    _etype, e, _etrace = sys.exc_info()
+                        self.error = True
+                except re.error as e:
                     self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
                     if '#' in _get_regex(f):
                         self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
-                    self.error = 1
+                    self.error = True

             # Validate all rules defined by strings
             for name, r in self.strsym[state]:
                 tokname = self.toknames[name]
                 if tokname == 'error':
                     self.log.error("Rule '%s' must be defined as a function", name)
-                    self.error = 1
+                    self.error = True
                     continue

-                if not tokname in self.tokens and tokname.find("ignore_") < 0:
+                if tokname not in self.tokens and tokname.find('ignore_') < 0:
                     self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
-                    self.error = 1
+                    self.error = True
                     continue

                 try:
-                    c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
-                    if (c.match("")):
+                    c = re.compile('(?P<%s>%s)' % (name, r), re.VERBOSE | self.reflags)
+                    if (c.match('')):
                         self.log.error("Regular expression for rule '%s' matches empty string", name)
-                        self.error = 1
-                except re.error:
-                    _etype, e, _etrace = sys.exc_info()
+                        self.error = True
+                except re.error as e:
                     self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
                     if '#' in r:
                         self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
-                    self.error = 1
+                    self.error = True

             if not self.funcsym[state] and not self.strsym[state]:
                 self.log.error("No rules defined for state '%s'", state)
-                self.error = 1
+                self.error = True

             # Validate the error function
             efunc = self.errorf.get(state, None)
             if efunc:
                 f = efunc
-                line = func_code(f).co_firstlineno
-                file = func_code(f).co_filename
+                line = f.__code__.co_firstlineno
+                file = f.__code__.co_filename
                 module = inspect.getmodule(f)
-                self.modules[module] = 1
+                self.modules.add(module)

                 if isinstance(f, types.MethodType):
                     reqargs = 2
                 else:
                     reqargs = 1
-                nargs = func_code(f).co_argcount
+                nargs = f.__code__.co_argcount
                 if nargs > reqargs:
                     self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
-                    self.error = 1
+                    self.error = True

                 if nargs < reqargs:
                     self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
-                    self.error = 1
+                    self.error = True

         for module in self.modules:
             self.validate_module(module)


 # -----------------------------------------------------------------------------
 # validate_module()
 #
@@ -847,10 +837,10 @@ class LexerReflect(object):

         counthash = {}
         linen += 1
-        for l in lines:
-            m = fre.match(l)
+        for line in lines:
+            m = fre.match(line)
             if not m:
-                m = sre.match(l)
+                m = sre.match(line)
             if m:
                 name = m.group(1)
                 prev = counthash.get(name)
@@ -858,8 +848,8 @@ class LexerReflect(object):
                 counthash[name] = linen
             else:
                 filename = inspect.getsourcefile(module)
-                self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
-                self.error = 1
+                self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
+                self.error = True
             linen += 1

 # -----------------------------------------------------------------------------
@@ -867,8 +857,14 @@ class LexerReflect(object):
 #
 # Build all of the regular expression rules from definitions in the supplied module
 # -----------------------------------------------------------------------------
-def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
+def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
+        reflags=0, nowarn=False, outputdir=None, debuglog=None, errorlog=None):
+
+    if lextab is None:
+        lextab = 'lextab'
+
     global lexer

     ldict = None
     stateinfo = {'INITIAL': 'inclusive'}
     lexobj = Lexer()
@@ -883,14 +879,26 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
         debuglog = PlyLogger(sys.stderr)

     # Get the module dictionary used for the lexer
-    if object: module = object
+    if object:
+        module = object

     # Get the module dictionary used for the parser
     if module:
         _items = [(k, getattr(module, k)) for k in dir(module)]
         ldict = dict(_items)
+        # If no __file__ attribute is available, try to obtain it from the __module__ instead
+        if '__file__' not in ldict:
+            ldict['__file__'] = sys.modules[ldict['__module__']].__file__
     else:
         ldict = get_caller_module_dict(2)

+    # Determine if the module is package of a package or not.
+    # If so, fix the tabmodule setting so that tables load correctly
+    pkg = ldict.get('__package__')
+    if pkg and isinstance(lextab, str):
+        if '.' not in lextab:
+            lextab = pkg + '.' + lextab
+
     # Collect parser information from the dictionary
     linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
     linfo.get_all()
@@ -911,14 +919,14 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):

     # Dump some basic debugging information
     if debug:
-        debuglog.info("lex: tokens = %r", linfo.tokens)
-        debuglog.info("lex: literals = %r", linfo.literals)
-        debuglog.info("lex: states = %r", linfo.stateinfo)
+        debuglog.info('lex: tokens = %r', linfo.tokens)
+        debuglog.info('lex: literals = %r', linfo.literals)
+        debuglog.info('lex: states = %r', linfo.stateinfo)

     # Build a dictionary of valid token names
-    lexobj.lextokens = { }
+    lexobj.lextokens = set()
     for n in linfo.tokens:
-        lexobj.lextokens[n] = 1
+        lexobj.lextokens.add(n)

     # Get literals specification
     if isinstance(linfo.literals, (list, tuple)):
@@ -926,6 +934,8 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
     else:
         lexobj.lexliterals = linfo.literals

+    lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)
+
     # Get the stateinfo dictionary
     stateinfo = linfo.stateinfo

@@ -936,15 +946,15 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):

         # Add rules defined by functions first
         for fname, f in linfo.funcsym[state]:
-            line = func_code(f).co_firstlineno
-            file = func_code(f).co_filename
-            regex_list.append("(?P<%s>%s)" % (fname,_get_regex(f)))
+            line = f.__code__.co_firstlineno
+            file = f.__code__.co_filename
+            regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
             if debug:
                 debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)

         # Now add all of the simple rules
         for name, r in linfo.strsym[state]:
-            regex_list.append("(?P<%s>%s)" % (name,r))
+            regex_list.append('(?P<%s>%s)' % (name, r))
             if debug:
                 debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)

@@ -953,7 +963,7 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
     # Build the master regular expressions

     if debug:
-        debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
+        debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')

     for state in regexs:
         lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
@@ -961,43 +971,47 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
         lexobj.lexstateretext[state] = re_text
         lexobj.lexstaterenames[state] = re_names
         if debug:
-            for i in range(len(re_text)):
-                debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
+            for i, text in enumerate(re_text):
+                debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)

     # For inclusive states, we need to add the regular expressions from the INITIAL state
     for state, stype in stateinfo.items():
-        if state != "INITIAL" and stype == 'inclusive':
+        if state != 'INITIAL' and stype == 'inclusive':
             lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
             lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
             lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])

     lexobj.lexstateinfo = stateinfo
-    lexobj.lexre = lexobj.lexstatere["INITIAL"]
-    lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
+    lexobj.lexre = lexobj.lexstatere['INITIAL']
+    lexobj.lexretext = lexobj.lexstateretext['INITIAL']
     lexobj.lexreflags = reflags

     # Set up ignore variables
     lexobj.lexstateignore = linfo.ignore
-    lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
+    lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')

     # Set up error functions
     lexobj.lexstateerrorf = linfo.errorf
-    lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
+    lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
     if not lexobj.lexerrorf:
-        errorlog.warning("No t_error rule is defined")
+        errorlog.warning('No t_error rule is defined')

+    # Set up eof functions
+    lexobj.lexstateeoff = linfo.eoff
+    lexobj.lexeoff = linfo.eoff.get('INITIAL', None)
+
     # Check state information for ignore and error rules
     for s, stype in stateinfo.items():
         if stype == 'exclusive':
-            if not s in linfo.errorf:
+            if s not in linfo.errorf:
                 errorlog.warning("No error rule is defined for exclusive state '%s'", s)
-            if not s in linfo.ignore and lexobj.lexignore:
+            if s not in linfo.ignore and lexobj.lexignore:
                 errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
         elif stype == 'inclusive':
-            if not s in linfo.errorf:
-                linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
-            if not s in linfo.ignore:
-                linfo.ignore[s] = linfo.ignore.get("INITIAL","")
+            if s not in linfo.errorf:
+                linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
+            if s not in linfo.ignore:
+                linfo.ignore[s] = linfo.ignore.get('INITIAL', '')

     # Create global versions of the token() and input() functions
     token = lexobj.token
@@ -1006,7 +1020,26 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):

     # If in optimize mode, we write the lextab
     if lextab and optimize:
+        if outputdir is None:
+            # If no output directory is set, the location of the output files
+            # is determined according to the following rules:
+            #     - If lextab specifies a package, files go into that package directory
+            #     - Otherwise, files go in the same directory as the specifying module
+            if isinstance(lextab, types.ModuleType):
+                srcfile = lextab.__file__
+            else:
+                if '.' not in lextab:
+                    srcfile = ldict['__file__']
+                else:
+                    parts = lextab.split('.')
+                    pkgname = '.'.join(parts[:-1])
+                    exec('import %s' % pkgname)
+                    srcfile = getattr(sys.modules[pkgname], '__file__', '')
+            outputdir = os.path.dirname(srcfile)
+        try:
+            lexobj.writetab(lextab, outputdir)
+        except IOError as e:
+            errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))

     return lexobj

@@ -1024,7 +1057,7 @@ def runmain(lexer=None,data=None):
         data = f.read()
         f.close()
     except IndexError:
-        sys.stdout.write("Reading from standard input (type EOF to end):\n")
+        sys.stdout.write('Reading from standard input (type EOF to end):\n')
         data = sys.stdin.read()

     if lexer:
@@ -1037,10 +1070,11 @@ def runmain(lexer=None,data=None):
     else:
         _token = token

-    while 1:
+    while True:
         tok = _token()
-        if not tok: break
-        sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
+        if not tok:
+            break
+        sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))

 # -----------------------------------------------------------------------------
 # @TOKEN(regex)
@@ -1051,7 +1085,7 @@ def runmain(lexer=None,data=None):

 def TOKEN(r):
     def set_regex(f):
-        if hasattr(r,"__call__"):
+        if hasattr(r, '__call__'):
             f.regex = _get_regex(r)
         else:
             f.regex = r

tools/yacc.py (1124 lines changed; mode: executable file → normal file)
(diff suppressed because it is too large)
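A side note on one feature visible in the lex.py diff above: the new Ply
persists t_eof handlers in the generated table files (the added
_lexstateeoff entries and the 'eof' branch in get_rules). A minimal,
hypothetical sketch of a t_eof rule using the vendored module; the token
names and input string are illustrative only:

    import lex    # Wireshark's vendored tools/lex.py

    tokens = ('WORD',)
    t_WORD = r'\w+'
    t_ignore = ' \t\n'

    def t_eof(t):
        # Called once the input is exhausted; returning None stops
        # tokenizing (a real rule could refill t.lexer.input() here).
        return None

    def t_error(t):
        t.lexer.skip(1)

    lexer = lex.lex()
    lexer.input('hello world')
    print([tok.value for tok in lexer])    # ['hello', 'world']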