Ply parser updated to 3.8 from http://www.dabeaz.com/ply/

The Ply 3.7 packaged with Ubuntu 16.04's Python makes
ASN.1-based dissector generation fail.

Ply's API changed after 3.5, and a small change to asn2wrs.py
adapts to that.
The commit that broke the API in Ply's repository is the following:

 commit af651673ba6117a0a5405055a92170fffd028106
 Author: David Beazley <dave@dabeaz.com>
 Date:   Tue Apr 21 16:31:32 2015 -0500

    Added optional support for defaulted states

Change-Id: I1db33fdcccf7c39ecdb0e435a5ea9183362471ad
Bug: 12621
Reviewed-on: https://code.wireshark.org/review/16864
Reviewed-by: Balint Reczey <balint@balintreczey.hu>
Petri-Dish: Balint Reczey <balint@balintreczey.hu>
Tested-by: Petri Dish Buildbot <buildbot-no-reply@wireshark.org>
Reviewed-by: João Valverde <j@v6e.pt>
Tested-by: João Valverde <j@v6e.pt>
Balint Reczey authored on 2016-08-03 23:28:28 +02:00; committed by João Valverde
parent 92eecfd255
commit d04be0149d
3 changed files with 1514 additions and 1339 deletions

tools/asn2wrs.py

@@ -7949,7 +7949,8 @@ def eth_main():
     if ectx.dbg('y'): yd = 1
     if ectx.dbg('p'): pd = 2
     lexer = lex.lex(debug=ld)
-    yacc.yacc(method='LALR', debug=yd)
+    parser = yacc.yacc(method='LALR', debug=yd)
+    parser.defaulted_states = {}
     g_conform = ectx.conform
     ast = []
     for fn in args:
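For context: the hunk above opts out of Ply's new defaulted-states optimization
(introduced by the Ply commit quoted in the message), under which the parser
reduces in single-action states without fetching a lookahead token first. A
minimal self-contained sketch of the same workaround; the toy grammar is
illustrative only and not from the Wireshark tree:

# sketch.py - illustrative Ply grammar showing the defaulted_states workaround
import ply.lex as lex
import ply.yacc as yacc

tokens = ('NUMBER', 'PLUS')
t_PLUS = r'\+'
t_ignore = ' \t'

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)

precedence = (('left', 'PLUS'),)

def p_expr_plus(p):
    'expr : expr PLUS expr'
    p[0] = p[1] + p[3]

def p_expr_number(p):
    'expr : NUMBER'
    p[0] = p[1]

def p_error(p):
    print('syntax error at %r' % (p,))

lexer = lex.lex()
parser = yacc.yacc(method='LALR')
# Ply >= 3.6 precomputes "defaulted" states in which it reduces without
# reading a lookahead token; emptying the table restores the pre-3.6
# behavior that the caller may depend on.
parser.defaulted_states = {}
print(parser.parse('1+2', lexer=lexer))  # prints 3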

tools/lex.py Executable file → Normal file

@@ -1,7 +1,7 @@
 # -----------------------------------------------------------------------------
 # ply: lex.py
 #
-# Copyright (C) 2001-2011,
+# Copyright (C) 2001-2015,
 # David M. Beazley (Dabeaz LLC)
 # All rights reserved.
 #
@@ -31,10 +31,15 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 # -----------------------------------------------------------------------------

-__version__ = "3.5"
-__tabversion__ = "3.5" # Version of table file used
+__version__ = '3.8'
+__tabversion__ = '3.8'

-import re, sys, types, copy, os, inspect
+import re
+import sys
+import types
+import copy
+import os
+import inspect

 # This tuple contains known string types
 try:
@@ -44,59 +49,55 @@ except AttributeError:
     # Python 3.0
     StringTypes = (str, bytes)

-# Extract the code attribute of a function. Different implementations
-# are for Python 2/3 compatibility.
-
-if sys.version_info[0] < 3:
-    def func_code(f):
-        return f.func_code
-else:
-    def func_code(f):
-        return f.__code__
-
 # This regular expression is used to match valid token names
 _is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')

 # Exception thrown when invalid token encountered and no default error
 # handler is defined.
 class LexError(Exception):
     def __init__(self, message, s):
         self.args = (message,)
         self.text = s

 # Token class. This class is used to represent the tokens produced.
 class LexToken(object):
     def __str__(self):
-        return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
+        return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)

     def __repr__(self):
         return str(self)

 # This object is a stand-in for a logging object created by the
 # logging module.
 class PlyLogger(object):
     def __init__(self, f):
         self.f = f

     def critical(self, msg, *args, **kwargs):
-        self.f.write((msg % args) + "\n")
+        self.f.write((msg % args) + '\n')

     def warning(self, msg, *args, **kwargs):
-        self.f.write("WARNING: "+ (msg % args) + "\n")
+        self.f.write('WARNING: ' + (msg % args) + '\n')

     def error(self, msg, *args, **kwargs):
-        self.f.write("ERROR: " + (msg % args) + "\n")
+        self.f.write('ERROR: ' + (msg % args) + '\n')

     info = critical
     debug = critical

 # Null logger is used when no output is generated. Does nothing.
 class NullLogger(object):
     def __getattribute__(self, name):
         return self

     def __call__(self, *args, **kwargs):
         return self

 # -----------------------------------------------------------------------------
 # === Lexing Engine ===
 #
@@ -121,22 +122,24 @@ class Lexer:
         self.lexstatere = {}       # Dictionary mapping lexer states to master regexs
         self.lexstateretext = {}   # Dictionary mapping lexer states to regex strings
         self.lexstaterenames = {}  # Dictionary mapping lexer states to symbol names
-        self.lexstate = "INITIAL"  # Current lexer state
+        self.lexstate = 'INITIAL'  # Current lexer state
         self.lexstatestack = []    # Stack of lexer states
         self.lexstateinfo = None   # State information
         self.lexstateignore = {}   # Dictionary of ignored characters for each state
         self.lexstateerrorf = {}   # Dictionary of error functions for each state
+        self.lexstateeoff = {}     # Dictionary of eof functions for each state
         self.lexreflags = 0        # Optional re compile flags
         self.lexdata = None        # Actual input data (as a string)
         self.lexpos = 0            # Current position in input text
         self.lexlen = 0            # Length of the input text
         self.lexerrorf = None      # Error rule (if any)
+        self.lexeoff = None        # EOF rule (if any)
         self.lextokens = None      # List of valid tokens
-        self.lexignore = ""        # Ignored characters
-        self.lexliterals = ""      # Literal characters that can be passed through
+        self.lexignore = ''        # Ignored characters
+        self.lexliterals = ''      # Literal characters that can be passed through
         self.lexmodule = None      # Module
         self.lineno = 1            # Current line number
-        self.lexoptimize = 0       # Optimized mode
+        self.lexoptimize = False   # Optimized mode

     def clone(self, object=None):
         c = copy.copy(self)
@@ -168,45 +171,39 @@ class Lexer:
     # ------------------------------------------------------------
     # writetab() - Write lexer information to a table file
     # ------------------------------------------------------------
-    def writetab(self, tabfile, outputdir=""):
-        if isinstance(tabfile, types.ModuleType):
-            return
-        basetabfilename = tabfile.split(".")[-1]
-        filename = os.path.join(outputdir, basetabfilename) + ".py"
-        tf = open(filename, "w")
-        tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile, __version__))
-        tf.write("_tabversion = %s\n" % repr(__tabversion__))
-        tf.write("_lextokens = %s\n" % repr(self.lextokens))
-        tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
-        tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
-        tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
-
-        tabre = {}
-        # Collect all functions in the initial state
-        initial = self.lexstatere["INITIAL"]
-        initialfuncs = []
-        for part in initial:
-            for f in part[1]:
-                if f and f[0]:
-                    initialfuncs.append(f)
-
-        for key, lre in self.lexstatere.items():
-            titem = []
-            for i in range(len(lre)):
-                titem.append((self.lexstateretext[key][i], _funcs_to_names(lre[i][1], self.lexstaterenames[key][i])))
-            tabre[key] = titem
-
-        tf.write("_lexstatere = %s\n" % repr(tabre))
-        tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
-
-        taberr = {}
-        for key, ef in self.lexstateerrorf.items():
-            if ef:
-                taberr[key] = ef.__name__
-            else:
-                taberr[key] = None
-        tf.write("_lexstateerrorf = %s\n" % repr(taberr))
-        tf.close()
+    def writetab(self, lextab, outputdir=''):
+        if isinstance(lextab, types.ModuleType):
+            raise IOError("Won't overwrite existing lextab module")
+        basetabmodule = lextab.split('.')[-1]
+        filename = os.path.join(outputdir, basetabmodule) + '.py'
+        with open(filename, 'w') as tf:
+            tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
+            tf.write('_tabversion = %s\n' % repr(__tabversion__))
+            tf.write('_lextokens = %s\n' % repr(self.lextokens))
+            tf.write('_lexreflags = %s\n' % repr(self.lexreflags))
+            tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
+            tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))
+
+            # Rewrite the lexstatere table, replacing function objects with function names
+            tabre = {}
+            for statename, lre in self.lexstatere.items():
+                titem = []
+                for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
+                    titem.append((retext, _funcs_to_names(func, renames)))
+                tabre[statename] = titem
+
+            tf.write('_lexstatere = %s\n' % repr(tabre))
+            tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))
+
+            taberr = {}
+            for statename, ef in self.lexstateerrorf.items():
+                taberr[statename] = ef.__name__ if ef else None
+            tf.write('_lexstateerrorf = %s\n' % repr(taberr))
+
+            tabeof = {}
+            for statename, ef in self.lexstateeoff.items():
+                tabeof[statename] = ef.__name__ if ef else None
+            tf.write('_lexstateeoff = %s\n' % repr(tabeof))

     # ------------------------------------------------------------
     # readtab() - Read lexer information from a tab file
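A side effect visible in the hunk above: writetab() now refuses a lextab that
is already a module instead of silently returning, so optimize-mode callers
pass the table module by name. A hedged sketch of that call; the module name
calclex is hypothetical:

# calclex would be a module defining the usual tokens list and t_* rules.
# On the first run lex() calls writetab() to emit calctab.py; later runs
# import the cached tables and skip re-validating the rules.
import ply.lex as lex
import calclex  # hypothetical lexer-rule module

lexer = lex.lex(module=calclex, optimize=True, lextab='calctab')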
@@ -215,34 +212,37 @@ class Lexer:
         if isinstance(tabfile, types.ModuleType):
             lextab = tabfile
         else:
-            if sys.version_info[0] < 3:
-                exec("import %s as lextab" % tabfile)
-            else:
-                env = { }
-                exec("import %s as lextab" % tabfile, env, env)
-                lextab = env['lextab']
+            exec('import %s' % tabfile)
+            lextab = sys.modules[tabfile]

-        if getattr(lextab, "_tabversion", "0.0") != __tabversion__:
-            raise ImportError("Inconsistent PLY version")
+        if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
+            raise ImportError('Inconsistent PLY version')

         self.lextokens = lextab._lextokens
         self.lexreflags = lextab._lexreflags
         self.lexliterals = lextab._lexliterals
+        self.lextokens_all = self.lextokens | set(self.lexliterals)
         self.lexstateinfo = lextab._lexstateinfo
         self.lexstateignore = lextab._lexstateignore
         self.lexstatere = {}
         self.lexstateretext = {}
-        for key, lre in lextab._lexstatere.items():
+        for statename, lre in lextab._lexstatere.items():
             titem = []
             txtitem = []
-            for i in range(len(lre)):
-                titem.append((re.compile(lre[i][0], lextab._lexreflags | re.VERBOSE), _names_to_funcs(lre[i][1], fdict)))
-                txtitem.append(lre[i][0])
-            self.lexstatere[key] = titem
-            self.lexstateretext[key] = txtitem
+            for pat, func_name in lre:
+                titem.append((re.compile(pat, lextab._lexreflags | re.VERBOSE), _names_to_funcs(func_name, fdict)))
+            self.lexstatere[statename] = titem
+            self.lexstateretext[statename] = txtitem
+
         self.lexstateerrorf = {}
-        for key, ef in lextab._lexstateerrorf.items():
-            self.lexstateerrorf[key] = fdict[ef]
+        for statename, ef in lextab._lexstateerrorf.items():
+            self.lexstateerrorf[statename] = fdict[ef]
+
+        self.lexstateeoff = {}
+        for statename, ef in lextab._lexstateeoff.items():
+            self.lexstateeoff[statename] = fdict[ef]
+
         self.begin('INITIAL')

     # ------------------------------------------------------------
@@ -252,7 +252,7 @@ class Lexer:
         # Pull off the first character to see if s looks like a string
         c = s[:1]
         if not isinstance(c, StringTypes):
-            raise ValueError("Expected a string")
+            raise ValueError('Expected a string')
         self.lexdata = s
         self.lexpos = 0
         self.lexlen = len(s)
@@ -261,12 +261,13 @@
     # begin() - Changes the lexing state
     # ------------------------------------------------------------
     def begin(self, state):
-        if not state in self.lexstatere:
-            raise ValueError("Undefined state")
+        if state not in self.lexstatere:
+            raise ValueError('Undefined state')
         self.lexre = self.lexstatere[state]
         self.lexretext = self.lexstateretext[state]
-        self.lexignore = self.lexstateignore.get(state,"")
+        self.lexignore = self.lexstateignore.get(state, '')
         self.lexerrorf = self.lexstateerrorf.get(state, None)
+        self.lexeoff = self.lexstateeoff.get(state, None)
         self.lexstate = state

     # ------------------------------------------------------------
@@ -317,7 +318,8 @@
             # Look for a regular expression match
             for lexre, lexindexfunc in self.lexre:
                 m = lexre.match(lexdata, lexpos)
-                if not m: continue
+                if not m:
+                    continue

                 # Create a token for return
                 tok = LexToken()
@@ -355,9 +357,9 @@
                 # Verify type of the token. If not in the token map, raise an error
                 if not self.lexoptimize:
-                    if not newtok.type in self.lextokens:
+                    if newtok.type not in self.lextokens_all:
                         raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
-                            func_code(func).co_filename, func_code(func).co_firstlineno,
+                            func.__code__.co_filename, func.__code__.co_firstlineno,
                             func.__name__, newtok.type), lexdata[lexpos:])

                 return newtok
@@ -377,7 +379,7 @@
                 tok = LexToken()
                 tok.value = self.lexdata[lexpos:]
                 tok.lineno = self.lineno
-                tok.type = "error"
+                tok.type = 'error'
                 tok.lexer = self
                 tok.lexpos = lexpos
                 self.lexpos = lexpos
@@ -386,15 +388,27 @@
                     # Error method didn't change text position at all. This is an error.
                     raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                 lexpos = self.lexpos
-                if not newtok: continue
+                if not newtok:
+                    continue
                 return newtok

             self.lexpos = lexpos
             raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])

+        if self.lexeoff:
+            tok = LexToken()
+            tok.type = 'eof'
+            tok.value = ''
+            tok.lineno = self.lineno
+            tok.lexpos = lexpos
+            tok.lexer = self
+            self.lexpos = lexpos
+            newtok = self.lexeoff(tok)
+            return newtok
+
         self.lexpos = lexpos + 1
         if self.lexdata is None:
-            raise RuntimeError("No input string given with input()")
+            raise RuntimeError('No input string given with input()')
         return None

     # Iterator interface
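The hunk above wires the new end-of-file hook into the tokenizing loop: when
input is exhausted and an eof rule exists, the lexer builds a token of type
'eof' and hands it to the rule, which may supply more input or end the stream.
An illustrative fragment using the hook (the t_eof name is the convention this
diff checks for; the rest is made up for the example):

import ply.lex as lex

tokens = ('WORD',)
t_WORD = r'\w+'
t_ignore = ' \t\n'

def t_eof(t):
    more = ''  # e.g. read the next chunk from a file here
    if more:
        t.lexer.input(more)   # feed more data and keep tokenizing
        return t.lexer.token()
    return None               # no more input: end the token stream

def t_error(t):
    t.lexer.skip(1)

lexer = lex.lex()
lexer.input('hello world')
while True:
    tok = lexer.token()
    if not tok:
        break
    print(tok)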
@@ -422,9 +436,8 @@ class Lexer:
 # Returns the regular expression assigned to a function either as a doc string
 # or as a .regex attribute attached by the @TOKEN decorator.
 # -----------------------------------------------------------------------------
-
 def _get_regex(func):
-    return getattr(func,"regex",func.__doc__)
+    return getattr(func, 'regex', func.__doc__)

 # -----------------------------------------------------------------------------
 # get_caller_module_dict()
@@ -433,20 +446,11 @@ def _get_regex(func):
 # a caller further down the call stack. This is used to get the environment
 # associated with the yacc() call if none was provided.
 # -----------------------------------------------------------------------------
-
 def get_caller_module_dict(levels):
-    try:
-        raise RuntimeError
-    except RuntimeError:
-        e,b,t = sys.exc_info()
-        f = t.tb_frame
-        while levels > 0:
-            f = f.f_back
-            levels -= 1
+    f = sys._getframe(levels)
     ldict = f.f_globals.copy()
     if f.f_globals != f.f_locals:
         ldict.update(f.f_locals)
-
     return ldict

 # -----------------------------------------------------------------------------
@@ -455,7 +459,6 @@ def get_caller_module_dict(levels):
 # Given a list of regular expression functions, this converts it to a list
 # suitable for output to a table file
 # -----------------------------------------------------------------------------
-
 def _funcs_to_names(funclist, namelist):
     result = []
     for f, name in zip(funclist, namelist):
@@ -471,7 +474,6 @@ def _funcs_to_names(funclist,namelist):
 # Given a list of regular expression function names, this converts it back to
 # functions.
 # -----------------------------------------------------------------------------
-
 def _names_to_funcs(namelist, fdict):
     result = []
     for n in namelist:
@@ -488,10 +490,10 @@ def _names_to_funcs(namelist,fdict):
 # form the master regular expression. Given limitations in the Python re
 # module, it may be necessary to break the master regex into separate expressions.
 # -----------------------------------------------------------------------------
-
 def _form_master_re(relist, reflags, ldict, toknames):
-    if not relist: return []
-    regex = "|".join(relist)
+    if not relist:
+        return []
+    regex = '|'.join(relist)
     try:
         lexre = re.compile(regex, re.VERBOSE | reflags)
@@ -506,7 +508,7 @@ def _form_master_re(relist,reflags,ldict,toknames):
                 lexindexnames[i] = f
             elif handle is not None:
                 lexindexnames[i] = f
-                if f.find("ignore_") > 0:
+                if f.find('ignore_') > 0:
                     lexindexfunc[i] = (None, None)
                 else:
                     lexindexfunc[i] = (None, toknames[f])
@@ -514,10 +516,11 @@ def _form_master_re(relist,reflags,ldict,toknames):
         return [(lexre, lexindexfunc)], [regex], [lexindexnames]
     except Exception:
         m = int(len(relist)/2)
-        if m == 0: m = 1
+        if m == 0:
+            m = 1
         llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
         rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
-        return llist+rlist, lre+rre, lnames+rnames
+        return (llist+rlist), (lre+rre), (lnames+rnames)

 # -----------------------------------------------------------------------------
 # def _statetoken(s,names)
@@ -527,12 +530,13 @@ def _form_master_re(relist,reflags,ldict,toknames):
 # is a tuple of state names and tokenname is the name of the token. For example,
 # calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
 # -----------------------------------------------------------------------------
-
 def _statetoken(s, names):
     nonstate = 1
-    parts = s.split("_")
-    for i in range(1,len(parts)):
-        if not parts[i] in names and parts[i] != 'ANY': break
+    parts = s.split('_')
+    for i, part in enumerate(parts[1:], 1):
+        if part not in names and part != 'ANY':
+            break
+
     if i > 1:
         states = tuple(parts[1:i])
     else:
@@ -541,7 +545,7 @@ def _statetoken(s,names):
     if 'ANY' in states:
         states = tuple(names)

-    tokenname = "_".join(parts[i:])
+    tokenname = '_'.join(parts[i:])
     return (states, tokenname)

@@ -558,13 +562,9 @@ class LexerReflect(object):
         self.tokens = []
         self.reflags = reflags
         self.stateinfo = {'INITIAL': 'inclusive'}
-        self.modules = {}
-        self.error = 0
-
-        if log is None:
-            self.log = PlyLogger(sys.stderr)
-        else:
-            self.log = log
+        self.modules = set()
+        self.error = False
+        self.log = PlyLogger(sys.stderr) if log is None else log

     # Get all of the basic information
     def get_all(self):
@@ -582,20 +582,20 @@ class LexerReflect(object):
     # Get the tokens map
     def get_tokens(self):
-        tokens = self.ldict.get("tokens",None)
+        tokens = self.ldict.get('tokens', None)
         if not tokens:
-            self.log.error("No token list is defined")
-            self.error = 1
+            self.log.error('No token list is defined')
+            self.error = True
             return

         if not isinstance(tokens, (list, tuple)):
-            self.log.error("tokens must be a list or tuple")
-            self.error = 1
+            self.log.error('tokens must be a list or tuple')
+            self.error = True
             return

         if not tokens:
-            self.log.error("tokens is empty")
-            self.error = 1
+            self.log.error('tokens is empty')
+            self.error = True
             return

         self.tokens = tokens
@@ -606,54 +606,54 @@ class LexerReflect(object):
         for n in self.tokens:
             if not _is_identifier.match(n):
                 self.log.error("Bad token name '%s'", n)
-                self.error = 1
+                self.error = True
             if n in terminals:
                 self.log.warning("Token '%s' multiply defined", n)
             terminals[n] = 1

     # Get the literals specifier
     def get_literals(self):
-        self.literals = self.ldict.get("literals","")
+        self.literals = self.ldict.get('literals', '')
         if not self.literals:
-            self.literals = ""
+            self.literals = ''

     # Validate literals
     def validate_literals(self):
         try:
             for c in self.literals:
                 if not isinstance(c, StringTypes) or len(c) > 1:
-                    self.log.error("Invalid literal %s. Must be a single character", repr(c))
-                    self.error = 1
+                    self.log.error('Invalid literal %s. Must be a single character', repr(c))
+                    self.error = True
         except TypeError:
-            self.log.error("Invalid literals specification. literals must be a sequence of characters")
-            self.error = 1
+            self.log.error('Invalid literals specification. literals must be a sequence of characters')
+            self.error = True

     def get_states(self):
-        self.states = self.ldict.get("states",None)
+        self.states = self.ldict.get('states', None)
         # Build statemap
         if self.states:
             if not isinstance(self.states, (tuple, list)):
-                self.log.error("states must be defined as a tuple or list")
-                self.error = 1
+                self.log.error('states must be defined as a tuple or list')
+                self.error = True
             else:
                 for s in self.states:
                     if not isinstance(s, tuple) or len(s) != 2:
                         self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
-                        self.error = 1
+                        self.error = True
                         continue
                     name, statetype = s
                     if not isinstance(name, StringTypes):
-                        self.log.error("State name %s must be a string", repr(name))
-                        self.error = 1
+                        self.log.error('State name %s must be a string', repr(name))
+                        self.error = True
                         continue
                     if not (statetype == 'inclusive' or statetype == 'exclusive'):
                         self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
-                        self.error = 1
+                        self.error = True
                         continue
                     if name in self.stateinfo:
                         self.log.error("State '%s' already defined", name)
-                        self.error = 1
+                        self.error = True
                         continue
                     self.stateinfo[name] = statetype
@@ -664,20 +664,20 @@ class LexerReflect(object):
         tsymbols = [f for f in self.ldict if f[:2] == 't_']

         # Now build up a list of functions and a list of strings
         self.toknames = {}  # Mapping of symbols to token names
         self.funcsym = {}   # Symbols defined as functions
         self.strsym = {}    # Symbols defined as strings
         self.ignore = {}    # Ignore strings by state
         self.errorf = {}    # Error functions by state
+        self.eoff = {}      # EOF functions by state

         for s in self.stateinfo:
             self.funcsym[s] = []
             self.strsym[s] = []

         if len(tsymbols) == 0:
-            self.log.error("No rules of the form t_rulename are defined")
-            self.error = 1
+            self.log.error('No rules of the form t_rulename are defined')
+            self.error = True
             return

         for f in tsymbols:
@@ -685,15 +685,18 @@ class LexerReflect(object):
             states, tokname = _statetoken(f, self.stateinfo)
             self.toknames[f] = tokname

-            if hasattr(t,"__call__"):
+            if hasattr(t, '__call__'):
                 if tokname == 'error':
                     for s in states:
                         self.errorf[s] = t
+                elif tokname == 'eof':
+                    for s in states:
+                        self.eoff[s] = t
                 elif tokname == 'ignore':
-                    line = func_code(t).co_firstlineno
-                    file = func_code(t).co_filename
+                    line = t.__code__.co_firstlineno
+                    file = t.__code__.co_filename
                     self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
-                    self.error = 1
+                    self.error = True
                 else:
                     for s in states:
                         self.funcsym[s].append((f, t))
@@ -701,33 +704,25 @@ class LexerReflect(object):
                 if tokname == 'ignore':
                     for s in states:
                         self.ignore[s] = t
-                    if "\\" in t:
+                    if '\\' in t:
                         self.log.warning("%s contains a literal backslash '\\'", f)

                 elif tokname == 'error':
                     self.log.error("Rule '%s' must be defined as a function", f)
-                    self.error = 1
+                    self.error = True
                 else:
                     for s in states:
                         self.strsym[s].append((f, t))
             else:
-                self.log.error("%s not defined as a function or string", f)
-                self.error = 1
+                self.log.error('%s not defined as a function or string', f)
+                self.error = True

         # Sort the functions by line number
         for f in self.funcsym.values():
-            if sys.version_info[0] < 3:
-                f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno, func_code(y[1]).co_firstlineno))
-            else:
-                # Python 3.0
-                f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
+            f.sort(key=lambda x: x[1].__code__.co_firstlineno)

         # Sort the strings by regular expression length
         for s in self.strsym.values():
-            if sys.version_info[0] < 3:
-                s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
-            else:
-                # Python 3.0
-                s.sort(key=lambda x: len(x[1]), reverse=True)
+            s.sort(key=lambda x: len(x[1]), reverse=True)

     # Validate all of the t_rules collected
@@ -735,102 +730,97 @@ class LexerReflect(object):
         for state in self.stateinfo:
             # Validate all rules defined by functions

             for fname, f in self.funcsym[state]:
-                line = func_code(f).co_firstlineno
-                file = func_code(f).co_filename
+                line = f.__code__.co_firstlineno
+                file = f.__code__.co_filename
                 module = inspect.getmodule(f)
-                self.modules[module] = 1
+                self.modules.add(module)

                 tokname = self.toknames[fname]
                 if isinstance(f, types.MethodType):
                     reqargs = 2
                 else:
                     reqargs = 1
-                nargs = func_code(f).co_argcount
+                nargs = f.__code__.co_argcount
                 if nargs > reqargs:
                     self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
-                    self.error = 1
+                    self.error = True
                     continue

                 if nargs < reqargs:
                     self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
-                    self.error = 1
+                    self.error = True
                     continue

                 if not _get_regex(f):
                     self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
-                    self.error = 1
+                    self.error = True
                     continue

                 try:
-                    c = re.compile("(?P<%s>%s)" % (fname, _get_regex(f)), re.VERBOSE | self.reflags)
-                    if c.match(""):
+                    c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), re.VERBOSE | self.reflags)
+                    if c.match(''):
                         self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
-                        self.error = 1
-                except re.error:
-                    _etype, e, _etrace = sys.exc_info()
+                        self.error = True
+                except re.error as e:
                     self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
                     if '#' in _get_regex(f):
                         self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
-                    self.error = 1
+                    self.error = True

             # Validate all rules defined by strings
             for name, r in self.strsym[state]:
                 tokname = self.toknames[name]
                 if tokname == 'error':
                     self.log.error("Rule '%s' must be defined as a function", name)
-                    self.error = 1
+                    self.error = True
                     continue

-                if not tokname in self.tokens and tokname.find("ignore_") < 0:
+                if tokname not in self.tokens and tokname.find('ignore_') < 0:
                     self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
-                    self.error = 1
+                    self.error = True
                     continue

                 try:
-                    c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
-                    if (c.match("")):
+                    c = re.compile('(?P<%s>%s)' % (name, r), re.VERBOSE | self.reflags)
+                    if (c.match('')):
                         self.log.error("Regular expression for rule '%s' matches empty string", name)
-                        self.error = 1
-                except re.error:
-                    _etype, e, _etrace = sys.exc_info()
+                        self.error = True
+                except re.error as e:
                     self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
                     if '#' in r:
                         self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
-                    self.error = 1
+                    self.error = True

             if not self.funcsym[state] and not self.strsym[state]:
                 self.log.error("No rules defined for state '%s'", state)
-                self.error = 1
+                self.error = True

             # Validate the error function
             efunc = self.errorf.get(state, None)
             if efunc:
                 f = efunc
-                line = func_code(f).co_firstlineno
-                file = func_code(f).co_filename
+                line = f.__code__.co_firstlineno
+                file = f.__code__.co_filename
                 module = inspect.getmodule(f)
-                self.modules[module] = 1
+                self.modules.add(module)

                 if isinstance(f, types.MethodType):
                     reqargs = 2
                 else:
                     reqargs = 1
-                nargs = func_code(f).co_argcount
+                nargs = f.__code__.co_argcount
                 if nargs > reqargs:
                     self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
-                    self.error = 1
+                    self.error = True

                 if nargs < reqargs:
                     self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
-                    self.error = 1
+                    self.error = True

         for module in self.modules:
             self.validate_module(module)

     # -----------------------------------------------------------------------------
     # validate_module()
     #
@@ -847,10 +837,10 @@ class LexerReflect(object):
         counthash = {}
         linen += 1
-        for l in lines:
-            m = fre.match(l)
+        for line in lines:
+            m = fre.match(line)
             if not m:
-                m = sre.match(l)
+                m = sre.match(line)
             if m:
                 name = m.group(1)
                 prev = counthash.get(name)
@@ -858,8 +848,8 @@ class LexerReflect(object):
                     counthash[name] = linen
                 else:
                     filename = inspect.getsourcefile(module)
-                    self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
-                    self.error = 1
+                    self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
+                    self.error = True
             linen += 1

 # -----------------------------------------------------------------------------
@@ -867,8 +857,14 @@ class LexerReflect(object):
 #
 # Build all of the regular expression rules from definitions in the supplied module
 # -----------------------------------------------------------------------------
-def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
+def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
+        reflags=0, nowarn=False, outputdir=None, debuglog=None, errorlog=None):
+
+    if lextab is None:
+        lextab = 'lextab'
+
     global lexer
+
     ldict = None
     stateinfo = {'INITIAL': 'inclusive'}
     lexobj = Lexer()
@@ -883,14 +879,26 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
         debuglog = PlyLogger(sys.stderr)

     # Get the module dictionary used for the lexer
-    if object: module = object
+    if object:
+        module = object

+    # Get the module dictionary used for the parser
     if module:
         _items = [(k, getattr(module, k)) for k in dir(module)]
         ldict = dict(_items)
+        # If no __file__ attribute is available, try to obtain it from the __module__ instead
+        if '__file__' not in ldict:
+            ldict['__file__'] = sys.modules[ldict['__module__']].__file__
     else:
         ldict = get_caller_module_dict(2)

+    # Determine if the module is package of a package or not.
+    # If so, fix the tabmodule setting so that tables load correctly
+    pkg = ldict.get('__package__')
+    if pkg and isinstance(lextab, str):
+        if '.' not in lextab:
+            lextab = pkg + '.' + lextab
+
     # Collect parser information from the dictionary
     linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
     linfo.get_all()
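One practical effect of the package handling added above: when the module that
defines the rules lives inside a package, lex() qualifies a bare lextab name
with the package so the cached tables can be imported back later. A hedged
sketch; the package name mypkg is hypothetical:

# mypkg/lexer.py - __package__ is 'mypkg', so the default lextab='lextab'
# is rewritten to 'mypkg.lextab' and the generated table module is written
# into (and later imported from) the package directory.
import ply.lex as lex

tokens = ('ID',)
t_ID = r'[A-Za-z_]\w*'
t_ignore = ' \t'

def t_error(t):
    t.lexer.skip(1)

lexer = lex.lex(optimize=True)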
@@ -911,14 +919,14 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
     # Dump some basic debugging information
     if debug:
-        debuglog.info("lex: tokens = %r", linfo.tokens)
-        debuglog.info("lex: literals = %r", linfo.literals)
-        debuglog.info("lex: states = %r", linfo.stateinfo)
+        debuglog.info('lex: tokens = %r', linfo.tokens)
+        debuglog.info('lex: literals = %r', linfo.literals)
+        debuglog.info('lex: states = %r', linfo.stateinfo)

     # Build a dictionary of valid token names
-    lexobj.lextokens = { }
+    lexobj.lextokens = set()
     for n in linfo.tokens:
-        lexobj.lextokens[n] = 1
+        lexobj.lextokens.add(n)

     # Get literals specification
     if isinstance(linfo.literals, (list, tuple)):
@@ -926,6 +934,8 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
     else:
         lexobj.lexliterals = linfo.literals

+    lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)
+
     # Get the stateinfo dictionary
     stateinfo = linfo.stateinfo
@@ -936,15 +946,15 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
         # Add rules defined by functions first
         for fname, f in linfo.funcsym[state]:
-            line = func_code(f).co_firstlineno
-            file = func_code(f).co_filename
-            regex_list.append("(?P<%s>%s)" % (fname,_get_regex(f)))
+            line = f.__code__.co_firstlineno
+            file = f.__code__.co_filename
+            regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
             if debug:
                 debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)

         # Now add all of the simple rules
         for name, r in linfo.strsym[state]:
-            regex_list.append("(?P<%s>%s)" % (name,r))
+            regex_list.append('(?P<%s>%s)' % (name, r))
             if debug:
                 debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)
@@ -953,7 +963,7 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
     # Build the master regular expressions
     if debug:
-        debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
+        debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')

     for state in regexs:
         lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
@@ -961,43 +971,47 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
         lexobj.lexstateretext[state] = re_text
         lexobj.lexstaterenames[state] = re_names
         if debug:
-            for i in range(len(re_text)):
-                debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
+            for i, text in enumerate(re_text):
+                debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)

     # For inclusive states, we need to add the regular expressions from the INITIAL state
     for state, stype in stateinfo.items():
-        if state != "INITIAL" and stype == 'inclusive':
+        if state != 'INITIAL' and stype == 'inclusive':
             lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
             lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
             lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])

     lexobj.lexstateinfo = stateinfo
-    lexobj.lexre = lexobj.lexstatere["INITIAL"]
-    lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
+    lexobj.lexre = lexobj.lexstatere['INITIAL']
+    lexobj.lexretext = lexobj.lexstateretext['INITIAL']
     lexobj.lexreflags = reflags

     # Set up ignore variables
     lexobj.lexstateignore = linfo.ignore
-    lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
+    lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')

     # Set up error functions
     lexobj.lexstateerrorf = linfo.errorf
-    lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
+    lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
     if not lexobj.lexerrorf:
-        errorlog.warning("No t_error rule is defined")
+        errorlog.warning('No t_error rule is defined')

+    # Set up eof functions
+    lexobj.lexstateeoff = linfo.eoff
+    lexobj.lexeoff = linfo.eoff.get('INITIAL', None)
+
     # Check state information for ignore and error rules
     for s, stype in stateinfo.items():
         if stype == 'exclusive':
-            if not s in linfo.errorf:
+            if s not in linfo.errorf:
                 errorlog.warning("No error rule is defined for exclusive state '%s'", s)
-            if not s in linfo.ignore and lexobj.lexignore:
+            if s not in linfo.ignore and lexobj.lexignore:
                 errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
         elif stype == 'inclusive':
-            if not s in linfo.errorf:
-                linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
-            if not s in linfo.ignore:
-                linfo.ignore[s] = linfo.ignore.get("INITIAL","")
+            if s not in linfo.errorf:
+                linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
+            if s not in linfo.ignore:
+                linfo.ignore[s] = linfo.ignore.get('INITIAL', '')

     # Create global versions of the token() and input() functions
     token = lexobj.token
@@ -1006,7 +1020,26 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
     # If in optimize mode, we write the lextab
     if lextab and optimize:
-        lexobj.writetab(lextab, outputdir)
+        if outputdir is None:
+            # If no output directory is set, the location of the output files
+            # is determined according to the following rules:
+            #     - If lextab specifies a package, files go into that package directory
+            #     - Otherwise, files go in the same directory as the specifying module
+            if isinstance(lextab, types.ModuleType):
+                srcfile = lextab.__file__
+            else:
+                if '.' not in lextab:
+                    srcfile = ldict['__file__']
+                else:
+                    parts = lextab.split('.')
+                    pkgname = '.'.join(parts[:-1])
+                    exec('import %s' % pkgname)
+                    srcfile = getattr(sys.modules[pkgname], '__file__', '')
+            outputdir = os.path.dirname(srcfile)
+
+        try:
+            lexobj.writetab(lextab, outputdir)
+        except IOError as e:
+            errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))

     return lexobj
@@ -1024,7 +1057,7 @@ def runmain(lexer=None,data=None):
         data = f.read()
         f.close()
     except IndexError:
-        sys.stdout.write("Reading from standard input (type EOF to end):\n")
+        sys.stdout.write('Reading from standard input (type EOF to end):\n')
         data = sys.stdin.read()

     if lexer:
@@ -1037,10 +1070,11 @@ def runmain(lexer=None,data=None):
     else:
         _token = token

-    while 1:
+    while True:
         tok = _token()
-        if not tok: break
-        sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
+        if not tok:
+            break
+        sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))

 # -----------------------------------------------------------------------------
 # @TOKEN(regex)
@@ -1051,7 +1085,7 @@ def runmain(lexer=None,data=None):
 def TOKEN(r):
     def set_regex(f):
-        if hasattr(r,"__call__"):
+        if hasattr(r, '__call__'):
             f.regex = _get_regex(r)
         else:
             f.regex = r

tools/yacc.py Executable file → Normal file

(File diff suppressed because it is too large.)