# home *** CD-ROM | disk | FTP | other *** search   (web-archive navigation residue; not part of the original source)
- # Begin -- grammar generated by Yapps
- import sys, re
- #import yappsrt
-
- ######################################################################
- ######################################################################
- ######################################################################
- ######################################################################
- # The following is the Yapps 2.2.1 runtime file (yappsrt.py)
-
- #
- # Yapps 2 Runtime, part of Yapps 2 - yet another python parser system
- # Copyright 1999-2003 by Amit J. Patel <amitp@cs.stanford.edu>
- #
- # This version of the Yapps 2 Runtime can be distributed under the
- # terms of the MIT open source license, either found in the LICENSE file
- # included with the Yapps distribution
- # <http://theory.stanford.edu/~amitp/yapps/> or at
- # <http://www.opensource.org/licenses/mit-license.php>
- #
-
- """Run time libraries needed to run parsers generated by Yapps.
-
- This module defines parse-time exception classes, a scanner class, a
- base class for parsers produced by Yapps, and a context class that
- keeps track of the parse stack.
-
- """
-
- import sys, re
-
class SyntaxError(Exception):
    """Parse-time error raised when an unexpected token is encountered.

    Attributes:
        charpos -- character offset of the offending token, or -1 if unknown
        msg     -- human-readable description of the problem
        context -- Context object describing the rule stack, or None
    """

    def __init__(self, charpos=-1, msg="Bad Token", context=None):
        Exception.__init__(self)
        self.charpos = charpos
        self.msg = msg
        self.context = context

    def __str__(self):
        # Without a known position there is nothing more to report.
        if self.charpos < 0:
            return 'SyntaxError'
        return 'SyntaxError@char%s(%s)' % (repr(self.charpos), self.msg)
-
class NoMoreTokens(Exception):
    """Raised when the scanner is asked for a token past the end of input."""
    pass
-
class Scanner:
    """Yapps scanner: turns an input string into a token stream.

    The scanner can operate context sensitively or context
    insensitively.  token(i) returns the i-th token and accepts a
    "restrict" set limiting which terminals may be produced; in
    context sensitive mode that set guides matching, while in context
    insensitive mode every terminal is always allowed (restrict is
    the full terminal set or None).
    """

    def __init__(self, patterns, ignore, input):
        """Set up the scanner state.

        Parameters:
            patterns : [(terminal, uncompiled regex), ...] or None
            ignore   : [terminal, ...] -- terminals to skip silently
            input    : string to be tokenized

        When patterns is None the subclass is expected to define
        self.patterns as [(terminal, *compiled* regex), ...];
        otherwise the uncompiled regex strings given here are
        compiled now.
        """
        # Each token is (start char pos, end char pos, terminal, matched text).
        self.tokens = []
        # restrictions[i] records the restrict set in force when tokens[i]
        # was produced, so later calls can be checked against it.
        self.restrictions = []
        self.input = input
        self.pos = 0
        self.ignore = ignore
        self.first_line_number = 1

        if patterns is not None:
            # Compile the regex strings into regex objects.
            self.patterns = [(terminal, re.compile(regex))
                             for terminal, regex in patterns]

    def get_token_pos(self):
        """Return how many tokens have been scanned so far."""
        return len(self.tokens)

    def get_char_pos(self):
        """Return the current character offset into the input."""
        return self.pos

    def get_prev_char_pos(self, i=None):
        """Return the start offset of token i (default: the last token)."""
        if self.pos == 0:
            return 0
        return self.tokens[-1 if i is None else i][0]

    def get_line_number(self):
        """Return the line number of the current position in the input."""
        # TODO: make this work at any token/char position
        return self.first_line_number + self.get_input_scanned().count('\n')

    def get_column_number(self):
        """Return the column number of the current position in the input."""
        scanned = self.get_input_scanned()
        newline = scanned.rfind('\n')  # may be -1; the arithmetic below still works
        return len(scanned) - (newline + 1)

    def get_input_scanned(self):
        """Return the prefix of the input that has been tokenized."""
        return self.input[:self.pos]

    def get_input_unscanned(self):
        """Return the suffix of the input not yet tokenized."""
        return self.input[self.pos:]

    def token(self, i, restrict=None):
        """Return token i, scanning a new one if i is one past the end.

        restrict is a list of allowed terminals, or None for "any
        terminal".  token(i) may be called more than once, but a later
        call must never use a *larger* restrict set than the first one
        did, or the tokenization at positions i+1 and higher could be
        invalidated.
        """
        if i == len(self.tokens):
            self.scan(restrict)
        if i < len(self.tokens):
            # Enforce the shrinking-restriction invariant described above.
            if restrict and self.restrictions[i]:
                for terminal in restrict:
                    if terminal not in self.restrictions[i]:
                        raise NotImplementedError("Unimplemented: restriction set changed")
            return self.tokens[i]
        raise NoMoreTokens()

    def __repr__(self):
        """Show the last 10 scanned tokens, for debugging."""
        pieces = ['\n (@%s) %s = %s' % (tok[0], tok[2], repr(tok[3]))
                  for tok in self.tokens[-10:]]
        return ''.join(pieces)

    def scan(self, restrict):
        """Scan one more token, append it to self.tokens, and record the
        restrict set in force in self.restrictions."""
        # Keep matching until something that is not ignored is found.
        while 1:
            # Find the longest match; on equal lengths the terminal
            # listed first wins (strictly greater length is required
            # to displace the current best).
            longest = -1
            winner = '(error)'
            for terminal, regexp in self.patterns:
                # Skip terminals excluded by the restriction -- except
                # ignored terminals, which must always stay matchable.
                if restrict and terminal not in restrict and terminal not in self.ignore:
                    continue
                m = regexp.match(self.input, self.pos)
                if m and len(m.group(0)) > longest:
                    winner = terminal
                    longest = len(m.group(0))

            # Nothing matched at all: report a syntax error.
            if winner == '(error)' and longest < 0:
                msg = 'Bad Token'
                if restrict:
                    msg = 'Trying to find one of '+', '.join(restrict)
                raise SyntaxError(self.pos, msg)

            if winner not in self.ignore:
                token = (self.pos, self.pos + longest, winner,
                         self.input[self.pos:self.pos + longest])
                self.pos = self.pos + longest
                # Only record the token if it differs from the most
                # recent one (prevents looping on repeated matches).
                if not self.tokens or token != self.tokens[-1]:
                    self.tokens.append(token)
                    self.restrictions.append(restrict)
                return
            else:
                # Ignored terminal: consume it and keep scanning.
                self.pos = self.pos + longest
-
class Parser:
    """Base class for Yapps-generated parsers.

    Holds the scanner plus the index of the next token to consume;
    generated subclasses drive the parse through _peek and _scan.
    """

    def __init__(self, scanner):
        self._scanner = scanner
        self._pos = 0  # index of the next token to consume

    def _peek(self, *types):
        """Look ahead without consuming: return the next token's terminal.

        Any positional args form the set of terminals the scanner may
        produce here; with no args, any terminal is allowed.
        """
        return self._scanner.token(self._pos, types)[2]

    def _scan(self, type):
        """Consume the next token, which must have terminal `type`, and
        return its matched text; raise SyntaxError otherwise."""
        tok = self._scanner.token(self._pos, [type])
        if tok[2] != type:
            raise SyntaxError(tok[0], 'Trying to find '+type+' :'+ ' ,'.join(self._scanner.restrictions[self._pos]))
        self._pos += 1
        return tok[3]
-
class Context:
    """One frame of the parser's rule-call stack.

    Every rule creates a Context linked to its parent rule's context;
    the resulting chain is used for debugging and error reporting.
    """

    def __init__(self, parent, scanner, tokenpos, rule, args=()):
        """Record one stack frame.

        Args:
            parent:   enclosing Context, or None for the start rule
            scanner:  Scanner object in use
            tokenpos: scanner token position when the rule was entered
            rule:     name of the rule (string)
            args:     tuple listing parameters passed to the rule
        """
        self.parent = parent
        self.scanner = scanner
        self.tokenpos = tokenpos
        self.rule = rule
        self.args = args

    def __str__(self):
        # Render the whole stack as 'outer > inner > ...'.
        prefix = str(self.parent) + ' > ' if self.parent else ''
        return prefix + self.rule
-
def print_line_with_pointer(text, p):
    """Print the line of 'text' that includes position 'p',
    along with a second line with a single caret (^) at position p.

    Output goes to stderr.  Only a window of at most 80 characters on
    either side of p is considered, and the displayed line is further
    shortened to roughly 70 characters with a leading '...'.
    """
    # Now try printing part of the line: clamp to an 80-char window
    # around p and rebase p into that window.
    text = text[max(p-80, 0):p+80]
    p = p - max(p-80, 0)

    # Strip to the left: cut at the last \n or \r before p, whichever
    # is closer to p.
    i = text[:p].rfind('\n')
    j = text[:p].rfind('\r')
    if i < 0 or (0 <= j < i): i = j
    if 0 <= i < p:
        p = p - i - 1
        text = text[i+1:]

    # Strip to the right: cut at the first \n or \r at/after p.
    i = text.find('\n', p)
    j = text.find('\r', p)
    if i < 0 or (0 <= j < i): i = j
    if i >= 0:
        text = text[:i]

    # Now shorten the text so the caret stays on screen.
    while len(text) > 70 and p > 60:
        # Cut off 10 chars; '...' adds 3 back, so p only moves by 7.
        text = "..." + text[10:]
        p = p - 7

    # Now print the string, along with an indicator line carrying the caret.
    print >>sys.stderr, '> ',text
    print >>sys.stderr, '> ',' '*p + '^'
-
def print_error(input, err, scanner):
    """Print error messages, the parser stack, and the input text -- for human-readable error messages.

    Parameters:
        input   : the full input string that was being parsed
        err     : the SyntaxError that was raised (uses .msg, .context, .charpos)
        scanner : the Scanner, used for line/column information
    """
    # NOTE: this function assumes 80 columns :-(
    # Figure out the line number
    line_number = scanner.get_line_number()
    column_number = scanner.get_column_number()
    print >>sys.stderr, '%d:%d: %s' % (line_number, column_number, err.msg)

    context = err.context
    # With no rule context, just point at the failing character.
    if not context:
        print_line_with_pointer(input, err.charpos)

    # Otherwise walk the rule stack outward, pointing at where each
    # enclosing rule began.
    while context:
        # TODO: add line number
        print >>sys.stderr, 'while parsing %s%s:' % (context.rule, tuple(context.args))
        print_line_with_pointer(input, context.scanner.get_prev_char_pos(context.tokenpos))
        context = context.parent
-
def wrap_error_reporter(parser, rule):
    """Invoke parser.<rule>() and turn parse failures into readable
    messages on stderr instead of tracebacks.

    Returns the rule's result on success; returns None (implicitly)
    after reporting an error.
    """
    try:
        return getattr(parser, rule)()
    except SyntaxError, e:
        input = parser._scanner.input
        print_error(input, e, parser._scanner)
    except NoMoreTokens:
        # Scanner ran dry; its repr shows the last few tokens scanned.
        print >>sys.stderr, 'Could not complete parsing; stopped around here:'
        print >>sys.stderr, parser._scanner
-
- ######################################################################
- ######################################################################
- ######################################################################
- ######################################################################
class YappsRT:
    # Empty carrier class: a single instance below acts as a stand-in
    # namespace for the yappsrt module, whose contents were inlined
    # above instead of imported.
    pass

# Populate the fake "module" so the generated parser code below can
# keep referring to yappsrt.Scanner, yappsrt.SyntaxError, etc.
yappsrt = YappsRT()
yappsrt.Scanner = Scanner
yappsrt.Parser = Parser
yappsrt.Context = Context
yappsrt.SyntaxError = SyntaxError
yappsrt.NoMoreTokens = NoMoreTokens
yappsrt.wrap_error_reporter = wrap_error_reporter
-
- ######################################################################
-
- # The following is generated by Yapps2.1.1:
-
class _SLParserBaseScanner(yappsrt.Scanner):
    # Terminal table generated by Yapps: (terminal name, compiled regex).
    # Order matters: when two terminals match text of equal length, the
    # one listed earlier wins (see Scanner.scan).
    patterns = [
        ('"-"', re.compile('-')),
        ('"="', re.compile('=')),
        ('"\\]"', re.compile('\\]')),
        ('"\\["', re.compile('\\[')),
        ('","', re.compile(',')),
        ('";"', re.compile(';')),
        ('"}"', re.compile('}')),
        ('"{"', re.compile('{')),
        ('"\\)"', re.compile('\\)')),
        ('"\\("', re.compile('\\(')),
        ('[ \n\t]+', re.compile('[ \n\t]+')),
        ('END', re.compile('$')),
        ('number', re.compile('[0-9]+(\\.[0-9]*)?([eE][0-9]+)?|\\.[0-9]+([eE][0-9]+)?')),
        ('stringconstant', re.compile('"[^"]*"')),
        ('shader_type', re.compile('(light|surface|volume|displacement|imager)')),
        ('outputspec', re.compile('output')),
        ('type', re.compile('(float|string|color|point|vector|normal|matrix|void)')),
        ('detail', re.compile('(varying|uniform)')),
        ('singletypes', re.compile('(float|string)')),
        ('spacetypes', re.compile('(color|point|vector|normal|matrix)')),
        ('identifier', re.compile('[_a-zA-Z][_a-zA-Z0-9]*')),
        ('binop', re.compile('[+\\-/^*.]')),
        ('preprocessorline', re.compile('#.*')),
    ]
    def __init__(self, str):
        # patterns=None: use the pre-compiled class attribute above.
        # Whitespace ('[ \n\t]+') is tokenized but silently ignored.
        yappsrt.Scanner.__init__(self,None,['[ \n\t]+'],str)
-
class _SLParserBase(yappsrt.Parser):
    """Yapps-generated recursive-descent parser for a RenderMan SL
    (shading language) subset: shader/function headers and their
    formal parameter lists.

    NOTE(review): the rule methods call hooks that are not defined in
    this file -- switchFile, newParams, storeParam, newType,
    appendSpace -- and read/write attributes such as self.params,
    self.output, self.detail, self.type, self.name, self.arraylen,
    self.default and self.space.  A subclass is presumably expected to
    provide them; verify against the subclass before relying on their
    semantics.
    """
    Context = yappsrt.Context

    def definitions(self, _parent=None):
        # Start rule: a sequence of preprocessor lines, shader
        # definitions and function definitions, terminated by END.
        _context = self.Context(_parent, self._scanner, self._pos, 'definitions', [])
        shaders = []
        while self._peek('END', 'preprocessorline', 'shader_type', 'type', 'identifier') != 'END':
            _token = self._peek('preprocessorline', 'shader_type', 'type', 'identifier')
            if _token == 'preprocessorline':
                preprocessorline = self._scan('preprocessorline')
                self.switchFile(preprocessorline)
            elif _token == 'shader_type':
                shader_def = self.shader_def(_context)
                shaders.append(shader_def)
            else: # in ['type', 'identifier']
                function_def = self.function_def(_context)
            if self._peek() not in ['END', 'preprocessorline', 'shader_type', 'type', 'identifier']:
                raise yappsrt.SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['preprocessorline', 'END', 'shader_type', 'type', 'identifier']))
        END = self._scan('END')
        return shaders

    def shader_def(self, _parent=None):
        # shader_type identifier ( formals? ) { }  -- shader bodies are
        # not parsed here; only the header and parameter list.
        _context = self.Context(_parent, self._scanner, self._pos, 'shader_def', [])
        shader_type = self._scan('shader_type')
        identifier = self._scan('identifier')
        self.newParams()
        self._scan('"\\("')
        if self._peek('"\\)"', 'outputspec', '";"', 'detail', 'type', '","') in ['outputspec', 'detail', 'type']:
            formals = self.formals(_context)
        self._scan('"\\)"')
        self._scan('"{"')
        self._scan('"}"')
        return (shader_type,identifier,self.params)

    def function_def(self, _parent=None):
        # Optional return type, identifier, ( formals? ) { } -- the
        # result is discarded (functions are skipped, not collected).
        _context = self.Context(_parent, self._scanner, self._pos, 'function_def', [])
        if self._peek('type', 'identifier') == 'type':
            type = self._scan('type')
        identifier = self._scan('identifier')
        self._scan('"\\("')
        if self._peek('"\\)"', 'outputspec', '";"', 'detail', 'type', '","') in ['outputspec', 'detail', 'type']:
            formals = self.formals(_context)
        self._scan('"\\)"')
        self._scan('"{"')
        self._scan('"}"')

    def formals(self, _parent=None):
        # Semicolon-separated groups of formal variable definitions.
        _context = self.Context(_parent, self._scanner, self._pos, 'formals', [])
        formal_var_defs = self.formal_var_defs(_context)
        while self._peek('";"', '"\\)"', '","') == '";"':
            self._scan('";"')
            if self._peek('outputspec', '";"', 'detail', 'type', '","', '"\\)"') in ['outputspec', 'detail', 'type']:
                formal_var_defs = self.formal_var_defs(_context)
            if self._peek() not in ['";"', '"\\)"', '","']:
                raise yappsrt.SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['";"', '"\\)"', '","']))

    def formal_var_defs(self, _parent=None):
        # One parameter group: optional 'output', a type spec, then one
        # or more declarator expressions.
        _context = self.Context(_parent, self._scanner, self._pos, 'formal_var_defs', [])
        self.newType()
        if self._peek('outputspec', 'detail', 'type') == 'outputspec':
            outputspec = self._scan('outputspec')
            self.output="output"
        typespec = self.typespec(_context)
        def_expressions = self.def_expressions(_context)

    def typespec(self, _parent=None):
        # Optional detail ('varying'/'uniform') followed by a type.
        _context = self.Context(_parent, self._scanner, self._pos, 'typespec', [])
        if self._peek('detail', 'type') == 'detail':
            detail = self._scan('detail')
            self.detail = detail
        type = self._scan('type')
        self.type = type

    def def_expressions(self, _parent=None):
        # Comma-separated declarators sharing one type spec.
        _context = self.Context(_parent, self._scanner, self._pos, 'def_expressions', [])
        def_expression = self.def_expression(_context)
        while self._peek('","', '";"', '"\\)"') == '","':
            self._scan('","')
            def_expression = self.def_expression(_context)
            if self._peek() not in ['","', '";"', '"\\)"']:
                raise yappsrt.SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['","', '";"', '"\\)"']))

    def def_expression(self, _parent=None):
        # identifier, optional [N] array suffix, optional '=' default.
        _context = self.Context(_parent, self._scanner, self._pos, 'def_expression', [])
        identifier = self._scan('identifier')
        self.name = identifier
        if self._peek('"\\["', '"="', '","', '";"', '"\\)"') == '"\\["':
            self._scan('"\\["')
            number = self._scan('number')
            self._scan('"\\]"')
            self.arraylen=int(number)
        if self._peek('"="', '","', '";"', '"\\)"') == '"="':
            def_init = self.def_init(_context)
            self.default = def_init
        self.storeParam()

    def def_init(self, _parent=None):
        # '=' expression -- returns the default-value expression text.
        _context = self.Context(_parent, self._scanner, self._pos, 'def_init', [])
        self._scan('"="')
        expression = self.expression(_context)
        return expression

    def expression(self, _parent=None):
        # Builds the expression back up as a string: primary, unary
        # minus, or typecast, followed by any number of binop tails.
        _context = self.Context(_parent, self._scanner, self._pos, 'expression', [])
        expr=""
        _token = self._peek('"-"', 'number', 'stringconstant', 'singletypes', 'spacetypes', '"\\("', '"{"', 'identifier')
        if _token not in ['"-"', 'singletypes', 'spacetypes']:
            primary = self.primary(_context)
            expr+=primary
        elif _token == '"-"':
            self._scan('"-"')
            expression = self.expression(_context)
            expr+="-"+expression
        else: # in ['singletypes', 'spacetypes']
            # Note: the typecast itself is dropped from the rebuilt text.
            typecast = self.typecast(_context)
            expression = self.expression(_context)
            expr+=expression
        while self._peek('binop', '","', '"\\)"', '"}"', '";"') == 'binop':
            binop = self._scan('binop')
            expression = self.expression(_context)
            expr+=binop+expression
            if self._peek() not in ['binop', '","', '"\\)"', '"}"', '";"']:
                raise yappsrt.SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['binop', '","', '"\\)"', '"}"', '";"']))
        return expr

    def primary(self, _parent=None):
        # Literal, parenthesized tuple, braced array, or identifier /
        # procedure call; returns its textual form.
        _context = self.Context(_parent, self._scanner, self._pos, 'primary', [])
        _token = self._peek('number', 'stringconstant', '"\\("', '"{"', 'identifier')
        if _token == 'number':
            number = self._scan('number')
            return number
        elif _token == 'stringconstant':
            stringconstant = self._scan('stringconstant')
            return stringconstant
        elif _token == '"\\("':
            tuple = self.tuple(_context)
            return tuple
        elif _token == '"{"':
            array = self.array(_context)
            return array
        else: # == 'identifier'
            identifier_or_procedurecall = self.identifier_or_procedurecall(_context)
            return identifier_or_procedurecall

    def tuple(self, _parent=None):
        # '(' expression (',' expression)* ')' rebuilt as "(a, b, ...)".
        _context = self.Context(_parent, self._scanner, self._pos, 'tuple', [])
        tup = ""
        self._scan('"\\("')
        expression = self.expression(_context)
        tup+=expression
        while self._peek('"\\)"', '","') == '","':
            self._scan('","')
            expression = self.expression(_context)
            tup+=", "+expression
            if self._peek() not in ['"\\)"', '","']:
                raise yappsrt.SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['","', '"\\)"']))
        self._scan('"\\)"')
        return "("+tup+")"

    def typecast(self, _parent=None):
        # A cast type; space types may carry a space-name string
        # constant (stored in self.space with quotes stripped).
        _context = self.Context(_parent, self._scanner, self._pos, 'typecast', [])
        _token = self._peek('singletypes', 'spacetypes')
        if _token == 'singletypes':
            singletypes = self._scan('singletypes')
        else: # == 'spacetypes'
            spacetypes = self._scan('spacetypes')
            if self._peek('stringconstant', '"-"', 'number', 'singletypes', 'spacetypes', '"\\("', '"{"', 'identifier') == 'stringconstant':
                spacename = self.spacename(_context)
                self.space = spacename[1:-1]

    def spacename(self, _parent=None):
        # A quoted string constant naming a coordinate space.
        _context = self.Context(_parent, self._scanner, self._pos, 'spacename', [])
        stringconstant = self._scan('stringconstant')
        return stringconstant

    def identifier_or_procedurecall(self, _parent=None):
        # identifier, optionally followed by "(args)"; rebuilt as text.
        _context = self.Context(_parent, self._scanner, self._pos, 'identifier_or_procedurecall', [])
        identifier = self._scan('identifier')
        res = identifier; proc_args=""
        if self._peek('"\\("', 'binop', '","', '"\\)"', '"}"', '";"') == '"\\("':
            self._scan('"\\("')
            if self._peek('"\\)"', '"-"', 'number', 'stringconstant', 'singletypes', 'spacetypes', '"\\("', '"{"', 'identifier') != '"\\)"':
                proc_args = self.proc_args(_context)
            self._scan('"\\)"')
            res+="("+proc_args+")"
        return res

    def proc_args(self, _parent=None):
        # Comma-separated call arguments rebuilt as "a, b, ...".
        _context = self.Context(_parent, self._scanner, self._pos, 'proc_args', [])
        expression = self.expression(_context)
        res = expression
        while self._peek('","', '"\\)"') == '","':
            self._scan('","')
            expression = self.expression(_context)
            res+=", "+expression
            if self._peek() not in ['","', '"\\)"']:
                raise yappsrt.SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['","', '"\\)"']))
        return res

    def array(self, _parent=None):
        # '{' expression (',' expression)* '}' rebuilt as "{a, b, ...}";
        # appendSpace() is invoked once per element.
        _context = self.Context(_parent, self._scanner, self._pos, 'array', [])
        arr = ""
        self._scan('"{"')
        expression = self.expression(_context)
        arr+=expression; self.appendSpace()
        while self._peek('"}"', '","') == '","':
            self._scan('","')
            expression = self.expression(_context)
            arr+=", "+expression; self.appendSpace()
            if self._peek() not in ['"}"', '","']:
                raise yappsrt.SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['","', '"}"']))
        self._scan('"}"')
        return "{"+arr+"}"
-
-
def parse(rule, text):
    """Parse `text` starting from grammar rule `rule`.

    Returns the rule's result, or None after printing a human-readable
    error report on stderr (see wrap_error_reporter).
    """
    scanner = _SLParserBaseScanner(text)
    parser = _SLParserBase(scanner)
    return yappsrt.wrap_error_reporter(parser, rule)
-
if __name__ == '__main__':
    # Command-line driver: argv[1] is the start rule name, optional
    # argv[2] is a file to parse (stdin is used when it is omitted).
    from sys import argv, stdin
    if len(argv) >= 2:
        if len(argv) >= 3:
            f = open(argv[2],'r')
        else:
            f = stdin
        print parse(argv[1], f.read())
    else: print >>sys.stderr, 'Args: <rule> [<filename>]'
- # End -- grammar generated by Yapps
-