# -*- coding: utf-8 -*-
# Source Generated with Decompyle++
# File: in.pyc (Python 2.6)
import re

from pygments.filter import Filter
from pygments.plugin import find_plugin_filters
from pygments.token import (String, Comment, Keyword, Name, Error,
                            Whitespace, string_to_tokentype)
from pygments.util import (get_list_opt, get_int_opt, get_bool_opt,
                           get_choice_opt, ClassNotFound, OptionError)
-
def find_filter_class(filtername):
    """Return the filter class registered under *filtername*.

    Builtin filters (the ``FILTERS`` mapping) take precedence over
    plugin-provided ones; ``None`` is returned when nothing matches.
    """
    builtin = FILTERS.get(filtername)
    if builtin is not None:
        return builtin
    for plugin_name, plugin_cls in find_plugin_filters():
        if plugin_name == filtername:
            return plugin_cls
    return None
-
-
-
def get_filter_by_name(filtername, **options):
    """Instantiate the filter named *filtername* with *options*.

    Raises ``ClassNotFound`` if no builtin or plugin filter has that name.
    """
    cls = find_filter_class(filtername)
    if cls is None:
        raise ClassNotFound('filter %r not found' % filtername)
    return cls(**options)
-
-
def get_all_filters():
    """Yield the names of every known filter, builtin first, then plugins."""
    for builtin_name in FILTERS:
        yield builtin_name
    for plugin_name, _cls in find_plugin_filters():
        yield plugin_name
-
-
-
- def _replace_special(ttype, value, regex, specialttype, replacefunc = (lambda x: x)):
- last = 0
- for match in regex.finditer(value):
- start = match.start()
- end = match.end()
- if start != last:
- yield (ttype, value[last:start])
-
- yield (specialttype, replacefunc(value[start:end]))
- last = end
-
- if last != len(value):
- yield (ttype, value[last:])
-
-
-
class CodeTagFilter(Filter):
    """Highlight special code tags in comments and docstrings.

    Options accepted:

    `codetags` : list of strings
        Tags to flag as ``Comment.Special``.  Defaults to
        ``['XXX', 'TODO', 'BUG', 'NOTE']``.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        tags = get_list_opt(options, 'codetags',
                            ['XXX', 'TODO', 'BUG', 'NOTE'])
        # The decompiler destroyed this expression; rebuilt as a compiled,
        # word-bounded alternation of the (escaped) tags, matching what the
        # filter() method below expects (a regex with finditer()).
        self.tag_re = re.compile(r'\b(%s)\b' % '|'.join(
            re.escape(tag) for tag in tags if tag))

    def filter(self, lexer, stream):
        regex = self.tag_re
        for ttype, value in stream:
            if ttype in String.Doc or \
               (ttype in Comment and ttype not in Comment.Preproc):
                # Emit the token split around each code tag.  The original
                # token must NOT also be yielded afterwards — the decompiled
                # code dropped the `else` and duplicated every token.
                for sttype, svalue in _replace_special(
                        ttype, value, regex, Comment.Special):
                    yield sttype, svalue
            else:
                yield ttype, value
-
-
-
-
class KeywordCaseFilter(Filter):
    """Convert keyword tokens to lowercase, uppercase or capitalized form.

    Options accepted:

    `case` : string
        One of ``'lower'``, ``'upper'`` or ``'capitalize'``
        (default: ``'lower'``).
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        case = get_choice_opt(options, 'case',
                              ['lower', 'upper', 'capitalize'], 'lower')
        # The decompiled source used the Python 2-only `unicode` builtin,
        # a NameError on Python 3; `str` exposes the same unbound methods.
        self.convert = getattr(str, case)

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype in Keyword:
                yield ttype, self.convert(value)
            else:
                yield ttype, value
-
-
-
-
class NameHighlightFilter(Filter):
    """Re-type selected ``Name`` tokens so they stand out.

    Options accepted:

    `names` : list of strings
        Names whose tokens get the alternative token type.
    `tokentype` : string or token type
        Token type to assign to matching names
        (default: ``Name.Function``).
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.names = set(get_list_opt(options, 'names', []))
        tokentype = options.get('tokentype')
        self.tokentype = (string_to_tokentype(tokentype)
                          if tokentype else Name.Function)

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype is Name and value in self.names:
                yield self.tokentype, value
            else:
                yield ttype, value
-
-
-
-
class ErrorToken(Exception):
    """Default exception raised by :class:`RaiseOnErrorTokenFilter`."""
-
-
class RaiseOnErrorTokenFilter(Filter):
    """Raise an exception when the lexer generates an ``Error`` token.

    Options accepted:

    `excclass` : Exception class
        The exception class to raise, called with the error token's value.
        Defaults to :class:`ErrorToken`.

    Raises ``OptionError`` at construction time when `excclass` is not an
    exception class.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.exception = options.get('excclass', ErrorToken)
        try:
            # issubclass() raises TypeError for non-class arguments; that
            # and a False result both funnel into the same OptionError.
            # (A decompiler artifact duplicated this check; removed.)
            if not issubclass(self.exception, Exception):
                raise TypeError
        except TypeError:
            raise OptionError('excclass option is not an exception class')

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype is Error:
                raise self.exception(value)
            yield ttype, value
-
-
-
-
class VisibleWhitespaceFilter(Filter):
    """Replace whitespace with visible glyphs so it can be highlighted.

    Options accepted:

    `spaces`, `tabs`, `newlines` : string or bool
        A one-character string is used verbatim as the replacement; a
        truthy value selects the default glyph ('·', '»', '¶'
        respectively); a falsy value (the default) leaves that kind of
        whitespace untouched.
    `wstokentype` : bool
        If true (default), replaced whitespace is emitted as separate
        ``Whitespace`` tokens; otherwise substitution happens in place.
    `tabsize` : int
        Rendered width of a tab when `tabs` is enabled (default: 8).
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        # Glyphs below are the originals; the dump showed cp437-mangled
        # UTF-8 ('┬╖' etc.) for '·', '»', '¶'.
        for name, default in [('spaces', u'·'),
                              ('tabs', u'»'),
                              ('newlines', u'¶')]:
            opt = options.get(name, False)
            if isinstance(opt, str) and len(opt) == 1:
                # Explicit single-character replacement.
                setattr(self, name, opt)
            else:
                # Truthy flag selects the default glyph, falsy disables.
                # (The decompiled code garbled `opt and default or ''`
                # into an unconditional '' — defaults were never applied.)
                setattr(self, name, (opt and default) or '')
        tabsize = get_int_opt(options, 'tabsize', 8)
        if self.tabs:
            # Pad the tab glyph to the configured tab width.
            self.tabs += ' ' * (tabsize - 1)
        if self.newlines:
            # Keep the actual newline after the glyph so layout survives.
            self.newlines += '\n'
        self.wstt = get_bool_opt(options, 'wstokentype', True)

    def filter(self, lexer, stream):
        if self.wstt:
            # Emit replaced whitespace as separate Whitespace tokens.
            # Fall back to the literal character when a kind is disabled
            # (decompiled code had dropped the `self.X or` parts, ignoring
            # any configured replacements).
            spaces = self.spaces or ' '
            tabs = self.tabs or '\t'
            newlines = self.newlines or '\n'
            regex = re.compile(r'\s')

            def replacefunc(wschar):
                if wschar == ' ':
                    return spaces
                elif wschar == '\t':
                    return tabs
                elif wschar == '\n':
                    return newlines
                return wschar

            for ttype, value in stream:
                for sttype, svalue in _replace_special(
                        ttype, value, regex, Whitespace, replacefunc):
                    yield sttype, svalue
        else:
            # Substitute in place, keeping the original token types.
            spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
            for ttype, value in stream:
                if spaces:
                    value = value.replace(' ', spaces)
                if tabs:
                    value = value.replace('\t', tabs)
                if newlines:
                    value = value.replace('\n', newlines)
                yield ttype, value
-
-
-
-
class GobbleFilter(Filter):
    """Gobble (remove) the first ``n`` characters of every line.

    Options accepted:

    `n` : int
        Number of characters to strip from the start of each line
        (default: 0).
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.n = get_int_opt(options, 'n', 0)

    def gobble(self, value, left):
        """Drop up to *left* leading characters from *value*.

        Returns the remaining text and how many characters are still owed
        (non-zero only when *value* was shorter than *left*).
        """
        if left < len(value):
            return value[left:], 0
        return '', left - len(value)

    def filter(self, lexer, stream):
        n = self.n
        left = n
        for ttype, value in stream:
            lines = value.split('\n')
            # The first piece continues the current line; later pieces each
            # start a fresh line and owe the full `n` characters again.
            lines[0], left = self.gobble(lines[0], left)
            for idx in range(1, len(lines)):
                lines[idx], left = self.gobble(lines[idx], n)
            joined = '\n'.join(lines)
            if joined != '':
                yield ttype, joined
-
-
-
-
class TokenMergeFilter(Filter):
    """Merge consecutive tokens of the same token type into one token."""

    def __init__(self, **options):
        Filter.__init__(self, **options)

    def filter(self, lexer, stream):
        # Accumulate runs of identically-typed tokens, flushing a run each
        # time the type changes.  (Removed the decompiler's unused
        # `output = []` local.)
        current_type = None
        current_value = None
        for ttype, value in stream:
            if ttype is current_type:
                current_value += value
            else:
                if current_type is not None:
                    yield current_type, current_value
                current_type = ttype
                current_value = value
        # Flush the final run, if any token was seen at all.
        if current_type is not None:
            yield current_type, current_value
-
-
# Registry of builtin filter names; plugin filters are discovered
# separately through find_plugin_filters().
FILTERS = {
    'codetagify': CodeTagFilter,
    'keywordcase': KeywordCaseFilter,
    'highlight': NameHighlightFilter,
    'raiseonerror': RaiseOnErrorTokenFilter,
    'whitespace': VisibleWhitespaceFilter,
    'gobble': GobbleFilter,
    'tokenmerge': TokenMergeFilter,
}