# Source Generated with Decompyle++
# File: in.pyc (Python 2.6)

import re

from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, using, this
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
     Name, String, Number, Literal, Other
from pygments.util import get_choice_opt
from pygments import unistring as uni
from pygments.lexers.web import XmlLexer

__all__ = ['CSharpLexer', 'BooLexer', 'VbNetLexer', 'CSharpAspxLexer',
           'VbNetAspxLexer']


def _escape(st):
    return st.replace(u'\\', u'\\\\').replace(u'-', u'\\-').replace(u'[', u'\\[').replace(u']', u'\\]')


class CSharpLexer(RegexLexer):
    name = 'C#'
    aliases = ['csharp', 'c#']
    filenames = ['*.cs']
    mimetypes = ['text/x-csharp']

    flags = re.MULTILINE | re.DOTALL | re.UNICODE

    # identifier regexes for the three supported 'unicodelevel' settings
    levels = {
        'none': '@?[_a-zA-Z][a-zA-Z0-9_]*',
        'basic': ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
                  '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + uni.Nd +
                  uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'),
        'full': ('@?(?:_|[^' +
                 _escape(uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl')) +
                 '])' + '[^' +
                 _escape(uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                       'Nd', 'Pc', 'Cf', 'Mn', 'Mc')) +
                 ']*'),
    }

    # one complete token definition is generated per unicodelevel
    tokens = {}
    token_variants = True

    for levelname, cs_ident in levels.items():
        tokens[levelname] = {
            'root': [
                # method definitions: return type, name, opening parenthesis
                ('^([ \\t]*(?:' + cs_ident + '(?:\\[\\])?\\s+)+?)(' + cs_ident + ')(\\s*)(\\()',
                 bygroups(using(this), Name.Function, Text, Punctuation)),
                ('^\\s*\\[.*?\\]', Name.Attribute),
                ('[^\\S\\n]+', Text),
                ('\\\\\\n', Text),
                ('//.*?\\n', Comment.Single),
                ('/[*](.|\\n)*?[*]/', Comment.Multiline),
                ('\\n', Text),
                ('[~!%^&*()+=|\\[\\]:;,.<>/?-]', Punctuation),
                ('[{}]', Punctuation),
                ('@"(\\\\\\\\|\\\\"|[^"])*"', String),
                ('"(\\\\\\\\|\\\\"|[^"\\n])*["\\n]', String),
                ("'\\\\.'|'[^\\\\]'", String.Char),
                ('[0-9](\\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?',
                 Number),
                ('#[ \\t]*(if|endif|else|elif|define|undef|'
                 'line|error|warning|region|endregion|pragma)\\b.*?\\n',
                 Comment.Preproc),
                ('\\b(extern)(\\s+)(alias)\\b', bygroups(Keyword, Text, Keyword)),
                ('(abstract|as|base|break|case|catch|checked|const|continue|'
                 'default|delegate|do|else|enum|event|explicit|extern|false|'
                 'finally|fixed|for|foreach|goto|if|implicit|in|interface|'
                 'internal|is|lock|new|null|operator|out|override|params|'
                 'private|protected|public|readonly|ref|return|sealed|sizeof|'
                 'stackalloc|static|switch|this|throw|true|try|typeof|'
                 'unchecked|unsafe|virtual|void|while|get|set|new|partial|'
                 'yield|add|remove|value)\\b', Keyword),
                ('(global)(::)', bygroups(Keyword, Punctuation)),
                ('(bool|byte|char|decimal|double|float|int|long|object|sbyte|'
                 'short|string|uint|ulong|ushort)\\b\\??', Keyword.Type),
                ('(class|struct)(\\s+)', bygroups(Keyword, Text), 'class'),
                ('(namespace|using)(\\s+)', bygroups(Keyword, Text), 'namespace'),
                (cs_ident, Name),
            ],
            'class': [
                (cs_ident, Name.Class, '#pop'),
            ],
            'namespace': [
                ('(?=\\()', Text, '#pop'),  # using (resource)
                ('(' + cs_ident + '|\\.)+', Name.Namespace, '#pop'),
            ],
        }

    def __init__(self, **options):
        level = get_choice_opt(options, 'unicodelevel', self.tokens.keys(),
                               'basic')
        if level not in self._all_tokens:
            # compile the regexes for this unicodelevel now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]

        RegexLexer.__init__(self, **options)


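# Usage sketch for the 'unicodelevel' option (a minimal example, assuming a
# standard Pygments installation; the C# snippet is purely illustrative):
#
#   >>> from pygments import highlight
#   >>> from pygments.formatters import HtmlFormatter
#   >>> print highlight(u'class Widget { int count; }',
#   ...                 CSharpLexer(unicodelevel='full'), HtmlFormatter())
#
# 'none' restricts identifiers to ASCII, 'basic' (the default) allows the
# common Unicode letter categories, and 'full' uses the complete identifier
# grammar built from pygments.unistring above.

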
class BooLexer(RegexLexer):
    name = 'Boo'
    aliases = ['boo']
    filenames = ['*.boo']
    mimetypes = ['text/x-boo']

    tokens = {
        'root': [
            ('\\s+', Text),
            ('(#|//).*$', Comment.Single),
            ('/[*]', Comment.Multiline, 'comment'),
            ('[]{}:(),.;[]', Punctuation),
            ('\\\\\\n', Text),
            ('\\\\', Text),
            ('(in|is|and|or|not)\\b', Operator.Word),
            ('/(\\\\\\\\|\\\\/|[^/\\s])/', String.Regex),
            ('@/(\\\\\\\\|\\\\/|[^/])*/', String.Regex),
            ('=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
            ('(as|abstract|callable|constructor|destructor|do|import|enum|'
             'event|final|get|interface|internal|of|override|partial|private|'
             'protected|public|return|set|static|struct|transient|virtual|'
             'yield|super|and|break|cast|continue|elif|else|ensure|except|'
             'for|given|goto|if|in|is|isa|not|or|otherwise|pass|raise|ref|'
             'try|unless|when|while|from|as)\\b', Keyword),
            ('def(?=\\s+\\(.*?\\))', Keyword),
            ('(def)(\\s+)', bygroups(Keyword, Text), 'funcname'),
            ('(class)(\\s+)', bygroups(Keyword, Text), 'classname'),
            ('(namespace)(\\s+)', bygroups(Keyword, Text), 'namespace'),
            ('(?<!\\.)(true|false|null|self|__eval__|__switch__|array|'
             'assert|checked|enumerate|filter|getter|len|lock|map|matrix|'
             'max|min|normalArrayIndexing|print|property|range|'
             'rawArrayIndexing|required|typeof|unchecked|using|yieldAll|'
             'zip)\\b', Name.Builtin),
            # string literals; the escaped-backslash and escaped-quote
            # alternatives need doubled backslashes in these non-raw literals
            ('"""(\\\\\\\\|\\\\"|.*?)"""', String.Double),
            ('"(\\\\\\\\|\\\\"|[^"]*?)"', String.Double),
            ("'(\\\\\\\\|\\\\'|[^']*?)'", String.Single),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
            ('(\\d+\\.\\d*|\\d*\\.\\d+)([fF][+-]?[0-9]+)?', Number.Float),
            ('[0-9][0-9\\.]*(m|ms|d|h|s)', Number),
            ('0\\d+', Number.Oct),
            ('0x[a-fA-F0-9]+', Number.Hex),
            ('\\d+L', Number.Integer.Long),
            ('\\d+', Number.Integer),
        ],
        'comment': [
            ('/[*]', Comment.Multiline, '#push'),
            ('[*]/', Comment.Multiline, '#pop'),
            ('[^/*]', Comment.Multiline),
            ('[*/]', Comment.Multiline),
        ],
        'funcname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop'),
        ],
        'classname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'),
        ],
        'namespace': [
            ('[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace, '#pop'),
        ],
    }


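# Quick token-stream check (a sketch, assuming Pygments is importable; the
# Boo snippet is illustrative only):
#
#   >>> for token, value in BooLexer().get_tokens(u'print "hello"'):
#   ...     print token, repr(value)
#
# get_tokens() yields (tokentype, text) pairs, which is often easier to
# inspect than formatted output when tuning the regexes above.

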
class VbNetLexer(RegexLexer):
    name = 'VB.net'
    aliases = ['vb.net', 'vbnet']
    filenames = ['*.vb', '*.bas']
    mimetypes = ['text/x-vbnet', 'text/x-vba']

    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            ('^\\s*<.*?>', Name.Attribute),
            ('\\s+', Text),
            ('\\n', Text),
            ('rem\\b.*?\\n', Comment),
            ("'.*?\\n", Comment),
            ('#If\\s.*?\\sThen|#ElseIf\\s.*?\\sThen|#End\\s+If|#Const|'
             '#ExternalSource.*?\\n|#End\\s+ExternalSource|'
             '#Region.*?\\n|#End\\s+Region|#ExternalChecksum',
             Comment.Preproc),
            ('[\\(\\){}!#,.:]', Punctuation),
            ('Option\\s+(Strict|Explicit|Compare)\\s+(On|Off|Binary|Text)',
             Keyword.Declaration),
            ('(?<!\\.)(AddHandler|Alias|ByRef|ByVal|Call|Case|Catch|'
             'CBool|CByte|CChar|CDate|CDec|CDbl|CInt|CLng|CObj|Const|'
             'Continue|CSByte|CShort|CSng|CStr|CType|CUInt|CULng|CUShort|'
             'Declare|Default|Delegate|Dim|DirectCast|Do|Each|Else|ElseIf|'
             'End|EndIf|Enum|Erase|Error|Event|Exit|False|Finally|For|'
             'Friend|Function|Get|Global|GoSub|GoTo|Handles|If|Implements|'
             'Imports|Inherits|Interface|Let|Lib|Loop|Me|Module|MustInherit|'
             'MustOverride|MyBase|MyClass|Namespace|Narrowing|New|Next|Not|'
             'Nothing|NotInheritable|NotOverridable|Of|On|Operator|Option|'
             'Optional|Overloads|Overridable|Overrides|ParamArray|Partial|'
             'Private|Property|Protected|Public|RaiseEvent|ReadOnly|ReDim|'
             'RemoveHandler|Resume|Return|Select|Set|Shadows|Shared|Single|'
             'Static|Step|Stop|Structure|Sub|SyncLock|Then|Throw|To|True|'
             'Try|TryCast|Wend|Using|When|While|Widening|With|WithEvents|'
             'WriteOnly)\\b', Keyword),
            ('(?<!\\.)(Function|Sub|Property)(\\s+)',
             bygroups(Keyword, Text), 'funcname'),
            ('(?<!\\.)(Class|Structure|Enum)(\\s+)',
             bygroups(Keyword, Text), 'classname'),
            ('(?<!\\.)(Namespace|Imports)(\\s+)',
             bygroups(Keyword, Text), 'namespace'),
            ('(?<!\\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|'
             'Object|SByte|Short|Single|String|Variant|UInteger|ULong|'
             'UShort)\\b', Keyword.Type),
            ('(?<!\\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|'
             'Mod|Or|OrElse|TypeOf|Xor)\\b', Operator.Word),
            ('&=|[*]=|/=|\\\\=|\\^=|\\+=|-=|<<=|>>=|<<|>>|:=|'
             '<=|>=|<>|[-&*/\\\\^+=<>]', Operator),
            ('"', String, 'string'),
            ('[a-zA-Z_][a-zA-Z0-9_]*[%&@!#$]?', Name),
            ('#.*?#', Literal.Date),
            ('(\\d+\\.\\d*|\\d*\\.\\d+)([fF][+-]?[0-9]+)?', Number.Float),
            ('\\d+([SILDFR]|US|UI|UL)?', Number.Integer),
            ('&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer),
            ('&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer),
            ('_\\n', Text),  # explicit line continuation
        ],
        'string': [
            ('""', String),           # doubled quote is an escaped quote
            ('"C?', String, '#pop'),  # closing quote, optional Char suffix
            ('[^"]+', String),
        ],
        'funcname': [
            ('[a-z_][a-z0-9_]*', Name.Function, '#pop'),
        ],
        'classname': [
            ('[a-z_][a-z0-9_]*', Name.Class, '#pop'),
        ],
        'namespace': [
            ('[a-z_][a-z0-9_.]*', Name.Namespace, '#pop'),
        ],
    }


class GenericAspxLexer(RegexLexer):
    name = 'aspx-gen'
    filenames = []
    mimetypes = []

    flags = re.DOTALL

    # Code regions (<% ... %> blocks and <script> bodies) are emitted as
    # Other, so a DelegatingLexer can re-lex them with a language root lexer.
    tokens = {
        'root': [
            ('(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)),
            ('(<script.*?>)(.*?)(</script>)',
             bygroups(using(XmlLexer), Other, using(XmlLexer))),
            ('(.+?)(?=<)', using(XmlLexer)),
            ('.+', using(XmlLexer)),
        ],
    }


class CSharpAspxLexer(DelegatingLexer):
    name = 'aspx-cs'
    aliases = ['aspx-cs']
    filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
    mimetypes = []

    def __init__(self, **options):
        super(CSharpAspxLexer, self).__init__(CSharpLexer, GenericAspxLexer,
                                              **options)

    def analyse_text(text):
        if re.search('Page\\s*Language="C#"', text, re.I) is not None:
            return 0.2
        if re.search('script[^>]+language=["\\\']C#', text, re.I) is not None:
            return 0.15
        return 0.001


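# Usage sketch for the delegating ASPX lexers (assuming Pygments is
# installed; the markup below is illustrative only). GenericAspxLexer tags
# the code regions as Other, and DelegatingLexer re-lexes those regions with
# the C# (or VB.NET) root lexer:
#
#   >>> from pygments import highlight
#   >>> from pygments.formatters import HtmlFormatter
#   >>> page = u'<%@ Page Language="C#" %><p><% int n = 1; %></p>'
#   >>> print highlight(page, CSharpAspxLexer(), HtmlFormatter())
#
# analyse_text() returns a small score used by Pygments' lexer guessing to
# choose between the C# and VB.NET ASPX variants.

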
class VbNetAspxLexer(DelegatingLexer):
    name = 'aspx-vb'
    aliases = ['aspx-vb']
    filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
    mimetypes = []

    def __init__(self, **options):
        super(VbNetAspxLexer, self).__init__(VbNetLexer, GenericAspxLexer,
                                             **options)

    def analyse_text(text):
        if re.search('Page\\s*Language="Vb"', text, re.I) is not None:
            return 0.2
        if re.search('script[^>]+language=["\\\']vb', text, re.I) is not None:
            return 0.15


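# A small self-check sketch, assuming this module can be run in place with
# Pygments installed (Python 2 syntax, matching the decompiled source above).
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    # Print ANSI-highlighted output for a quick visual check of the C# rules.
    sample = u'class Counter { private int count; public int Next() { return ++count; } }'
    print highlight(sample, CSharpLexer(), TerminalFormatter())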