# Source Generated with Decompyle++
# File: in.pyc (Python 2.6)

__all__ = []
__docformat__ = 'restructuredtext'
__version__ = '$Id: util.py 1939 2010-03-22 22:38:43Z cthedot $'

from helper import normalize
from itertools import ifilter
import css
import codec
import codecs
import errorhandler
import tokenize2
import types
import xml.dom  # referenced below as xml.dom.NoModificationAllowedErr etc.

try:
    # prefer the Google App Engine fetcher when available
    from _fetchgae import _defaultFetcher
except ImportError:
    from _fetch import _defaultFetcher

log = errorhandler.ErrorHandler()


class _BaseClass(object):
    _log = errorhandler.ErrorHandler()
    _prods = tokenize2.CSSProductions

    def _checkReadonly(self):
        "Raise xml.dom.NoModificationAllowedErr if self is readonly."
        if hasattr(self, '_readonly') and self._readonly:
            raise xml.dom.NoModificationAllowedErr(
                u'%s is readonly.' % self.__class__)
        return False

    def _valuestr(self, t):
        "Return a string value for t (a string or a list of token tuples)."
        if not t:
            return u''
        elif isinstance(t, basestring):
            return t
        else:
            # join the value part (index 1) of each token tuple
            return u''.join([x[1] for x in t])
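
# Illustrative sketch, not part of the original module: how _valuestr
# flattens a token list. The 4-tuples of (type, value, line, col) below
# are hypothetical:
#
#   >>> _BaseClass()._valuestr([('IDENT', u'red', 1, 1), ('S', u' ', 1, 4)])
#   u'red '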


class _NewBase(_BaseClass):

    def __init__(self):
        self._seq = Seq()

    def _setSeq(self, newseq):
        newseq._readonly = True
        self._seq = newseq

    def _tempSeq(self, readonly=False):
        return Seq(readonly=readonly)

    seq = property(lambda self: self._seq,
                   doc='Internal readonly attribute, **DO NOT USE**!')


class Base(_BaseClass):
    __tokenizer2 = tokenize2.Tokenizer()
    _SHORTHANDPROPERTIES = {
        u'background': [],
        u'background-position': [],
        u'border': [],
        u'border-left': [],
        u'border-right': [],
        u'border-top': [],
        u'border-bottom': [],
        u'cue': [],
        u'font': [],
        u'list-style': [],
        u'outline': [],
        u'pause': []}

    @staticmethod
    def _normalize(x):
        return normalize(x)

    def _splitNamespacesOff(self, text_namespaces_tuple):
        if isinstance(text_namespaces_tuple, tuple):
            return (text_namespaces_tuple[0],
                    _SimpleNamespaces(self._log, text_namespaces_tuple[1]))
        else:
            return (text_namespaces_tuple,
                    _SimpleNamespaces(log=self._log))

    def _tokenize2(self, textortokens):
        "Tokenize text or pass through existing tokens."
        if not textortokens:
            return None
        elif isinstance(textortokens, basestring):
            # needs to be tokenized
            return self.__tokenizer2.tokenize(textortokens)
        elif types.GeneratorType == type(textortokens):
            # already tokenized
            return textortokens
        elif isinstance(textortokens, tuple):
            # a single token
            return [textortokens]
        else:
            # already tokenized but return as generator
            return (x for x in textortokens)

    def _nexttoken(self, tokenizer, default=None):
        "Return the next token of tokenizer or the given default."
        try:
            return tokenizer.next()
        except (StopIteration, AttributeError):
            return default

    def _type(self, token):
        "Return the type of token or None."
        if token:
            return token[0]
        else:
            return None

    def _tokenvalue(self, token, normalize=False):
        "Return the (optionally normalized) value of token or None."
        if token and normalize:
            return Base._normalize(token[1])
        elif token:
            return token[1]
        else:
            return None

    def _stringtokenvalue(self, token):
        "Return the actual value of a STRING token without its quotes."
        if token:
            value = token[1]
            return value.replace('\\' + value[0], value[0])[1:-1]
        else:
            return None

    def _uritokenvalue(self, token):
        "Return the actual value of a URI token, e.g. url(x) -> x."
        if token:
            value = token[1][4:-1].strip()
            if value and value[0] in '\'"' and value[0] == value[-1]:
                # a quoted value
                value = value.replace('\\' + value[0], value[0])[1:-1]
            return value
        else:
            return None

    def _tokensupto2(self, tokenizer, starttoken=None, blockstartonly=False,
                     blockendonly=False, mediaendonly=False,
                     importmediaqueryendonly=False, mediaqueryendonly=False,
                     semicolon=False, propertynameendonly=False,
                     propertyvalueendonly=False, propertypriorityendonly=False,
                     selectorattendonly=False, funcendonly=False,
                     listseponly=False, separateEnd=False):
        ends = u';}'
        endtypes = ()
        brace = bracket = parant = 0
        if blockstartonly:
            ends = u'{'
            brace = -1
        elif blockendonly:
            ends = u'}'
            brace = 1
        elif mediaendonly:
            ends = u'}'
            brace = 1
        elif importmediaqueryendonly:
            ends = u';'
            endtypes = ('STRING',)
        elif mediaqueryendonly:
            ends = u'{'
            brace = -1
            endtypes = ('STRING',)
        elif semicolon:
            ends = u';'
        elif propertynameendonly:
            ends = u':;'
        elif propertyvalueendonly:
            ends = u';!'
        elif propertypriorityendonly:
            ends = u';'
        elif selectorattendonly:
            ends = u']'
            if starttoken and self._tokenvalue(starttoken) == u'[':
                bracket = 1
        elif funcendonly:
            ends = u')'
            parant = 1
        elif listseponly:
            ends = u','

        resulttokens = []
        if starttoken:
            resulttokens.append(starttoken)

        if tokenizer:
            for token in tokenizer:
                typ, val, line, col = token
                if 'EOF' == typ:
                    resulttokens.append(token)
                    break

                if u'{' == val:
                    brace += 1
                elif u'}' == val:
                    brace -= 1
                elif u'[' == val:
                    bracket += 1
                elif u']' == val:
                    bracket -= 1
                elif u'(' == val or Base._prods.FUNCTION == typ:
                    # function( or single (
                    parant += 1
                elif u')' == val:
                    parant -= 1

                resulttokens.append(token)

                # end: all subblocks closed and an end char or type reached
                if brace == bracket == parant == 0 and (val in ends or
                                                        typ in endtypes):
                    break
                elif mediaqueryendonly and brace == -1 and \
                        bracket == parant == 0 and typ in endtypes:
                    # mediaqueryendonly ending with a STRING
                    break

        if separateEnd:
            if resulttokens:
                return resulttokens[:-1], resulttokens[-1]
            else:
                return resulttokens, None
        else:
            return resulttokens
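
    # Illustrative sketch, not part of the original module: with
    # blockendonly=True, _tokensupto2 collects tokens through the '}' that
    # closes the current block, balancing nested braces. The token tuples
    # are hypothetical (type, value, line, col):
    #
    #   tokens = iter([('CHAR', u'a', 1, 1), ('CHAR', u'{', 1, 2),
    #                  ('CHAR', u'}', 1, 3), ('CHAR', u'}', 1, 4)])
    #   Base()._tokensupto2(tokens, blockendonly=True)
    #   # -> all four tokens; the loop stops at the second u'}', where the
    #   #    initial brace counter of 1 finally drops to 0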

    def _adddefaultproductions(self, productions, new=None):
        """Add default productions ATKEYWORD, COMMENT, S and EOF if not
        overridden; used by _parse only."""

        def ATKEYWORD(expected, seq, token, tokenizer=None):
            "Default implementation for an unexpected @rule."
            if expected != 'EOF':
                rule = css.CSSUnknownRule()
                rule.cssText = self._tokensupto2(tokenizer, token)
                if rule.wellformed:
                    seq.append(rule)
            else:
                new['wellformed'] = False
                self._log.error(u'Expected EOF.', token=token)
            return expected

        def COMMENT(expected, seq, token, tokenizer=None):
            "Default implementation for a COMMENT token."
            seq.append(css.CSSComment([token]))
            return expected

        def S(expected, seq, token, tokenizer=None):
            "Default implementation for an S (whitespace) token: ignored."
            return expected

        def EOF(expected=None, seq=None, token=None, tokenizer=None):
            "Default implementation for an EOF token."
            return 'EOF'

        p = {'ATKEYWORD': ATKEYWORD,
             'COMMENT': COMMENT,
             'S': S,
             'EOF': EOF}
        p.update(productions)
        return p

    def _parse(self, expected, seq, tokenizer, productions, default=None,
               new=None, initialtoken=None):
        """Parse tokens into seq by dispatching to the production matching
        each token type; returns (wellformed, expected)."""
        wellformed = True
        if initialtoken:
            # build a new tokenizer which yields initialtoken first
            def tokens():
                yield initialtoken
                for item in tokenizer:
                    yield item

            fulltokenizer = (t for t in tokens())
        else:
            fulltokenizer = tokenizer

        if fulltokenizer:
            prods = self._adddefaultproductions(productions, new)
            for token in fulltokenizer:
                p = prods.get(token[0], default)
                if p:
                    expected = p(expected, seq, token, tokenizer)
                else:
                    wellformed = False
                    self._log.error(u'Unexpected token (%s, %s, %s, %s)' % token)

        return wellformed, expected
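
# Illustrative sketch, not part of the original module: _parse dispatches
# each token to the production registered for its type. The production and
# the instance `base` below are hypothetical:
#
#   def IDENT(expected, seq, token, tokenizer=None):
#       seq.append(token[1])
#       return expected
#
#   wellformed, expected = base._parse(expected=None, seq=[],
#                                      tokenizer=base._tokenize2(u'a b'),
#                                      productions={'IDENT': IDENT})
#   # seq is now [u'a', u'b']; the whitespace token was consumed by the
#   # default S production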


class Base2(Base, _NewBase):

    def __init__(self):
        self._seq = Seq()

    def _adddefaultproductions(self, productions, new=None):
        """Add default productions ATKEYWORD, COMMENT, S and EOF if not
        overridden; used by _parse only."""

        def ATKEYWORD(expected, seq, token, tokenizer=None):
            "Default implementation for an unexpected @rule."
            if expected != 'EOF':
                rule = css.CSSUnknownRule()
                rule.cssText = self._tokensupto2(tokenizer, token)
                if rule.wellformed:
                    seq.append(rule, css.CSSRule.UNKNOWN_RULE,
                               line=token[2], col=token[3])
            else:
                new['wellformed'] = False
                self._log.error(u'Expected EOF.', token=token)
            return expected

        def COMMENT(expected, seq, token, tokenizer=None):
            "Default implementation for a COMMENT token."
            if expected == 'EOF':
                new['wellformed'] = False
                self._log.error(u'Expected EOF but found comment.',
                                token=token)
            seq.append(css.CSSComment([token]), 'COMMENT')
            return expected

        def S(expected, seq, token, tokenizer=None):
            "Default implementation for an S (whitespace) token."
            if expected == 'EOF':
                new['wellformed'] = False
                self._log.error(u'Expected EOF but found whitespace.',
                                token=token)
            return expected

        def EOF(expected=None, seq=None, token=None, tokenizer=None):
            "Default implementation for an EOF token."
            return 'EOF'

        defaultproductions = {'ATKEYWORD': ATKEYWORD,
                              'COMMENT': COMMENT,
                              'S': S,
                              'EOF': EOF}
        defaultproductions.update(productions)
        return defaultproductions


class Seq(object):
    """A list of Item tuples (value, type, line, col), appendable only
    while not readonly."""

    def __init__(self, readonly=True):
        self._seq = []
        self._readonly = readonly

    def __repr__(self):
        "returns a repr same as a list of tuples of (value, type)"
        return u'%s([\n    %s], readonly=%r)' % (
            self.__class__.__name__,
            u',\n    '.join([u'%r' % item for item in self._seq]),
            self._readonly)

    def __str__(self):
        vals = []
        for v in self:
            if isinstance(v.value, basestring):
                vals.append(v.value)
            elif type(v) == tuple:
                vals.append(v.value[1])
            else:
                vals.append(str(v))
        return '<cssutils.%s.%s object length=%r values=%r readonly=%r at 0x%x>' % (
            self.__module__, self.__class__.__name__, len(self),
            u', '.join(vals), self._readonly, id(self))

    def __delitem__(self, i):
        del self._seq[i]

    def __getitem__(self, i):
        return self._seq[i]

    def __setitem__(self, i, (val, typ, line, col)):
        self._seq[i] = Item(val, typ, line, col)

    def __iter__(self):
        return iter(self._seq)

    def __len__(self):
        return len(self._seq)

    def absorb(self, other):
        "Replace own contents with those of the other Seq."
        self._seq = other._seq

    def append(self, val, typ, line=None, col=None):
        "If not readonly, append Item(val, typ, line, col)."
        if self._readonly:
            raise AttributeError('Seq is readonly.')
        self._seq.append(Item(val, typ, line, col))

    def appendItem(self, item):
        "If not readonly, append an existing Item."
        if self._readonly:
            raise AttributeError('Seq is readonly.')
        self._seq.append(item)

    def replace(self, index=-1, val=None, typ=None, line=None, col=None):
        "If not readonly, replace the item at index."
        if self._readonly:
            raise AttributeError('Seq is readonly.')
        self._seq[index] = Item(val, typ, line, col)

    def rstrip(self):
        "Remove trailing S (whitespace) items."
        while self._seq and self._seq[-1].type == tokenize2.CSSProductions.S:
            del self._seq[-1]

    def appendToVal(self, val=None, index=-1):
        "If not readonly, append val to the value of the item at index."
        if self._readonly:
            raise AttributeError('Seq is readonly.')
        old = self._seq[index]
        self._seq[index] = Item(old.value + val, old.type, old.line, old.col)


class Item(object):
    """A single item in the Seq list of a Seq instance with readonly
    value, type, line and col."""

    def __init__(self, value, type, line=None, col=None):
        self.__value = value
        self.__type = type
        self.__line = line
        self.__col = col

    type = property(lambda self: self.__type)
    value = property(lambda self: self.__value)
    line = property(lambda self: self.__line)
    col = property(lambda self: self.__col)

    def __repr__(self):
        return '%s.%s(value=%r, type=%r, line=%r, col=%r)' % (
            self.__module__, self.__class__.__name__,
            self.__value, self.__type, self.__line, self.__col)
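
# Illustrative sketch, not part of the original module: a writeable Seq
# stores Item objects and refuses changes once marked readonly:
#
#   s = Seq(readonly=False)
#   s.append(u'red', 'IDENT', line=1, col=5)
#   s[0].value                  # -> u'red'
#   s._readonly = True
#   s.append(u'blue', 'IDENT')  # raises AttributeError('Seq is readonly.')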


class ListSeq(object):

    def __init__(self):
        self.seq = []

    def __contains__(self, item):
        return item in self.seq

    def __delitem__(self, index):
        del self.seq[index]

    def __getitem__(self, index):
        return self.seq[index]

    def __iter__(self):
        def gen():
            for x in self.seq:
                yield x

        return gen()

    def __len__(self):
        return len(self.seq)

    def __setitem__(self, index, item):
        raise NotImplementedError

    def append(self, item):
        raise NotImplementedError


class _Namespaces(object):
    """A dictionary-like wrapper for the effective @namespace rules of a
    parent CSSStyleSheet."""

    def __init__(self, parentStyleSheet, log=None, *args):
        self.parentStyleSheet = parentStyleSheet
        self._log = log

    def __repr__(self):
        return '%r' % self.namespaces

    def __contains__(self, prefix):
        return prefix in self.namespaces

    def __delitem__(self, prefix):
        "Delete the namespace rule with the given prefix."
        if not prefix:
            prefix = u''
        delrule = self.__findrule(prefix)
        for i, rule in enumerate(ifilter(lambda r: r.type == r.NAMESPACE_RULE,
                                         self.parentStyleSheet.cssRules)):
            if rule == delrule:
                self.parentStyleSheet.deleteRule(i)
                return

        self._log.error('Prefix %r not found.' % prefix,
                        error=xml.dom.NamespaceErr)

    def __getitem__(self, prefix):
        try:
            return self.namespaces[prefix]
        except KeyError:
            self._log.error('Prefix %r not found.' % prefix,
                            error=xml.dom.NamespaceErr)

    def __iter__(self):
        return self.namespaces.__iter__()

    def __len__(self):
        return len(self.namespaces)

    def __setitem__(self, prefix, namespaceURI):
        "Replace an existing rule's URI or prefix, or insert a new rule."
        if not prefix:
            prefix = u''
        rule = self.__findrule(prefix)
        if not rule:
            self.parentStyleSheet.insertRule(
                css.CSSNamespaceRule(prefix=prefix,
                                     namespaceURI=namespaceURI),
                inOrder=True)
        else:
            if prefix in self.namespaces:
                rule.namespaceURI = namespaceURI
            if namespaceURI in self.namespaces.values():
                rule.prefix = prefix

    def __findrule(self, prefix):
        # return the last namespace rule with the given prefix
        for rule in ifilter(lambda r: r.type == r.NAMESPACE_RULE,
                            reversed(self.parentStyleSheet.cssRules)):
            if rule.prefix == prefix:
                return rule

    def namespaces(self):
        """Effective prefix -> namespaceURI mapping; if a namespaceURI is
        declared with several prefixes only the last rule is effective."""
        namespaces = {}
        for rule in ifilter(lambda r: r.type == r.NAMESPACE_RULE,
                            reversed(self.parentStyleSheet.cssRules)):
            if rule.namespaceURI not in namespaces.values():
                namespaces[rule.prefix] = rule.namespaceURI
        return namespaces

    namespaces = property(namespaces)

    def get(self, prefix, default):
        return self.namespaces.get(prefix, default)

    def items(self):
        return self.namespaces.items()

    def keys(self):
        return self.namespaces.keys()

    def values(self):
        return self.namespaces.values()

    def prefixForNamespaceURI(self, namespaceURI):
        "Return the effective prefix for namespaceURI or raise IndexError."
        for prefix, uri in self.namespaces.items():
            if uri == namespaceURI:
                return prefix

        raise IndexError(u'NamespaceURI %r not found.' % namespaceURI)

    def __str__(self):
        return u'<cssutils.util.%s object parentStyleSheet=%r at 0x%x>' % (
            self.__class__.__name__, str(self.parentStyleSheet), id(self))
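
# Illustrative sketch, not part of the original module: _Namespaces is a
# live view over a sheet's @namespace rules. For a hypothetical sheet
# containing:
#
#   @namespace p1 "http://example.com/a";
#   @namespace p2 "http://example.com/a";
#
# only the last rule per namespaceURI is effective, so keys() yields
# [u'p2'] and prefixForNamespaceURI(u'http://example.com/a') -> u'p2'.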


class _SimpleNamespaces(_Namespaces):

    def __init__(self, log=None, *args):
        super(_SimpleNamespaces, self).__init__(parentStyleSheet=None,
                                                log=log)
        self.__namespaces = dict(*args)

    def __setitem__(self, prefix, namespaceURI):
        self.__namespaces[prefix] = namespaceURI

    namespaces = property(lambda self: self.__namespaces,
                          doc=u'Dict Wrapper for self.sheets @namespace rules.')

    def __str__(self):
        return u'<cssutils.util.%s object namespaces=%r at 0x%x>' % (
            self.__class__.__name__, self.namespaces, id(self))

    def __repr__(self):
        return u'cssutils.util.%s(%r)' % (self.__class__.__name__,
                                          self.namespaces)
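
# Illustrative sketch, not part of the original module: _SimpleNamespaces
# keeps a plain dict instead of reading @namespace rules from a sheet:
#
#   ns = _SimpleNamespaces(None, {u'ex': u'http://example.com/ns'})
#   ns[u'ex']                    # -> u'http://example.com/ns'
#   ns[u'new'] = u'http://example.com/other'
#   len(ns)                      # -> 2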


def _readUrl(url, fetcher=None, overrideEncoding=None, parentEncoding=None):
    """Read and decode the cssText of url.

    Returns (encoding, enctype, decodedCssText) where enctype states how
    the encoding was found: 0 overrideEncoding, 1 HTTP header, 2 explicit
    BOM or @charset, 4 parentEncoding, 5 assumed 'utf-8'.
    """
    enctype = None
    if not fetcher:
        fetcher = _defaultFetcher

    r = fetcher(url)
    if r and len(r) == 2 and r[1] is not None:
        httpEncoding, content = r

        if overrideEncoding:
            enctype = 0  # 0. override encoding
            encoding = overrideEncoding
        elif httpEncoding:
            enctype = 1  # 1. HTTP header
            encoding = httpEncoding
        else:
            # detect a BOM or @charset rule
            if isinstance(content, unicode):
                explicit = False
            else:
                contentEncoding, explicit = codec.detectencoding_str(content)

            if explicit:
                enctype = 2  # 2. BOM or @charset: explicit
                encoding = contentEncoding
            elif parentEncoding:
                enctype = 4  # 4. parent stylesheet or document
                encoding = parentEncoding
            else:
                enctype = 5  # 5. assume UTF-8
                encoding = 'utf-8'

        if isinstance(content, unicode):
            decodedCssText = content
        else:
            # the encoding may still be wrong if the source lies about it
            try:
                decodedCssText = codecs.lookup('css')[1](content,
                                                         encoding=encoding)[0]
            except UnicodeDecodeError, e:
                log.warn(e, neverraise=True)
                decodedCssText = None

        return encoding, enctype, decodedCssText
    else:
        return None, None, None
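

# Illustrative sketch, not part of the original module: a fetcher returns
# (httpEncoding, content). With no override and no HTTP encoding, an
# explicit @charset wins (enctype 2). The fetcher below is hypothetical:
#
#   def fetcher(url):
#       return (None, '@charset "ascii";\na {}')
#
#   encoding, enctype, cssText = _readUrl(u'x.css', fetcher=fetcher)
#   # encoding == 'ascii', enctype == 2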