From 7852a27574b0c4b006ca72f61d8567d36a0dfbc8 Mon Sep 17 00:00:00 2001
From: Simon Sapin
Date: Tue, 21 Aug 2012 18:53:50 +0200
Subject: [PATCH 01/28] Fix tokenizing \5c

\5c was unescaped into \ which was then mistakenly removed by
simple_unescape.
---
 CHANGES                         |  9 +++++++++
 tinycss/speedups.pyx            | 12 ++++++------
 tinycss/tests/test_tokenizer.py |  4 ++++
 tinycss/token_data.py           |  5 +++--
 tinycss/tokenizer.py            | 12 ++++++------
 tinycss/version.py              |  2 +-
 6 files changed, 29 insertions(+), 15 deletions(-)

diff --git a/CHANGES b/CHANGES
index fe1fcd8..f98be83 100644
--- a/CHANGES
+++ b/CHANGES
@@ -2,6 +2,15 @@ tinycss changelog
 =================
 
+Version 0.3
+-----------
+
+Not released yet.
+
+* Fix a bug when parsing \5c (an escaped backslash).
+
+
 Version 0.2
 -----------
 
diff --git a/tinycss/speedups.pyx b/tinycss/speedups.pyx
index d90a09f..cb75aee 100644
--- a/tinycss/speedups.pyx
+++ b/tinycss/speedups.pyx
@@ -130,8 +130,8 @@ def tokenize_flat(css_source, int ignore_comments=1):
             value = match.group(1)
             value = float(value) if '.' in value else int(value)
             unit = match.group(2)
-            unit = unicode_unescape(unit)
             unit = simple_unescape(unit)
+            unit = unicode_unescape(unit)
             unit = unit.lower()  # normalize
         elif type_ == PERCENTAGE:
             value = css_value[:-1]
@@ -145,20 +145,20 @@ def tokenize_flat(css_source, int ignore_comments=1):
                 value = int(value)
                 type_name = 'INTEGER'
         elif type_ in (IDENT, ATKEYWORD, HASH, FUNCTION):
-            value = unicode_unescape(css_value)
-            value = simple_unescape(value)
+            value = simple_unescape(css_value)
+            value = unicode_unescape(value)
         elif type_ == URI:
             value = match.group(1)
             if value and value[0] in '"\'':
                 value = value[1:-1]  # Remove quotes
                 value = newline_unescape(value)
-            value = unicode_unescape(value)
             value = simple_unescape(value)
+            value = unicode_unescape(value)
         elif type_ == STRING:
             value = css_value[1:-1]  # Remove quotes
             value = newline_unescape(value)
-            value = unicode_unescape(value)
             value = simple_unescape(value)
+            value = unicode_unescape(value)
         # BAD_STRING can only be one of:
         # * Unclosed string at the end of the stylesheet:
         #   Close the string, but this is not an error.
@@ -171,8 +171,8 @@ def tokenize_flat(css_source, int ignore_comments=1):
             type_name = 'STRING'
             value = css_value[1:]  # Remove quote
             value = newline_unescape(value)
-            value = unicode_unescape(value)
             value = simple_unescape(value)
+            value = unicode_unescape(value)
         else:
             value = css_value
         token = CToken(type_name, css_value, value, unit, line, column)
diff --git a/tinycss/tests/test_tokenizer.py b/tinycss/tests/test_tokenizer.py
index 4508204..8ad6c59 100644
--- a/tinycss/tests/test_tokenizer.py
+++ b/tinycss/tests/test_tokenizer.py
@@ -79,7 +79,11 @@ def test_speedups():
     # Cancel the meaning of special characters
     (r'"Lore\m Ipsum"', [('STRING', 'Lorem Ipsum')]),  # or not specal
+    (r'"Lorem \49psum"', [('STRING', 'Lorem Ipsum')]),
+    (r'"Lorem \49 psum"', [('STRING', 'Lorem Ipsum')]),
     (r'"Lorem\"Ipsum"', [('STRING', 'Lorem"Ipsum')]),
+    (r'"Lorem\\Ipsum"', [('STRING', r'Lorem\Ipsum')]),
+    (r'"Lorem\5c Ipsum"', [('STRING', r'Lorem\Ipsum')]),
     (r'Lorem\+Ipsum', [('IDENT', 'Lorem+Ipsum')]),
     (r'Lorem+Ipsum', [('IDENT', 'Lorem'), ('DELIM', '+'), ('IDENT', 'Ipsum')]),
     (r'url(foo\).png)', [('URI', 'foo).png')]),
diff --git a/tinycss/token_data.py b/tinycss/token_data.py
index d2c2cba..dcd9232 100644
--- a/tinycss/token_data.py
+++ b/tinycss/token_data.py
@@ -43,7 +43,8 @@ w [ \t\r\n\f]*
 nonascii [^\0-\237]
 unicode \\([0-9a-f]{{1,6}})(\r\n|[ \n\r\t\f])?
-escape {unicode}|\\[^\n\r\f0-9a-f]
+simple_escape [^\n\r\f0-9a-f]
+escape {unicode}|\\{simple_escape}
 nmstart [_a-z]|{nonascii}|{escape}
 nmchar [_a-z0-9-]|{nonascii}|{escape}
 name {nmchar}+
@@ -205,7 +206,7 @@ def _unicode_replace(match, int=int, unichr=unichr, maxunicode=sys.maxunicode):
     '')
 
 SIMPLE_UNESCAPE = functools.partial(
-    re.compile(r'\\(.)').sub,
+    re.compile(r'\\(%s)' % COMPILED_MACROS['simple_escape'], re.I).sub,
     # Same as r'\1', but faster on CPython
     operator.methodcaller('group', 1))
 
diff --git a/tinycss/tokenizer.py b/tinycss/tokenizer.py
index 5540027..eba44c1 100644
--- a/tinycss/tokenizer.py
+++ b/tinycss/tokenizer.py
@@ -79,8 +79,8 @@ def tokenize_flat(css_source, ignore_comments=True,
             value = match.group(1)
             value = float(value) if '.' in value else int(value)
             unit = match.group(2)
-            unit = unicode_unescape(unit)
             unit = simple_unescape(unit)
+            unit = unicode_unescape(unit)
             unit = unit.lower()  # normalize
         elif type_ == 'PERCENTAGE':
             value = css_value[:-1]
@@ -94,20 +94,20 @@ def tokenize_flat(css_source, ignore_comments=True,
                 value = int(value)
                 type_ = 'INTEGER'
         elif type_ in ('IDENT', 'ATKEYWORD', 'HASH', 'FUNCTION'):
-            value = unicode_unescape(css_value)
-            value = simple_unescape(value)
+            value = simple_unescape(css_value)
+            value = unicode_unescape(value)
         elif type_ == 'URI':
             value = match.group(1)
             if value and value[0] in '"\'':
                 value = value[1:-1]  # Remove quotes
                 value = newline_unescape(value)
-            value = unicode_unescape(value)
             value = simple_unescape(value)
+            value = unicode_unescape(value)
         elif type_ == 'STRING':
             value = css_value[1:-1]  # Remove quotes
             value = newline_unescape(value)
-            value = unicode_unescape(value)
             value = simple_unescape(value)
+            value = unicode_unescape(value)
         # BAD_STRING can only be one of:
         # * Unclosed string at the end of the stylesheet:
         #   Close the string, but this is not an error.
@@ -120,8 +120,8 @@ def tokenize_flat(css_source, ignore_comments=True,
             type_ = 'STRING'
             value = css_value[1:]  # Remove quote
             value = newline_unescape(value)
-            value = unicode_unescape(value)
             value = simple_unescape(value)
+            value = unicode_unescape(value)
         else:
             value = css_value
         tokens.append(Token(type_, css_value, value, unit, line, column))
diff --git a/tinycss/version.py b/tinycss/version.py
index 68c0733..014a8e4 100644
--- a/tinycss/version.py
+++ b/tinycss/version.py
@@ -1 +1 @@
-VERSION = '0.2'
+VERSION = '0.3'
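The fix above is purely an ordering change: \5c is the unicode escape for a
backslash, so running unicode_unescape first produced a bare \ that the old
catch-all SIMPLE_UNESCAPE pattern r'\\(.)' then swallowed as a "simple"
escape. A minimal sketch of the two passes, using simplified stand-ins for
tinycss's helpers (assumed names and patterns, not the library's exact code),
shows both orders:

    import re

    def unicode_unescape(text):
        # Backslash + 1..6 hex digits + one optional whitespace character,
        # as in the 'unicode' macro above (simplified).
        return re.sub(r'\\([0-9a-f]{1,6})(?:\r\n|[ \n\r\t\f])?',
                      lambda match: chr(int(match.group(1), 16)),
                      text, flags=re.I)

    def simple_unescape(text, old=False):
        # The old pattern ate any escaped character, including a backslash
        # freshly produced by unicode_unescape; the new 'simple_escape'
        # macro skips hex digits and newlines.
        pattern = r'\\(.)' if old else r'\\([^\n\r\f0-9a-f])'
        return re.sub(pattern, r'\1', text, flags=re.I)

    css = r'Lorem\5c Ipsum'  # 5c is the code point of '\'

    # Old order: the backslash produced by unicode_unescape is then lost.
    assert simple_unescape(unicode_unescape(css), old=True) == 'LoremIpsum'

    # New order, as in this patch: the backslash survives.
    assert unicode_unescape(simple_unescape(css)) == r'Lorem\Ipsum'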
From d2cf7f03bbd3652532ba4b0ee211a8fd0233b5b0 Mon Sep 17 00:00:00 2001
From: Guillaume Ayoub
Date: Thu, 23 Aug 2012 23:49:45 +0200
Subject: [PATCH 02/28] Add support for @page:blank pseudo-class
---
 tinycss/page3.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/tinycss/page3.py b/tinycss/page3.py
index 1d1a51e..3c87860 100644
--- a/tinycss/page3.py
+++ b/tinycss/page3.py
@@ -130,7 +130,7 @@ def parse_page_selector(self, head):
         The ``head`` attribute of an unparsed :class:`AtRule`.
     :returns:
         A page selector. For CSS 2.1, this is 'first', 'left', 'right'
-        or None.
+        or None. 'blank' is added by GCPM.
     :raises:
         :class:`~parsing.ParseError` on invalid selectors
@@ -151,7 +151,8 @@ def parse_page_selector(self, head):
                 and head[1].type == 'IDENT'):
             pseudo_class = head[1].value
             specificity = {
-                'first': (1, 0), 'left': (0, 1), 'right': (0, 1),
+                'first': (1, 0), 'blank': (1, 0),
+                'left': (0, 1), 'right': (0, 1),
             }.get(pseudo_class)
             if specificity:
                 return (name, pseudo_class), (name_specificity + specificity)
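With this change, ':blank' gets the same specificity weight as ':first'. A
quick usage sketch (assuming the PageRule.selector and PageRule.specificity
attributes that the test added in the next patch exercises):

    from tinycss.page3 import CSSPage3Parser

    stylesheet = CSSPage3Parser().parse_stylesheet(
        '@page :blank { margin: 2cm }')
    rule = stylesheet.rules[0]
    assert rule.at_keyword == '@page'
    assert rule.selector == (None, 'blank')  # (page name, pseudo-class)
    assert rule.specificity == (0, 1, 0)     # same weight as ':first'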
From 8a22d6109957e81fde038eb133953f90734a0c4a Mon Sep 17 00:00:00 2001
From: Guillaume Ayoub
Date: Fri, 24 Aug 2012 00:00:10 +0200
Subject: [PATCH 03/28] Add test for blank pages
---
 tinycss/tests/test_page3.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tinycss/tests/test_page3.py b/tinycss/tests/test_page3.py
index 15c9e57..aa2f073 100644
--- a/tinycss/tests/test_page3.py
+++ b/tinycss/tests/test_page3.py
@@ -24,6 +24,7 @@
     ('@page :first {}', (None, 'first'), (0, 1, 0), []),
     ('@page:left{}', (None, 'left'), (0, 0, 1), []),
     ('@page :right {}', (None, 'right'), (0, 0, 1), []),
+    ('@page :blank{}', (None, 'blank'), (0, 1, 0), []),
 
     ('@page :last {}', None, None, ['invalid @page selector']),
     ('@page : first {}', None, None, ['invalid @page selector']),

From 28474f78e0a3843c48b7f3d859375b6b379547b8 Mon Sep 17 00:00:00 2001
From: Brian Lee
Date: Tue, 17 Jun 2014 14:46:48 -0400
Subject: [PATCH 04/28] Add an __eq__ operator to Token object
---
 tinycss/token_data.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/tinycss/token_data.py b/tinycss/token_data.py
index dcd9232..d226ac1 100644
--- a/tinycss/token_data.py
+++ b/tinycss/token_data.py
@@ -328,6 +328,16 @@ def as_css(self):
     def __repr__(self):
         return ('<Token {0.type} at {0.line}:{0.column} {0.value!r}{1}>'
                 .format(self, self.unit or ''))
+
+    def __eq__(self, other):
+        if type(self) != type(other):
+            raise TypeError('Cannot compare %s and %s' % (type(self), type(other)))
+        return all((
+            self.type == other.type,
+            self._as_css == other._as_css,
+            self.value == other.value,
+            self.unit == other.unit,
+        ))
 
 
 class ContainerToken(object):

From b0cefeb39f5e5366040528b4674c88030cd2d88e Mon Sep 17 00:00:00 2001
From: Simon Sapin
Date: Wed, 18 Jun 2014 13:46:37 +0200
Subject: [PATCH 05/28] Stop testing on Python 3.1, start on 3.3
---
 .travis.yml | 2 +-
 tox.ini     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 4391da2..7bb92d9 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,8 +3,8 @@ language: python
 python:
   - "2.6"
   - "2.7"
-  - "3.1"
   - "3.2"
+  - "3.3"
 
 install:
   - pip install --use-mirrors Cython

diff --git a/tox.ini b/tox.ini
index e951568..dd97398 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = py26,py27,py31,py32,pypy,sphinx-doctests
+envlist = py26,py27,py32,py33,pypy,sphinx-doctests
 
 [testenv]
 deps = pytest

From 83460368649aca6654d246654fe069d521672cbc Mon Sep 17 00:00:00 2001
From: Alex Eftimie
Date: Wed, 25 Nov 2015 09:46:20 +0200
Subject: [PATCH 06/28] Replace utf8 with utf-8 for gettext compatibility
---
 tinycss/__init__.py             | 2 +-
 tinycss/color3.py               | 2 +-
 tinycss/css21.py                | 2 +-
 tinycss/decoding.py             | 2 +-
 tinycss/page3.py                | 2 +-
 tinycss/parsing.py              | 2 +-
 tinycss/speedups.pyx            | 2 +-
 tinycss/tests/__init__.py       | 2 +-
 tinycss/tests/speed.py          | 2 +-
 tinycss/tests/test_api.py       | 2 +-
 tinycss/tests/test_color3.py    | 2 +-
 tinycss/tests/test_css21.py     | 2 +-
 tinycss/tests/test_decoding.py  | 2 +-
 tinycss/tests/test_page3.py     | 2 +-
 tinycss/tests/test_tokenizer.py | 2 +-
 tinycss/token_data.py           | 2 +-
 tinycss/tokenizer.py            | 2 +-
 17 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/tinycss/__init__.py
b/tinycss/__init__.py index 9eca2b1..04001b6 100644 --- a/tinycss/__init__.py +++ b/tinycss/__init__.py @@ -1,4 +1,4 @@ -# coding: utf8 +# coding: utf-8 """ tinycss ------- diff --git a/tinycss/color3.py b/tinycss/color3.py index 187196e..7d32628 100644 --- a/tinycss/color3.py +++ b/tinycss/color3.py @@ -1,4 +1,4 @@ -# coding: utf8 +# coding: utf-8 """ tinycss.colors3 --------------- diff --git a/tinycss/css21.py b/tinycss/css21.py index 51e6529..aeb9222 100644 --- a/tinycss/css21.py +++ b/tinycss/css21.py @@ -1,4 +1,4 @@ -# coding: utf8 +# coding: utf-8 """ tinycss.css21 ------------- diff --git a/tinycss/decoding.py b/tinycss/decoding.py index 6303e1a..66ccbe2 100644 --- a/tinycss/decoding.py +++ b/tinycss/decoding.py @@ -1,4 +1,4 @@ -# coding: utf8 +# coding: utf-8 """ tinycss.decoding ---------------- diff --git a/tinycss/page3.py b/tinycss/page3.py index 3c87860..c901ed5 100644 --- a/tinycss/page3.py +++ b/tinycss/page3.py @@ -1,4 +1,4 @@ -# coding: utf8 +# coding: utf-8 """ tinycss.page3 ------------------ diff --git a/tinycss/parsing.py b/tinycss/parsing.py index 86e93c0..8479e72 100644 --- a/tinycss/parsing.py +++ b/tinycss/parsing.py @@ -1,4 +1,4 @@ -# coding: utf8 +# coding: utf-8 """ tinycss.parsing --------------- diff --git a/tinycss/speedups.pyx b/tinycss/speedups.pyx index cb75aee..49edfa2 100644 --- a/tinycss/speedups.pyx +++ b/tinycss/speedups.pyx @@ -1,4 +1,4 @@ -# coding: utf8 +# coding: utf-8 """ tinycss.speedups ---------------- diff --git a/tinycss/tests/__init__.py b/tinycss/tests/__init__.py index c7a89e0..1babd98 100644 --- a/tinycss/tests/__init__.py +++ b/tinycss/tests/__init__.py @@ -1,4 +1,4 @@ -# coding: utf8 +# coding: utf-8 """ Test suite for tinycss ---------------------- diff --git a/tinycss/tests/speed.py b/tinycss/tests/speed.py index 2777d4b..b80141e 100644 --- a/tinycss/tests/speed.py +++ b/tinycss/tests/speed.py @@ -1,4 +1,4 @@ -# coding: utf8 +# coding: utf-8 """ Speed tests ----------- diff --git a/tinycss/tests/test_api.py b/tinycss/tests/test_api.py index 01caa3f..7eb007e 100644 --- a/tinycss/tests/test_api.py +++ b/tinycss/tests/test_api.py @@ -1,4 +1,4 @@ -# coding: utf8 +# coding: utf-8 """ Tests for the public API ------------------------ diff --git a/tinycss/tests/test_color3.py b/tinycss/tests/test_color3.py index 3d86785..79c37dc 100644 --- a/tinycss/tests/test_color3.py +++ b/tinycss/tests/test_color3.py @@ -1,4 +1,4 @@ -# coding: utf8 +# coding: utf-8 """ Tests for the CSS 3 color parser -------------------------------- diff --git a/tinycss/tests/test_css21.py b/tinycss/tests/test_css21.py index 48626d7..3af497c 100644 --- a/tinycss/tests/test_css21.py +++ b/tinycss/tests/test_css21.py @@ -1,4 +1,4 @@ -# coding: utf8 +# coding: utf-8 """ Tests for the CSS 2.1 parser ---------------------------- diff --git a/tinycss/tests/test_decoding.py b/tinycss/tests/test_decoding.py index 42df0c3..e2c88a4 100644 --- a/tinycss/tests/test_decoding.py +++ b/tinycss/tests/test_decoding.py @@ -1,4 +1,4 @@ -# coding: utf8 +# coding: utf-8 """ Tests for decoding bytes to Unicode ----------------------------------- diff --git a/tinycss/tests/test_page3.py b/tinycss/tests/test_page3.py index aa2f073..9c55b47 100644 --- a/tinycss/tests/test_page3.py +++ b/tinycss/tests/test_page3.py @@ -1,4 +1,4 @@ -# coding: utf8 +# coding: utf-8 """ Tests for the Paged Media 3 parser ---------------------------------- diff --git a/tinycss/tests/test_tokenizer.py b/tinycss/tests/test_tokenizer.py index 8ad6c59..51328b8 100644 --- a/tinycss/tests/test_tokenizer.py +++ 
b/tinycss/tests/test_tokenizer.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
 """
     Tests for the tokenizer
     -----------------------
diff --git a/tinycss/token_data.py b/tinycss/token_data.py
index d226ac1..32fc26f 100644
--- a/tinycss/token_data.py
+++ b/tinycss/token_data.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
 """
     tinycss.token_data
     ------------------
diff --git a/tinycss/tokenizer.py b/tinycss/tokenizer.py
index eba44c1..e4a4661 100644
--- a/tinycss/tokenizer.py
+++ b/tinycss/tokenizer.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
 """
     tinycss.tokenizer
    -----------------

From 71095fdd1dcc2aeca9b7a31e72da2b120a9ab5f4 Mon Sep 17 00:00:00 2001
From: Guillaume Ayoub
Date: Wed, 20 Jul 2016 10:26:49 +0200
Subject: [PATCH 07/28] Don't use pystil anymore in docs
---
 docs/_templates/layout.html | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html
index 8613b2f..3ac9078 100644
--- a/docs/_templates/layout.html
+++ b/docs/_templates/layout.html
@@ -1,10 +1,4 @@
 {% extends "!layout.html" %}
 
 {% block extrahead %}
-
 {% endblock %}
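The next patch introduces the Fonts Module Level 3 parser. As a usage sketch
of the API it adds (assuming the 'fonts3' shortcut it registers in
make_parser and the FontFaceRule attributes defined in the new module):

    import tinycss

    parser = tinycss.make_parser('fonts3')
    stylesheet = parser.parse_stylesheet(
        '@font-face { font-family: Lorem; src: url(lorem.woff) }')
    rule = stylesheet.rules[0]
    assert rule.at_keyword == '@font-face'
    assert [decl.name for decl in rule.declarations] == ['font-family', 'src']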
From 7ac8bc3f044f6f787368f4c35c00356819880af8 Mon Sep 17 00:00:00 2001
From: Guillaume Ayoub
Date: Wed, 14 Sep 2016 13:12:19 +0200
Subject: [PATCH 08/28] Support CSS Fonts Module Level 3 at-rules

See https://www.w3.org/TR/css-fonts-3/.
This commit adds support for @font-face and @font-feature-values.
Fix #6.
---
 tinycss/__init__.py          |   4 +
 tinycss/fonts3.py            | 202 +++++++++++++++++++++++++++++++++++
 tinycss/tests/test_fonts3.py | 149 ++++++++++++++++++++++
 3 files changed, 355 insertions(+)
 create mode 100644 tinycss/fonts3.py
 create mode 100644 tinycss/tests/test_fonts3.py

diff --git a/tinycss/__init__.py b/tinycss/__init__.py
index 04001b6..3848b33 100644
--- a/tinycss/__init__.py
+++ b/tinycss/__init__.py
@@ -16,10 +16,12 @@
 from .css21 import CSS21Parser
 from .page3 import CSSPage3Parser
+from .fonts3 import CSSFonts3Parser
 
 
 PARSER_MODULES = {
     'page3': CSSPage3Parser,
+    'fonts3': CSSFonts3Parser,
 }
 
 
@@ -30,6 +32,8 @@ def make_parser(*features, **kwargs):
     Positional arguments are base classes the new parser class will extend.
     The string ``'page3'`` is accepted as short for
     :class:`~page3.CSSPage3Parser`.
+    The string ``'fonts3'`` is accepted as short for
+    :class:`~fonts3.CSSFonts3Parser`.
 
     :param kwargs:
         Keyword arguments are passed to the parser’s constructor.
     :returns:
diff --git a/tinycss/fonts3.py b/tinycss/fonts3.py
new file mode 100644
index 0000000..9543466
--- /dev/null
+++ b/tinycss/fonts3.py
@@ -0,0 +1,202 @@
+# coding: utf-8
+"""
+    tinycss.fonts3
+    --------------
+
+    Parser for CSS 3 Fonts syntax:
+    https://www.w3.org/TR/css-fonts-3/
+
+    Adds support for font-face and font-feature-values rules.
+
+    :copyright: (c) 2016 by Kozea.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from __future__ import unicode_literals, division
+
+from .css21 import CSS21Parser, ParseError
+
+
+class FontFaceRule(object):
+    """A parsed at-rule for font faces.
+
+    .. attribute:: at_keyword
+
+        Always ``'@font-face'``.
+
+    .. attribute:: declarations
+
+        A list of :class:`~.css21.Declaration` objects.
+
+    .. attribute:: line
+
+        Source line where this was read.
+
+    .. attribute:: column
+
+        Source column where this was read.
+
+    """
+
+    def __init__(self, at_keyword, declarations, line, column):
+        assert at_keyword == '@font-face'
+        self.at_keyword = at_keyword
+        self.declarations = declarations
+        self.line = line
+        self.column = column
+
+
+class FontFeatureValuesRule(object):
+    """A parsed at-rule for font feature values.
+
+    .. attribute:: at_keyword
+
+        Always ``'@font-feature-values'``.
+
+    .. attribute:: line
+
+        Source line where this was read.
+
+    .. attribute:: column
+
+        Source column where this was read.
+
+    .. attribute:: at_rules
+
+        The list of parsed at-rules inside the @font-feature-values block,
+        in source order.
+
+    .. attribute:: family_names
+
+        A list of strings representing font families.
+
+    """
+
+    def __init__(self, at_keyword, at_rules, family_names, line, column):
+        assert at_keyword == '@font-feature-values'
+        self.at_keyword = at_keyword
+        self.family_names = family_names
+        self.at_rules = at_rules
+        self.line = line
+        self.column = column
+
+
+class FontFeatureRule(object):
+    """A parsed at-rule for font features.
+
+    .. attribute:: at_keyword
+
+        One of the 6 following strings:
+
+        * ``@stylistic``
+        * ``@styleset``
+        * ``@character-variant``
+        * ``@swash``
+        * ``@ornaments``
+        * ``@annotation``
+
+    .. attribute:: declarations
+
+        A list of :class:`~.css21.Declaration` objects.
+
+    .. attribute:: line
+
+        Source line where this was read.
+
+    .. attribute:: column
+
+        Source column where this was read.
+
+    """
+
+    def __init__(self, at_keyword, declarations, line, column):
+        self.at_keyword = at_keyword
+        self.declarations = declarations
+        self.line = line
+        self.column = column
+
+
+class CSSFonts3Parser(CSS21Parser):
+    """Extend :class:`~.css21.CSS21Parser` for `CSS 3 Fonts`_ syntax.
+
+    .. _CSS 3 Fonts: https://www.w3.org/TR/css-fonts-3/
+
+    """
+
+    FONT_FEATURE_VALUES_AT_KEYWORDS = [
+        '@stylistic',
+        '@styleset',
+        '@character-variant',
+        '@swash',
+        '@ornaments',
+        '@annotation',
+    ]
+
+    def parse_at_rule(self, rule, previous_rules, errors, context):
+        if rule.at_keyword == '@font-face':
+            if rule.head:
+                raise ParseError(rule.head[0],
+                    'unexpected %s token in %s rule header'
+                    % (rule.head[0].type, rule.at_keyword))
+            declarations, body_errors = self.parse_declaration_list(rule.body)
+            errors.extend(body_errors)
+            names = [declaration.name for declaration in declarations]
+            if 'src' not in names or 'font-family' not in names:
+                raise ParseError(rule,
+                    '@font-face rule needs src and font-family descriptors')
+            return FontFaceRule(
+                rule.at_keyword, declarations, rule.line, rule.column)
+        elif rule.at_keyword == '@font-feature-values':
+            family_names = tuple(
+                self.parse_font_feature_values_family_names(rule.head))
+            at_rules, body_errors = (
+                self.parse_rules(rule.body or [], '@font-feature-values'))
+            errors.extend(body_errors)
+            return FontFeatureValuesRule(
+                rule.at_keyword, at_rules, family_names,
+                rule.line, rule.column)
+        elif rule.at_keyword in self.FONT_FEATURE_VALUES_AT_KEYWORDS:
+            if context != '@font-feature-values':
+                raise ParseError(rule,
+                    '%s rule not allowed in %s' % (rule.at_keyword, context))
+            declarations, body_errors = self.parse_declaration_list(rule.body)
+            errors.extend(body_errors)
+            return FontFeatureRule(
+                rule.at_keyword, declarations, rule.line, rule.column)
+        return super(CSSFonts3Parser, self).parse_at_rule(
+            rule, previous_rules, errors, context)
+
+    def parse_font_feature_values_family_names(self, tokens):
+        """Parse an @font-feature-values selector.
+ + :param tokens: + An iterable of token, typically from the ``head`` attribute of + an unparsed :class:`AtRule`. + :returns: + A generator of strings representing font families. + :raises: + :class:`~.parsing.ParseError` on invalid selectors + + """ + family = '' + current_string = False + for token in tokens: + if token.type == 'DELIM' and token.value == ',' and family: + yield family + family = '' + current_string = False + elif token.type == 'STRING' and not family and ( + current_string is False): + family = token.value + current_string = True + elif token.type == 'IDENT' and not current_string: + if family: + family += ' ' + family += token.value + elif token.type != 'S': + family = '' + break + if family: + yield family + else: + raise ParseError(token, 'invalid @font-feature-values selector') diff --git a/tinycss/tests/test_fonts3.py b/tinycss/tests/test_fonts3.py new file mode 100644 index 0000000..c31a1c1 --- /dev/null +++ b/tinycss/tests/test_fonts3.py @@ -0,0 +1,149 @@ +# coding: utf-8 +""" + Tests for the Fonts 3 parser + ---------------------------- + + :copyright: (c) 2016 by Kozea. + :license: BSD, see LICENSE for more details. +""" + + +from __future__ import unicode_literals + +import pytest + +from tinycss.fonts3 import CSSFonts3Parser +from .test_tokenizer import jsonify +from . import assert_errors + + +@pytest.mark.parametrize(('css', 'expected_family_names', 'expected_errors'), [ + ('@font-feature-values foo {}', ('foo',), []), + ('@font-feature-values Foo Test {}', ('Foo Test',), []), + ('@font-feature-values \'Foo Test\' {}', ('Foo Test',), []), + ('@font-feature-values Foo Test, Foo Lol, "Foo tooo"', ( + 'Foo Test', 'Foo Lol', 'Foo tooo'), []), + ('@font-feature-values Foo , Foo lol {}', ('Foo', 'Foo lol'), []), + ('@font-feature-values Foo , "Foobar" , Lol {}', ( + 'Foo', 'Foobar', 'Lol'), []), + ('@font-feature-values Foo, {}', None, [ + 'invalid @font-feature-values selector']), + ('@font-feature-values ,Foo {}', None, [ + 'invalid @font-feature-values selector']), + ('@font-feature-values Test,"Foo", {}', None, [ + 'invalid @font-feature-values selector']), + ('@font-feature-values Test "Foo" {}', None, [ + 'invalid @font-feature-values selector']), + ('@font-feature-values Test Foo, Test "bar", "foo" {}', None, [ + 'invalid @font-feature-values selector']), + ('@font-feature-values Test/Foo {}', None, [ + 'invalid @font-feature-values selector']), + ('@font-feature-values /Foo {}', None, [ + 'invalid @font-feature-values selector']), + ('@font-feature-values #Foo {}', None, [ + 'invalid @font-feature-values selector']), + # TODO: this currently works but should not work + #('@font-feature-values test@foo {}', None, [ + # 'invalid @font-feature-values selector']), + ('@font-feature-values Hawaii 5-0 {}', None, [ + 'invalid @font-feature-values selector']), +]) +def test_font_feature_values_selectors(css, expected_family_names, + expected_errors): + stylesheet = CSSFonts3Parser().parse_stylesheet(css) + assert_errors(stylesheet.errors, expected_errors) + + if stylesheet.rules: + assert len(stylesheet.rules) == 1 + rule = stylesheet.rules[0] + assert rule.at_keyword == '@font-feature-values' + assert rule.family_names == expected_family_names + + +@pytest.mark.parametrize(('css', 'expected_declarations', 'expected_errors'), [ + ('@font-face {}', None, [ + '@font-face rule needs src and font-family descriptors']), + ('@font-face {src:"lol"}', None, [ + '@font-face rule needs src and font-family descriptors']), + ('@font-face {font-family:"lol"}', None, [ + 
'@font-face rule needs src and font-family descriptors']), + ('@font-face test { src: "lol"; font-family: "bar" }', None, [ + 'unexpected IDENT token in @font-face rule header']), + ('@font-face { src: "lol"; font-family: "bar" }', [ + ('src', [('STRING', 'lol')]), + ('font-family', [('STRING', 'bar')])], []), + ('@font-face { src: "lol"; font-family: "bar"; src: "baz" }', [ + ('src', [('STRING', 'lol')]), + ('font-family', [('STRING', 'bar')]), + ('src', [('STRING', 'baz')])], []), +]) +def test_font_face_content(css, expected_declarations, expected_errors): + stylesheet = CSSFonts3Parser().parse_stylesheet(css) + assert_errors(stylesheet.errors, expected_errors) + + def declarations(rule): + return [(decl.name, list(jsonify(decl.value))) + for decl in rule.declarations] + + if expected_declarations is None: + assert stylesheet.rules == [] + assert expected_errors + else: + assert len(stylesheet.rules) == 1 + rule = stylesheet.rules[0] + assert rule.at_keyword == '@font-face' + assert declarations(rule) == expected_declarations + + +@pytest.mark.parametrize( + ('css', 'expected_rules', 'expected_errors'), [ + ('''@annotation{}''', None, [ + '@annotation rule not allowed in stylesheet']), + ('''@font-feature-values foo {}''', None, []), + ('''@font-feature-values foo { + @swash { ornate: 1; } + @styleset { double-W: 14; sharp-terminals: 16 1; } + }''', [ + ('@swash', [('ornate', [('INTEGER', 1)])]), + ('@styleset', [ + ('double-w', [('INTEGER', 14)]), + ('sharp-terminals', [ + ('INTEGER', 16), ('S', ' '), ('INTEGER', 1)])])], []), + ('''@font-feature-values foo { + @swash { ornate: 14; } + @unknown { test: 1; } + }''', [('@swash', [('ornate', [('INTEGER', 14)])])], [ + 'unknown at-rule in @font-feature-values context: @unknown']), + ('''@font-feature-values foo { + @annotation{boxed:1} + bad: 2; + @brokenstylesetbecauseofbadabove { sharp: 1} + @styleset { sharp-terminals: 16 1; @bad {}} + @styleset { @bad {} top-ignored: 3; top: 9000} + really-bad + }''', [ + ('@annotation', [('boxed', [('INTEGER', 1)])]), + ('@styleset', [ + ('sharp-terminals', [ + ('INTEGER', 16), ('S', ' '), ('INTEGER', 1)])]), + ('@styleset', [('top', [('INTEGER', 9000)])])], [ + 'unexpected ; token in selector', + 'expected a property name, got ATKEYWORD', + 'expected a property name, got ATKEYWORD', + 'no declaration block found for ruleset']), + ]) +def test_font_feature_values_content(css, expected_rules, expected_errors): + stylesheet = CSSFonts3Parser().parse_stylesheet(css) + assert_errors(stylesheet.errors, expected_errors) + + if expected_rules is not None: + assert len(stylesheet.rules) == 1 + rule = stylesheet.rules[0] + assert rule.at_keyword == '@font-feature-values' + + rules = [ + (rule.at_keyword, [ + (decl.name, list(jsonify(decl.value))) + for decl in rule.declarations]) + for rule in rule.at_rules] if rule.at_rules else None + assert rules == expected_rules From 7c88594a9c8c19eec17ac5c1e3bc6ce57ec75f61 Mon Sep 17 00:00:00 2001 From: Guillaume Ayoub Date: Wed, 14 Sep 2016 13:21:49 +0200 Subject: [PATCH 09/28] Try to fix tests on Travis, and update setup.py --- .travis.yml | 13 ++++++++----- setup.cfg | 6 ++++++ setup.py | 16 ++++++++++++---- 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index 7bb92d9..618318d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,13 +1,16 @@ language: python python: - - "2.6" - "2.7" - - "3.2" - "3.3" + - "3.4" + - "3.5" + - "pypy" + - "pypy3" install: - - pip install --use-mirrors Cython - - pip install --use-mirrors -e . 
+ - pip install Cython + - pip install --upgrade -e .[test] -script: py.test +script: + - python setup.py test diff --git a/setup.cfg b/setup.cfg index 836ca93..09ecc38 100644 --- a/setup.cfg +++ b/setup.cfg @@ -6,3 +6,9 @@ build-dir = docs/_build [upload_sphinx] # Sphinx-PyPI-upload upload-dir = docs/_build/html +[aliases] +test = pytest + +[tool:pytest] +addopts = --flake8 --isort --cov --ignore=test/cairosvg_reference +norecursedirs = dist .cache .git build *.egg-info .eggs venv cairosvg_reference diff --git a/setup.py b/setup.py index 6c0ec6d..da7e4e8 100644 --- a/setup.py +++ b/setup.py @@ -46,6 +46,9 @@ def build_extension(self, ext): README = fd.read().decode('utf8') +needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv) +pytest_runner = ['pytest-runner'] if needs_pytest else [] + def run_setup(with_extension): if with_extension: extension_path = os.path.join('tinycss', 'speedups') @@ -78,16 +81,21 @@ def run_setup(with_extension): description='tinycss is a complete yet simple CSS parser for Python.', long_description=README, classifiers=[ - 'Development Status :: 3 - Alpha', + 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.1', - 'Programming Language :: Python :: 3.2', + 'Programming Language :: Python :: 3.3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', ], + setup_requires=pytest_runner, + tests_require=[ + 'pytest-cov', 'pytest-flake8', 'pytest-isort', 'pytest-runner'], + extras_require={'test': ( + 'pytest-runner', 'pytest-cov', 'pytest-flake8', 'pytest-isort')}, packages=['tinycss', 'tinycss.tests'], **kwargs ) From 6740930dcb09af3eac57833b050202dc621efa40 Mon Sep 17 00:00:00 2001 From: Guillaume Ayoub Date: Wed, 14 Sep 2016 14:13:54 +0200 Subject: [PATCH 10/28] Fix flake8 and isort errors --- setup.cfg | 2 + setup.py | 22 +- tinycss/__init__.py | 5 +- tinycss/color3.py | 3 +- tinycss/css21.py | 64 +++--- tinycss/decoding.py | 170 ++++++++-------- tinycss/fonts3.py | 17 +- tinycss/page3.py | 23 ++- tinycss/parsing.py | 5 +- tinycss/tests/speed.py | 11 +- tinycss/tests/test_api.py | 2 - tinycss/tests/test_color3.py | 51 +++-- tinycss/tests/test_css21.py | 13 +- tinycss/tests/test_decoding.py | 5 +- tinycss/tests/test_fonts3.py | 16 +- tinycss/tests/test_page3.py | 8 +- tinycss/tests/test_tokenizer.py | 347 ++++++++++++++++---------------- tinycss/token_data.py | 27 +-- tinycss/tokenizer.py | 35 ++-- 19 files changed, 414 insertions(+), 412 deletions(-) diff --git a/setup.cfg b/setup.cfg index 09ecc38..5b2ad96 100644 --- a/setup.cfg +++ b/setup.cfg @@ -12,3 +12,5 @@ test = pytest [tool:pytest] addopts = --flake8 --isort --cov --ignore=test/cairosvg_reference norecursedirs = dist .cache .git build *.egg-info .eggs venv cairosvg_reference +flake8-ignore = docs/conf.py ALL +isort_ignore = docs/conf.py \ No newline at end of file diff --git a/setup.py b/setup.py index da7e4e8..13a1749 100644 --- a/setup.py +++ b/setup.py @@ -1,9 +1,10 @@ +import os.path import re import sys -import os.path -from setuptools import setup, Extension from distutils.errors import ( CCompilerError, DistutilsExecError, DistutilsPlatformError) +from setuptools import Extension, setup + try: from Cython.Distutils import build_ext import Cython.Compiler.Version @@ -15,13 +16,15 @@ ext_errors = 
(CCompilerError, DistutilsExecError, DistutilsPlatformError) if sys.platform == 'win32' and sys.version_info > (2, 6): - # 2.6's distutils.msvc9compiler can raise an IOError when failing to - # find the compiler - ext_errors += (IOError,) + # 2.6's distutils.msvc9compiler can raise an IOError when failing to + # find the compiler + ext_errors += (IOError,) + class BuildFailed(Exception): pass + class ve_build_ext(build_ext): # This class allows C extension building to fail. @@ -49,6 +52,7 @@ def build_extension(self, ext): needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv) pytest_runner = ['pytest-runner'] if needs_pytest else [] + def run_setup(with_extension): if with_extension: extension_path = os.path.join('tinycss', 'speedups') @@ -58,11 +62,11 @@ def run_setup(with_extension): else: extension_path += '.c' if not os.path.exists(extension_path): - print ("WARNING: Trying to build without Cython, but " - "pre-generated '%s' does not seem to be available." - % extension_path) + print("WARNING: Trying to build without Cython, but " + "pre-generated '%s' does not seem to be available." + % extension_path) else: - print ('Building without Cython.') + print('Building without Cython.') kwargs = dict( cmdclass=dict(build_ext=ve_build_ext), ext_modules=[Extension('tinycss.speedups', diff --git a/tinycss/__init__.py b/tinycss/__init__.py index 3848b33..aba135c 100644 --- a/tinycss/__init__.py +++ b/tinycss/__init__.py @@ -9,16 +9,15 @@ :license: BSD, see LICENSE for more details. """ -import sys - from .version import VERSION -__version__ = VERSION from .css21 import CSS21Parser from .page3 import CSSPage3Parser from .fonts3 import CSSFonts3Parser +__version__ = VERSION + PARSER_MODULES = { 'page3': CSSPage3Parser, 'fonts3': CSSFonts3Parser, diff --git a/tinycss/color3.py b/tinycss/color3.py index 7d32628..92eed46 100644 --- a/tinycss/color3.py +++ b/tinycss/color3.py @@ -13,7 +13,8 @@ :license: BSD, see LICENSE for more details. """ -from __future__ import unicode_literals, division +from __future__ import division, unicode_literals + import collections import itertools import re diff --git a/tinycss/css21.py b/tinycss/css21.py index aeb9222..e611a5e 100644 --- a/tinycss/css21.py +++ b/tinycss/css21.py @@ -11,13 +11,15 @@ """ from __future__ import unicode_literals + from itertools import chain, islice from .decoding import decode +from .parsing import ( + ParseError, remove_whitespace, split_on_comma, strip_whitespace, + validate_any, validate_value) from .token_data import TokenList from .tokenizer import tokenize_grouped -from .parsing import (strip_whitespace, remove_whitespace, split_on_comma, - validate_value, validate_block, validate_any, ParseError) # stylesheet : [ CDO | CDC | S | statement ]*; @@ -293,7 +295,6 @@ def __repr__(self): ' {0.uri}>'.format(self)) - def _remove_at_charset(tokens): """Remove any valid @charset at the beggining of a token stream. @@ -307,8 +308,8 @@ def _remove_at_charset(tokens): header = list(islice(tokens, 4)) if [t.type for t in header] == ['ATKEYWORD', 'S', 'STRING', ';']: atkw, space, string, semicolon = header - if ((atkw.value, space.value) == ('@charset', ' ') - and string.as_css()[0] == '"'): + if ((atkw.value, space.value) == ('@charset', ' ') and + string.as_css()[0] == '"'): # Found a valid @charset rule, only keep what’s after it. 
return tokens return chain(header, tokens) @@ -331,7 +332,7 @@ class CSS21Parser(object): # User API: def parse_stylesheet_file(self, css_file, protocol_encoding=None, - linking_encoding=None, document_encoding=None): + linking_encoding=None, document_encoding=None): """Parse a stylesheet from a file or filename. Character encoding-related parameters and behavior are the same @@ -512,8 +513,9 @@ def parse_at_rule(self, rule, previous_rules, errors, context): raise ParseError(rule, '@page rule not allowed in ' + context) selector, specificity = self.parse_page_selector(rule.head) if rule.body is None: - raise ParseError(rule, - 'invalid {0} rule: missing block'.format(rule.at_keyword)) + raise ParseError( + rule, 'invalid {0} rule: missing block'.format( + rule.at_keyword)) declarations, at_rules, rule_errors = \ self.parse_declarations_and_at_rules(rule.body, '@page') errors.extend(rule_errors) @@ -527,32 +529,34 @@ def parse_at_rule(self, rule, previous_rules, errors, context): raise ParseError(rule, 'expected media types for @media') media = self.parse_media(rule.head) if rule.body is None: - raise ParseError(rule, - 'invalid {0} rule: missing block'.format(rule.at_keyword)) + raise ParseError( + rule, 'invalid {0} rule: missing block'.format( + rule.at_keyword)) rules, rule_errors = self.parse_rules(rule.body, '@media') errors.extend(rule_errors) return MediaRule(media, rules, rule.line, rule.column) elif rule.at_keyword == '@import': if context != 'stylesheet': - raise ParseError(rule, - '@import rule not allowed in ' + context) + raise ParseError( + rule, '@import rule not allowed in ' + context) for previous_rule in previous_rules: if previous_rule.at_keyword not in ('@charset', '@import'): if previous_rule.at_keyword: type_ = 'an {0} rule'.format(previous_rule.at_keyword) else: type_ = 'a ruleset' - raise ParseError(previous_rule, + raise ParseError( + previous_rule, '@import rule not allowed after ' + type_) head = rule.head if not head: - raise ParseError(rule, - 'expected URI or STRING for @import rule') + raise ParseError( + rule, 'expected URI or STRING for @import rule') if head[0].type not in ('URI', 'STRING'): - raise ParseError(rule, - 'expected URI or STRING for @import rule, got ' - + head[0].type) + raise ParseError( + rule, 'expected URI or STRING for @import rule, got ' + + head[0].type) uri = head[0].value media = self.parse_media(strip_whitespace(head[1:])) if rule.body is not None: @@ -565,8 +569,9 @@ def parse_at_rule(self, rule, previous_rules, errors, context): raise ParseError(rule, 'mis-placed or malformed @charset rule') else: - raise ParseError(rule, 'unknown at-rule in {0} context: {1}' - .format(context, rule.at_keyword)) + raise ParseError( + rule, 'unknown at-rule in {0} context: {1}'.format( + context, rule.at_keyword)) def parse_media(self, tokens): """For CSS 2.1, parse a list of media types. 
@@ -588,8 +593,9 @@ def parse_media(self, tokens): if types == ['IDENT']: media_types.append(part[0].value) else: - raise ParseError(tokens[0], 'expected a media type' - + ((', got ' + ', '.join(types)) if types else '')) + raise ParseError( + tokens[0], 'expected a media type' + + ((', got ' + ', '.join(types)) if types else '')) return media_types def parse_page_selector(self, tokens): @@ -607,8 +613,8 @@ def parse_page_selector(self, tokens): """ if not tokens: return None, (0, 0) - if (len(tokens) == 2 and tokens[0].type == ':' - and tokens[1].type == 'IDENT'): + if (len(tokens) == 2 and tokens[0].type == ':' and + tokens[1].type == 'IDENT'): pseudo_class = tokens[1].value specificity = { 'first': (1, 0), 'left': (0, 1), 'right': (0, 1), @@ -677,8 +683,9 @@ def parse_ruleset(self, first_token, tokens): for one ruleset. :return: a tuple of a :class:`RuleSet` and an error list. - The errors are recovered :class:`~.parsing.ParseError` in declarations. - (Parsing continues from the next declaration on such errors.) + The errors are recovered :class:`~.parsing.ParseError` in + declarations. (Parsing continues from the next declaration on such + errors.) :raises: :class:`~.parsing.ParseError` if the selector is invalid for the core grammar. @@ -765,8 +772,9 @@ def parse_declaration(self, tokens): # CSS syntax is case-insensitive property_name = name_token.value.lower() else: - raise ParseError(name_token, - 'expected a property name, got {0}'.format(name_token.type)) + raise ParseError( + name_token, 'expected a property name, got {0}'.format( + name_token.type)) token = name_token # In case ``tokens`` is now empty for token in tokens: diff --git a/tinycss/decoding.py b/tinycss/decoding.py index 66ccbe2..09875a5 100644 --- a/tinycss/decoding.py +++ b/tinycss/decoding.py @@ -12,11 +12,9 @@ from __future__ import unicode_literals -from binascii import unhexlify import operator import re -import sys - +from binascii import unhexlify __all__ = ['decode'] # Everything else is implementation detail @@ -116,101 +114,101 @@ def __getitem__(self, slice_): ENCODING_MAGIC_NUMBERS = [ ((Slice[:], ''), re.compile( - hex2re('EF BB BF 40 63 68 61 72 73 65 74 20 22') - + b'([^\x22]*?)' - + hex2re('22 3B')).match), + hex2re('EF BB BF 40 63 68 61 72 73 65 74 20 22') + + b'([^\x22]*?)' + + hex2re('22 3B')).match), ('UTF-8', re.compile( hex2re('EF BB BF')).match), ((Slice[:], ''), re.compile( - hex2re('40 63 68 61 72 73 65 74 20 22') - + b'([^\x22]*?)' - + hex2re('22 3B')).match), + hex2re('40 63 68 61 72 73 65 74 20 22') + + b'([^\x22]*?)' + + hex2re('22 3B')).match), ((Slice[1::2], '-BE'), re.compile( hex2re('FE FF 00 40 00 63 00 68 00 61 00 72 00 73 00 65 00' - '74 00 20 00 22') - + b'((\x00[^\x22])*?)' - + hex2re('00 22 00 3B')).match), + '74 00 20 00 22') + + b'((\x00[^\x22])*?)' + + hex2re('00 22 00 3B')).match), ((Slice[1::2], '-BE'), re.compile( hex2re('00 40 00 63 00 68 00 61 00 72 00 73 00 65 00 74 00' - '20 00 22') - + b'((\x00[^\x22])*?)' - + hex2re('00 22 00 3B')).match), + '20 00 22') + + b'((\x00[^\x22])*?)' + + hex2re('00 22 00 3B')).match), ((Slice[::2], '-LE'), re.compile( hex2re('FF FE 40 00 63 00 68 00 61 00 72 00 73 00 65 00 74' - '00 20 00 22 00') - + b'(([^\x22]\x00)*?)' - + hex2re('22 00 3B 00')).match), + '00 20 00 22 00') + + b'(([^\x22]\x00)*?)' + + hex2re('22 00 3B 00')).match), ((Slice[::2], '-LE'), re.compile( hex2re('40 00 63 00 68 00 61 00 72 00 73 00 65 00 74 00 20' - '00 22 00') - + b'(([^\x22]\x00)*?)' - + hex2re('22 00 3B 00')).match), + '00 22 00') + + b'(([^\x22]\x00)*?)' + 
+ hex2re('22 00 3B 00')).match), ((Slice[3::4], '-BE'), re.compile( hex2re('00 00 FE FF 00 00 00 40 00 00 00 63 00 00 00 68 00' '00 00 61 00 00 00 72 00 00 00 73 00 00 00 65 00 00' - '00 74 00 00 00 20 00 00 00 22') - + b'((\x00\x00\x00[^\x22])*?)' - + hex2re('00 00 00 22 00 00 00 3B')).match), + '00 74 00 00 00 20 00 00 00 22') + + b'((\x00\x00\x00[^\x22])*?)' + + hex2re('00 00 00 22 00 00 00 3B')).match), ((Slice[3::4], '-BE'), re.compile( hex2re('00 00 00 40 00 00 00 63 00 00 00 68 00 00 00 61 00' '00 00 72 00 00 00 73 00 00 00 65 00 00 00 74 00 00' - '00 20 00 00 00 22') - + b'((\x00\x00\x00[^\x22])*?)' - + hex2re('00 00 00 22 00 00 00 3B')).match), - - -# Python does not support 2143 or 3412 endianness, AFAIK. -# I guess we could fix it up ourselves but meh. Patches welcome. - -# ((Slice[2::4], '-2143'), re.compile( -# hex2re('00 00 FF FE 00 00 40 00 00 00 63 00 00 00 68 00 00' -# '00 61 00 00 00 72 00 00 00 73 00 00 00 65 00 00 00' -# '74 00 00 00 20 00 00 00 22 00') -# + b'((\x00\x00[^\x22]\x00)*?)' -# + hex2re('00 00 22 00 00 00 3B 00')).match), - -# ((Slice[2::4], '-2143'), re.compile( -# hex2re('00 00 40 00 00 00 63 00 00 00 68 00 00 00 61 00 00' -# '00 72 00 00 00 73 00 00 00 65 00 00 00 74 00 00 00' -# '20 00 00 00 22 00') -# + b'((\x00\x00[^\x22]\x00)*?)' -# + hex2re('00 00 22 00 00 00 3B 00')).match), - -# ((Slice[1::4], '-3412'), re.compile( -# hex2re('FE FF 00 00 00 40 00 00 00 63 00 00 00 68 00 00 00' -# '61 00 00 00 72 00 00 00 73 00 00 00 65 00 00 00 74' -# '00 00 00 20 00 00 00 22 00 00') -# + b'((\x00[^\x22]\x00\x00)*?)' -# + hex2re('00 22 00 00 00 3B 00 00')).match), - -# ((Slice[1::4], '-3412'), re.compile( -# hex2re('00 40 00 00 00 63 00 00 00 68 00 00 00 61 00 00 00' -# '72 00 00 00 73 00 00 00 65 00 00 00 74 00 00 00 20' -# '00 00 00 22 00 00') -# + b'((\x00[^\x22]\x00\x00)*?)' -# + hex2re('00 22 00 00 00 3B 00 00')).match), + '00 20 00 00 00 22') + + b'((\x00\x00\x00[^\x22])*?)' + + hex2re('00 00 00 22 00 00 00 3B')).match), + + + # Python does not support 2143 or 3412 endianness, AFAIK. + # I guess we could fix it up ourselves but meh. Patches welcome. 
+ + # ((Slice[2::4], '-2143'), re.compile( + # hex2re('00 00 FF FE 00 00 40 00 00 00 63 00 00 00 68 00 00' + # '00 61 00 00 00 72 00 00 00 73 00 00 00 65 00 00 00' + # '74 00 00 00 20 00 00 00 22 00') + + # b'((\x00\x00[^\x22]\x00)*?)' + + # hex2re('00 00 22 00 00 00 3B 00')).match), + + # ((Slice[2::4], '-2143'), re.compile( + # hex2re('00 00 40 00 00 00 63 00 00 00 68 00 00 00 61 00 00' + # '00 72 00 00 00 73 00 00 00 65 00 00 00 74 00 00 00' + # '20 00 00 00 22 00') + + # b'((\x00\x00[^\x22]\x00)*?)' + + # hex2re('00 00 22 00 00 00 3B 00')).match), + + # ((Slice[1::4], '-3412'), re.compile( + # hex2re('FE FF 00 00 00 40 00 00 00 63 00 00 00 68 00 00 00' + # '61 00 00 00 72 00 00 00 73 00 00 00 65 00 00 00 74' + # '00 00 00 20 00 00 00 22 00 00') + + # b'((\x00[^\x22]\x00\x00)*?)' + + # hex2re('00 22 00 00 00 3B 00 00')).match), + + # ((Slice[1::4], '-3412'), re.compile( + # hex2re('00 40 00 00 00 63 00 00 00 68 00 00 00 61 00 00 00' + # '72 00 00 00 73 00 00 00 65 00 00 00 74 00 00 00 20' + # '00 00 00 22 00 00') + + # b'((\x00[^\x22]\x00\x00)*?)' + + # hex2re('00 22 00 00 00 3B 00 00')).match), ((Slice[::4], '-LE'), re.compile( hex2re('FF FE 00 00 40 00 00 00 63 00 00 00 68 00 00 00 61' '00 00 00 72 00 00 00 73 00 00 00 65 00 00 00 74 00' - '00 00 20 00 00 00 22 00 00 00') - + b'(([^\x22]\x00\x00\x00)*?)' - + hex2re('22 00 00 00 3B 00 00 00')).match), + '00 00 20 00 00 00 22 00 00 00') + + b'(([^\x22]\x00\x00\x00)*?)' + + hex2re('22 00 00 00 3B 00 00 00')).match), ((Slice[::4], '-LE'), re.compile( hex2re('40 00 00 00 63 00 00 00 68 00 00 00 61 00 00 00 72' '00 00 00 73 00 00 00 65 00 00 00 74 00 00 00 20 00' - '00 00 22 00 00 00') - + b'(([^\x22]\x00\x00\x00)*?)' - + hex2re('22 00 00 00 3B 00 00 00')).match), + '00 00 22 00 00 00') + + b'(([^\x22]\x00\x00\x00)*?)' + + hex2re('22 00 00 00 3B 00 00 00')).match), ('UTF-32-BE', re.compile( hex2re('00 00 FE FF')).match), @@ -218,11 +216,11 @@ def __getitem__(self, slice_): ('UTF-32-LE', re.compile( hex2re('FF FE 00 00')).match), -# ('UTF-32-2143', re.compile( -# hex2re('00 00 FF FE')).match), + # ('UTF-32-2143', re.compile( + # hex2re('00 00 FF FE')).match), -# ('UTF-32-3412', re.compile( -# hex2re('FE FF 00 00')).match), + # ('UTF-32-3412', re.compile( + # hex2re('FE FF 00 00')).match), ('UTF-16-BE', re.compile( hex2re('FE FF')).match), @@ -231,24 +229,24 @@ def __getitem__(self, slice_): hex2re('FF FE')).match), -# Some of there are supported by Python, but I didn’t bother. -# You know the story with patches ... + # Some of there are supported by Python, but I didn’t bother. + # You know the story with patches ... 
-# # as specified, transcoded from EBCDIC to ASCII -# ('as_specified-EBCDIC', re.compile( -# hex2re('7C 83 88 81 99 A2 85 A3 40 7F') -# + b'([^\x7F]*?)' -# + hex2re('7F 5E')).match), + # # as specified, transcoded from EBCDIC to ASCII + # ('as_specified-EBCDIC', re.compile( + # hex2re('7C 83 88 81 99 A2 85 A3 40 7F') + # + b'([^\x7F]*?)' + # + hex2re('7F 5E')).match), -# # as specified, transcoded from IBM1026 to ASCII -# ('as_specified-IBM1026', re.compile( -# hex2re('AE 83 88 81 99 A2 85 A3 40 FC') -# + b'([^\xFC]*?)' -# + hex2re('FC 5E')).match), + # # as specified, transcoded from IBM1026 to ASCII + # ('as_specified-IBM1026', re.compile( + # hex2re('AE 83 88 81 99 A2 85 A3 40 FC') + # + b'([^\xFC]*?)' + # + hex2re('FC 5E')).match), -# # as specified, transcoded from GSM 03.38 to ASCII -# ('as_specified-GSM_03.38', re.compile( -# hex2re('00 63 68 61 72 73 65 74 20 22') -# + b'([^\x22]*?)' -# + hex2re('22 3B')).match), + # # as specified, transcoded from GSM 03.38 to ASCII + # ('as_specified-GSM_03.38', re.compile( + # hex2re('00 63 68 61 72 73 65 74 20 22') + # + b'([^\x22]*?)' + # + hex2re('22 3B')).match), ] diff --git a/tinycss/fonts3.py b/tinycss/fonts3.py index 9543466..748b9fb 100644 --- a/tinycss/fonts3.py +++ b/tinycss/fonts3.py @@ -12,7 +12,7 @@ :license: BSD, see LICENSE for more details. """ -from __future__ import unicode_literals, division +from __future__ import division, unicode_literals from .css21 import CSS21Parser, ParseError @@ -135,14 +135,16 @@ class CSSFonts3Parser(CSS21Parser): def parse_at_rule(self, rule, previous_rules, errors, context): if rule.at_keyword == '@font-face': if rule.head: - raise ParseError(rule.head[0], - 'unexpected %s token in %s rule header' - % (rule.head[0].type, rule.at_keyword)) + raise ParseError( + rule.head[0], + 'unexpected {0} token in {1} rule header'.format( + rule.head[0].type, rule.at_keyword)) declarations, body_errors = self.parse_declaration_list(rule.body) errors.extend(body_errors) names = [declaration.name for declaration in declarations] if 'src' not in names or 'font-family' not in names: - raise ParseError(rule, + raise ParseError( + rule, '@font-face rule needs src and font-family descriptors') return FontFaceRule( rule.at_keyword, declarations, rule.line, rule.column) @@ -157,8 +159,9 @@ def parse_at_rule(self, rule, previous_rules, errors, context): rule.line, rule.column) elif rule.at_keyword in self.FONT_FEATURE_VALUES_AT_KEYWORDS: if context != '@font-feature-values': - raise ParseError(rule, - '%s rule not allowed in %s' % (rule.at_keyword, context)) + raise ParseError( + rule, '{0} rule not allowed in {1}'.format( + rule.at_keyword, context)) declarations, body_errors = self.parse_declaration_list(rule.body) errors.extend(body_errors) return FontFeatureRule( diff --git a/tinycss/page3.py b/tinycss/page3.py index c901ed5..6a89252 100644 --- a/tinycss/page3.py +++ b/tinycss/page3.py @@ -12,7 +12,8 @@ :license: BSD, see LICENSE for more details. 
""" -from __future__ import unicode_literals, division +from __future__ import division, unicode_literals + from .css21 import CSS21Parser, ParseError @@ -110,16 +111,18 @@ class CSSPage3Parser(CSS21Parser): def parse_at_rule(self, rule, previous_rules, errors, context): if rule.at_keyword in self.PAGE_MARGIN_AT_KEYWORDS: if context != '@page': - raise ParseError(rule, - '%s rule not allowed in %s' % (rule.at_keyword, context)) + raise ParseError( + rule, '{0} rule not allowed in {1}'.format( + rule.at_keyword, context)) if rule.head: - raise ParseError(rule.head[0], - 'unexpected %s token in %s rule header' - % (rule.head[0].type, rule.at_keyword)) + raise ParseError( + rule.head[0], + 'unexpected {0} token in {1} rule header'.format( + rule.head[0].type, rule.at_keyword)) declarations, body_errors = self.parse_declaration_list(rule.body) errors.extend(body_errors) - return MarginRule(rule.at_keyword, declarations, - rule.line, rule.column) + return MarginRule( + rule.at_keyword, declarations, rule.line, rule.column) return super(CSSPage3Parser, self).parse_at_rule( rule, previous_rules, errors, context) @@ -147,8 +150,8 @@ def parse_page_selector(self, head): else: name = None name_specificity = (0,) - if (len(head) == 2 and head[0].type == ':' - and head[1].type == 'IDENT'): + if (len(head) == 2 and head[0].type == ':' and + head[1].type == 'IDENT'): pseudo_class = head[1].value specificity = { 'first': (1, 0), 'blank': (1, 0), diff --git a/tinycss/parsing.py b/tinycss/parsing.py index 8479e72..c01e9c2 100644 --- a/tinycss/parsing.py +++ b/tinycss/parsing.py @@ -95,6 +95,7 @@ def validate_value(tokens): else: validate_any(token, 'property value') + def validate_block(tokens, context): """ :raises: @@ -132,8 +133,8 @@ def validate_any(token, context): adjective = 'unmatched' else: adjective = 'unexpected' - raise ParseError(token, - '{0} {1} token in {2}'.format(adjective, type_, context)) + raise ParseError( + token, '{0} {1} token in {2}'.format(adjective, type_, context)) class ParseError(ValueError): diff --git a/tinycss/tests/speed.py b/tinycss/tests/speed.py index b80141e..860e5df 100644 --- a/tinycss/tests/speed.py +++ b/tinycss/tests/speed.py @@ -11,13 +11,13 @@ """ -from __future__ import unicode_literals, division +from __future__ import division, unicode_literals -import sys -import os.path import contextlib -import timeit import functools +import os.path +import sys +import timeit from cssutils import parseString @@ -25,7 +25,6 @@ from ..css21 import CSS21Parser from ..parsing import remove_whitespace - CSS_REPEAT = 4 TIMEIT_REPEAT = 3 TIMEIT_NUMBER = 20 @@ -82,8 +81,6 @@ def parse_cssutils(): def check_consistency(): result = parse_python() - #import pprint - #pprint.pprint(result) assert len(result) > 0 if tokenizer.cython_tokenize_flat: assert parse_cython() == result diff --git a/tinycss/tests/test_api.py b/tinycss/tests/test_api.py index 7eb007e..a0510b9 100644 --- a/tinycss/tests/test_api.py +++ b/tinycss/tests/test_api.py @@ -9,10 +9,8 @@ from __future__ import unicode_literals -import itertools from pytest import raises - from tinycss import make_parser from tinycss.page3 import CSSPage3Parser diff --git a/tinycss/tests/test_color3.py b/tinycss/tests/test_color3.py index 79c37dc..e48771e 100644 --- a/tinycss/tests/test_color3.py +++ b/tinycss/tests/test_color3.py @@ -11,8 +11,7 @@ from __future__ import unicode_literals import pytest - -from tinycss.color3 import parse_color_string, hsl_to_rgb +from tinycss.color3 import hsl_to_rgb, parse_color_string 
@pytest.mark.parametrize(('css_source', 'expected_result'), [ @@ -172,30 +171,30 @@ def test_color(css_source, expected_result): @pytest.mark.parametrize(('hsl', 'expected_rgb'), [ # http://en.wikipedia.org/wiki/HSL_and_HSV#Examples - ((0, 0, 100 ), (1, 1, 1 )), - ((127, 0, 100 ), (1, 1, 1 )), - ((0, 0, 50 ), (0.5, 0.5, 0.5 )), - ((127, 0, 50 ), (0.5, 0.5, 0.5 )), - ((0, 0, 0 ), (0, 0, 0 )), - ((127, 0, 0 ), (0, 0, 0 )), - ((0, 100, 50 ), (1, 0, 0 )), - ((60, 100, 37.5), (0.75, 0.75, 0 )), - ((780, 100, 37.5), (0.75, 0.75, 0 )), - ((-300, 100, 37.5), (0.75, 0.75, 0 )), - ((120, 100, 25 ), (0, 0.5, 0 )), - ((180, 100, 75 ), (0.5, 1, 1 )), - ((240, 100, 75 ), (0.5, 0.5, 1 )), - ((300, 50, 50 ), (0.75, 0.25, 0.75 )), - ((61.8, 63.8, 39.3), (0.628, 0.643, 0.142)), - ((251.1, 83.2, 51.1), (0.255, 0.104, 0.918)), - ((134.9, 70.7, 39.6), (0.116, 0.675, 0.255)), - ((49.5, 89.3, 49.7), (0.941, 0.785, 0.053)), - ((283.7, 77.5, 54.2), (0.704, 0.187, 0.897)), - ((14.3, 81.7, 62.4), (0.931, 0.463, 0.316)), - ((56.9, 99.1, 76.5), (0.998, 0.974, 0.532)), - ((162.4, 77.9, 44.7), (0.099, 0.795, 0.591)), - ((248.3, 60.1, 37.3), (0.211, 0.149, 0.597)), - ((240.5, 29, 60.7), (0.495, 0.493, 0.721)), + ((0, 0, 100 ), (1, 1, 1 )), # noqa + ((127, 0, 100 ), (1, 1, 1 )), # noqa + ((0, 0, 50 ), (0.5, 0.5, 0.5 )), # noqa + ((127, 0, 50 ), (0.5, 0.5, 0.5 )), # noqa + ((0, 0, 0 ), (0, 0, 0 )), # noqa + ((127, 0, 0 ), (0, 0, 0 )), # noqa + ((0, 100, 50 ), (1, 0, 0 )), # noqa + ((60, 100, 37.5), (0.75, 0.75, 0 )), # noqa + ((780, 100, 37.5), (0.75, 0.75, 0 )), # noqa + ((-300, 100, 37.5), (0.75, 0.75, 0 )), # noqa + ((120, 100, 25 ), (0, 0.5, 0 )), # noqa + ((180, 100, 75 ), (0.5, 1, 1 )), # noqa + ((240, 100, 75 ), (0.5, 0.5, 1 )), # noqa + ((300, 50, 50 ), (0.75, 0.25, 0.75 )), # noqa + ((61.8, 63.8, 39.3), (0.628, 0.643, 0.142)), # noqa + ((251.1, 83.2, 51.1), (0.255, 0.104, 0.918)), # noqa + ((134.9, 70.7, 39.6), (0.116, 0.675, 0.255)), # noqa + ((49.5, 89.3, 49.7), (0.941, 0.785, 0.053)), # noqa + ((283.7, 77.5, 54.2), (0.704, 0.187, 0.897)), # noqa + ((14.3, 81.7, 62.4), (0.931, 0.463, 0.316)), # noqa + ((56.9, 99.1, 76.5), (0.998, 0.974, 0.532)), # noqa + ((162.4, 77.9, 44.7), (0.099, 0.795, 0.591)), # noqa + ((248.3, 60.1, 37.3), (0.211, 0.149, 0.597)), # noqa + ((240.5, 29, 60.7), (0.495, 0.493, 0.721)), # noqa ]) def test_hsl(hsl, expected_rgb): for got, expected in zip(hsl_to_rgb(*hsl), expected_rgb): diff --git a/tinycss/tests/test_css21.py b/tinycss/tests/test_css21.py index 3af497c..a8ca956 100644 --- a/tinycss/tests/test_css21.py +++ b/tinycss/tests/test_css21.py @@ -9,16 +9,16 @@ from __future__ import unicode_literals + import io import os import tempfile import pytest - from tinycss.css21 import CSS21Parser -from .test_tokenizer import jsonify from . 
import assert_errors +from .test_tokenizer import jsonify def parse_bytes(css_bytes, kwargs): @@ -49,7 +49,7 @@ def parse_filename(css_bytes, kwargs): ('@import "é";'.encode('utf8'), {}, 'é'), ('@import "é";'.encode('utf16'), {}, 'é'), # with a BOM ('@import "é";'.encode('latin1'), {}, 'é'), - ('@import "£";'.encode('Shift-JIS'), {}, '\x81\x92'), # latin1 mojibake + ('@import "£";'.encode('Shift-JIS'), {}, '\x81\x92'), # lat1 mojibake ('@charset "Shift-JIS";@import "£";'.encode('Shift-JIS'), {}, '£'), (' @charset "Shift-JIS";@import "£";'.encode('Shift-JIS'), {}, '\x81\x92'), @@ -77,7 +77,8 @@ def test_bytes(css_bytes, kwargs, expected_result, parse): ('foo{} @lipsum{} bar{}', 2, ['unknown at-rule in stylesheet context: @lipsum']), ('@charset "ascii"; foo {}', 1, []), - (' @charset "ascii"; foo {}', 1, ['mis-placed or malformed @charset rule']), + (' @charset "ascii"; foo {}', 1, [ + 'mis-placed or malformed @charset rule']), ('@charset ascii; foo {}', 1, ['mis-placed or malformed @charset rule']), ('foo {} @charset "ascii";', 1, ['mis-placed or malformed @charset rule']), ]) @@ -109,8 +110,8 @@ def test_at_rules(css_source, expected_rules, expected_errors): ('a{b:4}', [('a', [('b', [('INTEGER', 4)])])], []), ('@page {\t b: 4; @margin}', [('@page', [], [ - ('S', '\t '), ('IDENT', 'b'), (':', ':'), ('S', ' '), ('INTEGER', 4), - (';', ';'), ('S', ' '), ('ATKEYWORD', '@margin'), + ('S', '\t '), ('IDENT', 'b'), (':', ':'), ('S', ' '), ('INTEGER', 4), + (';', ';'), ('S', ' '), ('ATKEYWORD', '@margin'), ])], []), ('foo', [], ['no declaration block found']), diff --git a/tinycss/tests/test_decoding.py b/tinycss/tests/test_decoding.py index e2c88a4..eaa8019 100644 --- a/tinycss/tests/test_decoding.py +++ b/tinycss/tests/test_decoding.py @@ -11,7 +11,6 @@ from __future__ import unicode_literals import pytest - from tinycss.decoding import decode @@ -30,13 +29,13 @@ def params(css, encoding, use_bom=False, expect_error=False, **kwargs): params('£', 'ShiftJIS', linking_encoding='Shift-JIS'), params('£', 'ShiftJIS', document_encoding='Shift-JIS'), params('£', 'ShiftJIS', protocol_encoding='utf8', - document_encoding='ShiftJIS'), + document_encoding='ShiftJIS'), params('@charset "utf8"; £', 'ShiftJIS', expect_error=True), params('@charset "utf£8"; £', 'ShiftJIS', expect_error=True), params('@charset "unknown-encoding"; £', 'ShiftJIS', expect_error=True), params('@charset "utf8"; £', 'ShiftJIS', document_encoding='ShiftJIS'), params('£', 'ShiftJIS', linking_encoding='utf8', - document_encoding='ShiftJIS'), + document_encoding='ShiftJIS'), params('@charset "utf-32"; 𐂃', 'utf-32-be'), params('@charset "Shift-JIS"; £', 'ShiftJIS'), params('@charset "ISO-8859-8"; £', 'ShiftJIS', expect_error=True), diff --git a/tinycss/tests/test_fonts3.py b/tinycss/tests/test_fonts3.py index c31a1c1..7c354ca 100644 --- a/tinycss/tests/test_fonts3.py +++ b/tinycss/tests/test_fonts3.py @@ -11,10 +11,10 @@ from __future__ import unicode_literals import pytest - from tinycss.fonts3 import CSSFonts3Parser -from .test_tokenizer import jsonify + from . 
import assert_errors +from .test_tokenizer import jsonify @pytest.mark.parametrize(('css', 'expected_family_names', 'expected_errors'), [ @@ -43,8 +43,8 @@ ('@font-feature-values #Foo {}', None, [ 'invalid @font-feature-values selector']), # TODO: this currently works but should not work - #('@font-feature-values test@foo {}', None, [ - # 'invalid @font-feature-values selector']), + # ('@font-feature-values test@foo {}', None, [ + # 'invalid @font-feature-values selector']), ('@font-feature-values Hawaii 5-0 {}', None, [ 'invalid @font-feature-values selector']), ]) @@ -127,10 +127,10 @@ def declarations(rule): ('sharp-terminals', [ ('INTEGER', 16), ('S', ' '), ('INTEGER', 1)])]), ('@styleset', [('top', [('INTEGER', 9000)])])], [ - 'unexpected ; token in selector', - 'expected a property name, got ATKEYWORD', - 'expected a property name, got ATKEYWORD', - 'no declaration block found for ruleset']), + 'unexpected ; token in selector', + 'expected a property name, got ATKEYWORD', + 'expected a property name, got ATKEYWORD', + 'no declaration block found for ruleset']), ]) def test_font_feature_values_content(css, expected_rules, expected_errors): stylesheet = CSSFonts3Parser().parse_stylesheet(css) diff --git a/tinycss/tests/test_page3.py b/tinycss/tests/test_page3.py index 9c55b47..7d2b2d2 100644 --- a/tinycss/tests/test_page3.py +++ b/tinycss/tests/test_page3.py @@ -11,10 +11,10 @@ from __future__ import unicode_literals import pytest - from tinycss.page3 import CSSPage3Parser -from .test_tokenizer import jsonify + from . import assert_errors +from .test_tokenizer import jsonify @pytest.mark.parametrize(('css', 'expected_selector', @@ -56,7 +56,7 @@ def test_selectors(css, expected_selector, expected_specificity, @pytest.mark.parametrize(('css', 'expected_declarations', - 'expected_rules','expected_errors'), [ + 'expected_rules', 'expected_errors'), [ ('@page {}', [], [], []), ('@page { foo: 4; bar: z }', [('foo', [('INTEGER', 4)]), ('bar', [('IDENT', 'z')])], [], []), @@ -68,7 +68,7 @@ def test_selectors(css, expected_selector, expected_specificity, [('foo', [('INTEGER', 4)]), ('bar', [('IDENT', 'z')])], [('@top-center', [('content', [('STRING', 'Awesome Title')])]), ('@bottom-left', [('content', [ - ('FUNCTION', 'counter', [('IDENT', 'page')])])])], + ('FUNCTION', 'counter', [('IDENT', 'page')])])])], []), ('''@page { foo: 4; @bottom-top { content: counter(page) } diff --git a/tinycss/tests/test_tokenizer.py b/tinycss/tests/test_tokenizer.py index 51328b8..b5dfe29 100644 --- a/tinycss/tests/test_tokenizer.py +++ b/tinycss/tests/test_tokenizer.py @@ -10,13 +10,12 @@ from __future__ import unicode_literals -import sys import os +import sys import pytest - from tinycss.tokenizer import ( - python_tokenize_flat, cython_tokenize_flat, regroup) + cython_tokenize_flat, python_tokenize_flat, regroup) def test_speedups(): @@ -29,100 +28,94 @@ def test_speedups(): @pytest.mark.parametrize(('tokenize', 'css_source', 'expected_tokens'), [ - (tokenize,) + test_data - for tokenize in (python_tokenize_flat, cython_tokenize_flat) - for test_data in [ - ('', []), - ('red -->', - [('IDENT', 'red'), ('S', ' '), ('CDC', '-->')]), - # Longest match rule: no CDC - ('red-->', - [('IDENT', 'red--'), ('DELIM', '>')]), - - (r'''p[example="\ -foo(int x) {\ - this.x = x;\ -}\ -"]''', [ - ('IDENT', 'p'), - ('[', '['), - ('IDENT', 'example'), - ('DELIM', '='), - ('STRING', 'foo(int x) { this.x = x;}'), - (']', ']')]), - - #### Numbers are parsed - ('42 .5 -4pX 1.25em 30%', - [('INTEGER', 42), ('S', ' '), - ('NUMBER', 
.5), ('S', ' '), - # units are normalized to lower-case: - ('DIMENSION', -4, 'px'), ('S', ' '), - ('DIMENSION', 1.25, 'em'), ('S', ' '), - ('PERCENTAGE', 30, '%')]), - - #### URLs are extracted - ('url(foo.png)', [('URI', 'foo.png')]), - ('url("foo.png")', [('URI', 'foo.png')]), - - #### Escaping - - (r'/* Comment with a \ backslash */', - [('COMMENT', '/* Comment with a \ backslash */')]), # Unchanged - - # backslash followed by a newline in a string: ignored - ('"Lorem\\\nIpsum"', [('STRING', 'LoremIpsum')]), - - # backslash followed by a newline outside a string: stands for itself - ('Lorem\\\nIpsum', [ - ('IDENT', 'Lorem'), ('DELIM', '\\'), - ('S', '\n'), ('IDENT', 'Ipsum')]), - - # Cancel the meaning of special characters - (r'"Lore\m Ipsum"', [('STRING', 'Lorem Ipsum')]), # or not specal - (r'"Lorem \49psum"', [('STRING', 'Lorem Ipsum')]), - (r'"Lorem \49 psum"', [('STRING', 'Lorem Ipsum')]), - (r'"Lorem\"Ipsum"', [('STRING', 'Lorem"Ipsum')]), - (r'"Lorem\\Ipsum"', [('STRING', r'Lorem\Ipsum')]), - (r'"Lorem\5c Ipsum"', [('STRING', r'Lorem\Ipsum')]), - (r'Lorem\+Ipsum', [('IDENT', 'Lorem+Ipsum')]), - (r'Lorem+Ipsum', [('IDENT', 'Lorem'), ('DELIM', '+'), ('IDENT', 'Ipsum')]), - (r'url(foo\).png)', [('URI', 'foo).png')]), - - # Unicode and backslash escaping - ('\\26 B', [('IDENT', '&B')]), - ('\\&B', [('IDENT', '&B')]), - ('@\\26\tB', [('ATKEYWORD', '@&B')]), - ('@\\&B', [('ATKEYWORD', '@&B')]), - ('#\\26\nB', [('HASH', '#&B')]), - ('#\\&B', [('HASH', '#&B')]), - ('\\26\r\nB(', [('FUNCTION', '&B(')]), - ('\\&B(', [('FUNCTION', '&B(')]), - (r'12.5\000026B', [('DIMENSION', 12.5, '&b')]), - (r'12.5\0000263B', [('DIMENSION', 12.5, '&3b')]), # max 6 digits - (r'12.5\&B', [('DIMENSION', 12.5, '&b')]), - (r'"\26 B"', [('STRING', '&B')]), - (r"'\000026B'", [('STRING', '&B')]), - (r'"\&B"', [('STRING', '&B')]), - (r'url("\26 B")', [('URI', '&B')]), - (r'url(\26 B)', [('URI', '&B')]), - (r'url("\&B")', [('URI', '&B')]), - (r'url(\&B)', [('URI', '&B')]), - (r'Lorem\110000Ipsum', [('IDENT', 'Lorem\uFFFDIpsum')]), - - #### Bad strings - - # String ends at EOF without closing: no error, parsed - ('"Lorem\\26Ipsum', [('STRING', 'Lorem&Ipsum')]), - # Unescaped newline: ends the string, error, unparsed - ('"Lorem\\26Ipsum\n', [ - ('BAD_STRING', r'"Lorem\26Ipsum'), ('S', '\n')]), - # Tokenization restarts after the newline, so the second " starts - # a new string (which ends at EOF without errors, as above.) 
- ('"Lorem\\26Ipsum\ndolor" sit', [ - ('BAD_STRING', r'"Lorem\26Ipsum'), ('S', '\n'), - ('IDENT', 'dolor'), ('STRING', ' sit')]), - -]]) + (tokenize,) + test_data + for tokenize in (python_tokenize_flat, cython_tokenize_flat) + for test_data in [ + ('', []), + ('red -->', [('IDENT', 'red'), ('S', ' '), ('CDC', '-->')]), + # Longest match rule: no CDC + ('red-->', [('IDENT', 'red--'), ('DELIM', '>')]), + (r'p[example="foo(int x) { this.x = x;}"]', [ + ('IDENT', 'p'), + ('[', '['), + ('IDENT', 'example'), + ('DELIM', '='), + ('STRING', 'foo(int x) { this.x = x;}'), + (']', ']')]), + + # Numbers are parsed + ('42 .5 -4pX 1.25em 30%', [ + ('INTEGER', 42), ('S', ' '), + ('NUMBER', .5), ('S', ' '), + # units are normalized to lower-case: + ('DIMENSION', -4, 'px'), ('S', ' '), + ('DIMENSION', 1.25, 'em'), ('S', ' '), + ('PERCENTAGE', 30, '%')]), + + # URLs are extracted + ('url(foo.png)', [('URI', 'foo.png')]), + ('url("foo.png")', [('URI', 'foo.png')]), + + # Escaping + + (r'/* Comment with a \ backslash */', [ + ('COMMENT', '/* Comment with a \ backslash */')]), # Unchanged + + # backslash followed by a newline in a string: ignored + ('"Lorem\\\nIpsum"', [('STRING', 'LoremIpsum')]), + + # backslash followed by a newline outside a string: stands for itself + ('Lorem\\\nIpsum', [ + ('IDENT', 'Lorem'), ('DELIM', '\\'), + ('S', '\n'), ('IDENT', 'Ipsum')]), + + # Cancel the meaning of special characters + (r'"Lore\m Ipsum"', [('STRING', 'Lorem Ipsum')]), # or not specal + (r'"Lorem \49psum"', [('STRING', 'Lorem Ipsum')]), + (r'"Lorem \49 psum"', [('STRING', 'Lorem Ipsum')]), + (r'"Lorem\"Ipsum"', [('STRING', 'Lorem"Ipsum')]), + (r'"Lorem\\Ipsum"', [('STRING', r'Lorem\Ipsum')]), + (r'"Lorem\5c Ipsum"', [('STRING', r'Lorem\Ipsum')]), + (r'Lorem\+Ipsum', [('IDENT', 'Lorem+Ipsum')]), + (r'Lorem+Ipsum', [ + ('IDENT', 'Lorem'), ('DELIM', '+'), ('IDENT', 'Ipsum')]), + (r'url(foo\).png)', [('URI', 'foo).png')]), + + # Unicode and backslash escaping + ('\\26 B', [('IDENT', '&B')]), + ('\\&B', [('IDENT', '&B')]), + ('@\\26\tB', [('ATKEYWORD', '@&B')]), + ('@\\&B', [('ATKEYWORD', '@&B')]), + ('#\\26\nB', [('HASH', '#&B')]), + ('#\\&B', [('HASH', '#&B')]), + ('\\26\r\nB(', [('FUNCTION', '&B(')]), + ('\\&B(', [('FUNCTION', '&B(')]), + (r'12.5\000026B', [('DIMENSION', 12.5, '&b')]), + (r'12.5\0000263B', [('DIMENSION', 12.5, '&3b')]), # max 6 digits + (r'12.5\&B', [('DIMENSION', 12.5, '&b')]), + (r'"\26 B"', [('STRING', '&B')]), + (r"'\000026B'", [('STRING', '&B')]), + (r'"\&B"', [('STRING', '&B')]), + (r'url("\26 B")', [('URI', '&B')]), + (r'url(\26 B)', [('URI', '&B')]), + (r'url("\&B")', [('URI', '&B')]), + (r'url(\&B)', [('URI', '&B')]), + (r'Lorem\110000Ipsum', [('IDENT', 'Lorem\uFFFDIpsum')]), + + # Bad strings + + # String ends at EOF without closing: no error, parsed + ('"Lorem\\26Ipsum', [('STRING', 'Lorem&Ipsum')]), + # Unescaped newline: ends the string, error, unparsed + ('"Lorem\\26Ipsum\n', [ + ('BAD_STRING', r'"Lorem\26Ipsum'), ('S', '\n')]), + # Tokenization restarts after the newline, so the second " starts + # a new string (which ends at EOF without errors, as above.) 
+ ('"Lorem\\26Ipsum\ndolor" sit', [ + ('BAD_STRING', r'"Lorem\26Ipsum'), ('S', '\n'), + ('IDENT', 'dolor'), ('STRING', ' sit')]), + + ]]) def test_tokens(tokenize, css_source, expected_tokens): if tokenize is None: # pragma: no cover pytest.skip('Speedups not available') @@ -160,64 +153,64 @@ def test_positions(tokenize): @pytest.mark.parametrize(('tokenize', 'css_source', 'expected_tokens'), [ - (tokenize,) + test_data - for tokenize in (python_tokenize_flat, cython_tokenize_flat) - for test_data in [ - ('', []), - (r'Lorem\26 "i\psum"4px', [ - ('IDENT', 'Lorem&'), ('STRING', 'ipsum'), ('DIMENSION', 4)]), - - ('not([[lorem]]{ipsum (42)})', [ - ('FUNCTION', 'not', [ - ('[', [ + (tokenize,) + test_data + for tokenize in (python_tokenize_flat, cython_tokenize_flat) + for test_data in [ + ('', []), + (r'Lorem\26 "i\psum"4px', [ + ('IDENT', 'Lorem&'), ('STRING', 'ipsum'), ('DIMENSION', 4)]), + + ('not([[lorem]]{ipsum (42)})', [ + ('FUNCTION', 'not', [ ('[', [ - ('IDENT', 'lorem'), + ('[', [ + ('IDENT', 'lorem'), + ]), ]), - ]), - ('{', [ - ('IDENT', 'ipsum'), - ('S', ' '), - ('(', [ - ('INTEGER', 42), + ('{', [ + ('IDENT', 'ipsum'), + ('S', ' '), + ('(', [ + ('INTEGER', 42), + ]) ]) - ]) - ])]), - - # Close everything at EOF, no error - ('a[b{"d', [ - ('IDENT', 'a'), - ('[', [ - ('IDENT', 'b'), - ('{', [ - ('STRING', 'd'), + ])]), + + # Close everything at EOF, no error + ('a[b{"d', [ + ('IDENT', 'a'), + ('[', [ + ('IDENT', 'b'), + ('{', [ + ('STRING', 'd'), + ]), ]), ]), - ]), - - # Any remaining ), ] or } token is a nesting error - ('a[b{d]e}', [ - ('IDENT', 'a'), - ('[', [ - ('IDENT', 'b'), - ('{', [ - ('IDENT', 'd'), - (']', ']'), # The error is visible here - ('IDENT', 'e'), + + # Any remaining ), ] or } token is a nesting error + ('a[b{d]e}', [ + ('IDENT', 'a'), + ('[', [ + ('IDENT', 'b'), + ('{', [ + ('IDENT', 'd'), + (']', ']'), # The error is visible here + ('IDENT', 'e'), + ]), ]), ]), - ]), - # ref: - ('a[b{d}e]', [ - ('IDENT', 'a'), - ('[', [ - ('IDENT', 'b'), - ('{', [ - ('IDENT', 'd'), + # ref: + ('a[b{d}e]', [ + ('IDENT', 'a'), + ('[', [ + ('IDENT', 'b'), + ('{', [ + ('IDENT', 'd'), + ]), + ('IDENT', 'e'), ]), - ('IDENT', 'e'), ]), - ]), -]]) + ]]) def test_token_grouping(tokenize, css_source, expected_tokens): if tokenize is None: # pragma: no cover pytest.skip('Speedups not available') @@ -239,27 +232,27 @@ def jsonify(tokens): @pytest.mark.parametrize(('tokenize', 'ignore_comments', 'expected_tokens'), [ - (tokenize,) + test_data - for tokenize in (python_tokenize_flat, cython_tokenize_flat) - for test_data in [ - (False, [ - ('COMMENT', '/* lorem */'), - ('S', ' '), - ('IDENT', 'ipsum'), - ('[', [ - ('IDENT', 'dolor'), - ('COMMENT', '/* sit */'), + (tokenize,) + test_data + for tokenize in (python_tokenize_flat, cython_tokenize_flat) + for test_data in [ + (False, [ + ('COMMENT', '/* lorem */'), + ('S', ' '), + ('IDENT', 'ipsum'), + ('[', [ + ('IDENT', 'dolor'), + ('COMMENT', '/* sit */'), + ]), + ('BAD_COMMENT', '/* amet') ]), - ('BAD_COMMENT', '/* amet') - ]), - (True, [ - ('S', ' '), - ('IDENT', 'ipsum'), - ('[', [ - ('IDENT', 'dolor'), + (True, [ + ('S', ' '), + ('IDENT', 'ipsum'), + ('[', [ + ('IDENT', 'dolor'), + ]), ]), - ]), -]]) + ]]) def test_comments(tokenize, ignore_comments, expected_tokens): if tokenize is None: # pragma: no cover pytest.skip('Speedups not available') @@ -270,20 +263,16 @@ def test_comments(tokenize, ignore_comments, expected_tokens): @pytest.mark.parametrize(('tokenize', 'css_source'), [ - (tokenize, test_data) - for tokenize in 
(python_tokenize_flat, cython_tokenize_flat)
-    for test_data in [
-    r'''p[example="\
-foo(int x) {\
-    this.x = x;\
-}\
-"]''',
-    '"Lorem\\26Ipsum\ndolor" sit',
-    '/* Lorem\nipsum */\fa {\n    color: red;\tcontent: "dolor\\\fsit" }',
-    'not([[lorem]]{ipsum (42)})',
-    'a[b{d]e}',
-    'a[b{"d',
-]])
+    (tokenize, test_data)
+    for tokenize in (python_tokenize_flat, cython_tokenize_flat)
+    for test_data in [
+        r'p[example="foo(int x) { this.x = x;}"]',
+        '"Lorem\\26Ipsum\ndolor" sit',
+        '/* Lorem\nipsum */\fa {\n    color: red;\tcontent: "dolor\\\fsit" }',
+        'not([[lorem]]{ipsum (42)})',
+        'a[b{d]e}',
+        'a[b{"d',
+    ]])
 def test_token_serialize_css(tokenize, css_source):
     if tokenize is None:  # pragma: no cover
         pytest.skip('Speedups not available')
diff --git a/tinycss/token_data.py b/tinycss/token_data.py
index 32fc26f..4111e1f 100644
--- a/tinycss/token_data.py
+++ b/tinycss/token_data.py
@@ -11,12 +11,11 @@
 
 from __future__ import unicode_literals
 
-import re
-import sys
-import operator
 import functools
+import operator
+import re
 import string
-
+import sys
 
 # * Raw strings with the r'' notation are used so that \ do not need
 #   to be escaped.
@@ -206,7 +205,7 @@ def _unicode_replace(match, int=int, unichr=unichr, maxunicode=sys.maxunicode):
     '')
 
 SIMPLE_UNESCAPE = functools.partial(
-    re.compile(r'\\(%s)' % COMPILED_MACROS['simple_escape'] , re.I).sub,
+    re.compile(r'\\(%s)' % COMPILED_MACROS['simple_escape'], re.I).sub,
    # Same as r'\1', but faster on CPython
    operator.methodcaller('group', 1))

@@ -328,16 +327,18 @@ def as_css(self):
 
     def __repr__(self):
         return ('<Token {0.type} at {0.line}:{0.column} {0.value!r}{1}>'
                 .format(self, self.unit or ''))
-
+
     def __eq__(self, other):
         if type(self) != type(other):
-            raise TypeError("Cannot compare %s and %s" % type(self), type(other))
-        else: return all(
-            self.type_ == other.type_,
-            self._as_css == other._as_css,
-            self.value == other.value,
-            self.unit == other.unit,
-            )
+            raise TypeError(
+                'Cannot compare {0} and {1}'.format(type(self), type(other)))
+        else:
+            return all((
+                self.type_ == other.type_,
+                self._as_css == other._as_css,
+                self.value == other.value,
+                self.unit == other.unit,
+            ))
 
 
 class ContainerToken(object):
diff --git a/tinycss/tokenizer.py b/tinycss/tokenizer.py
index e4a4661..114b19b 100644
--- a/tinycss/tokenizer.py
+++ b/tinycss/tokenizer.py
@@ -17,20 +17,20 @@
 
 from . 
import token_data
 
 
-def tokenize_flat(css_source, ignore_comments=True,
-    # Make these local variable to avoid global lookups in the loop
-    tokens_dispatch=token_data.TOKEN_DISPATCH,
-    unicode_unescape=token_data.UNICODE_UNESCAPE,
-    newline_unescape=token_data.NEWLINE_UNESCAPE,
-    simple_unescape=token_data.SIMPLE_UNESCAPE,
-    find_newlines=token_data.FIND_NEWLINES,
-    Token=token_data.Token,
-    len=len,
-    int=int,
-    float=float,
-    list=list,
-    _None=None,
-):
+def tokenize_flat(
+        css_source, ignore_comments=True,
+        # Make these local variables to avoid global lookups in the loop
+        tokens_dispatch=token_data.TOKEN_DISPATCH,
+        unicode_unescape=token_data.UNICODE_UNESCAPE,
+        newline_unescape=token_data.NEWLINE_UNESCAPE,
+        simple_unescape=token_data.SIMPLE_UNESCAPE,
+        find_newlines=token_data.FIND_NEWLINES,
+        Token=token_data.Token,
+        len=len,
+        int=int,
+        float=float,
+        list=list,
+        _None=None):
 
     """
     :param css_source:
         CSS as an unicode string
@@ -158,10 +158,9 @@ def regroup(tokens):
     tokens = iter(tokens)
     eof = [False]
 
-    def _regroup_inner(stop_at=None,
-                       tokens=tokens, pairs=pairs, eof=eof,
-                       ContainerToken=token_data.ContainerToken,
-                       FunctionToken=token_data.FunctionToken):
+    def _regroup_inner(stop_at=None, tokens=tokens, pairs=pairs, eof=eof,
+                       ContainerToken=token_data.ContainerToken,
+                       FunctionToken=token_data.FunctionToken):
         for token in tokens:
             type_ = token.type
             if type_ == stop_at:
From c5c0282087414e82137a88d7884a6eaab8f151cd Mon Sep 17 00:00:00 2001
From: Guillaume Ayoub
Date: Wed, 14 Sep 2016 14:32:14 +0200
Subject: [PATCH 11/28] Small fixes for PyPy

---
 setup.cfg                       | 2 +-
 tinycss/tests/test_tokenizer.py | 7 +++++--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/setup.cfg b/setup.cfg
index 5b2ad96..acf1ffa 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -13,4 +13,4 @@ test = pytest
 addopts = --flake8 --isort --cov --ignore=test/cairosvg_reference
 norecursedirs = dist .cache .git build *.egg-info .eggs venv cairosvg_reference
 flake8-ignore = docs/conf.py ALL
-isort_ignore = docs/conf.py
\ No newline at end of file
+isort_ignore = setup.py docs/conf.py
\ No newline at end of file
diff --git a/tinycss/tests/test_tokenizer.py b/tinycss/tests/test_tokenizer.py
index b5dfe29..f3e7a6f 100644
--- a/tinycss/tests/test_tokenizer.py
+++ b/tinycss/tests/test_tokenizer.py
@@ -19,12 +19,15 @@
 
 
 def test_speedups():
-    if os.environ.get('TINYCSS_SKIP_SPEEDUPS_TESTS'):  # pragma: no cover
+    is_pypy = hasattr(sys, 'pypy_translation_info')
+    env_skip_tests = os.environ.get('TINYCSS_SKIP_SPEEDUPS_TESTS')
+    # The Cython speedups are never built on PyPy, so skip silently there:
+    if is_pypy or env_skip_tests:  # pragma: no cover
         return
     assert cython_tokenize_flat is not None, (
         'Cython speedups are not installed, related tests will '
         'be skipped. Set the TINYCSS_SKIP_SPEEDUPS_TESTS environment '
-        'variable if this is expected (eg. on PyPy).')
+        'variable if this is expected.')
From 80222295a8d9089c10f827ca6fa589f821877828 Mon Sep 17 00:00:00 2001
From: Guillaume Ayoub
Date: Wed, 14 Sep 2016 18:08:37 +0200
Subject: [PATCH 12/28] Fix tests with isort on Python 2

Isort's creators can go to hell for their pie_slice module, which
changes the default encoding behind our back on Python 2.
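
For reference, a rough sketch of what the workaround applied below
boils down to (assuming pie_slice's setdefaultencoding('utf-8') side
effect is indeed the culprit; reload() is a builtin on Python 2 only):

    import sys

    # Importing isort pulls in its pie_slice module, which calls
    # sys.setdefaultencoding('utf-8') as a side effect on Python 2.
    # reload(sys) restores the setdefaultencoding() function that
    # site.py deletes at startup, so the 'ascii' default can be put
    # back before the tests run:
    if sys.version_info[0] == 2:
        reload(sys)  # noqa
        sys.setdefaultencoding('ascii')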
--- setup.cfg | 4 +++- tinycss/tests/__init__.py | 8 ++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index acf1ffa..32f40ab 100644 --- a/setup.cfg +++ b/setup.cfg @@ -13,4 +13,6 @@ test = pytest addopts = --flake8 --isort --cov --ignore=test/cairosvg_reference norecursedirs = dist .cache .git build *.egg-info .eggs venv cairosvg_reference flake8-ignore = docs/conf.py ALL -isort_ignore = setup.py docs/conf.py \ No newline at end of file +isort_ignore = + docs/conf.py + setup.py diff --git a/tinycss/tests/__init__.py b/tinycss/tests/__init__.py index 1babd98..af7a49e 100644 --- a/tinycss/tests/__init__.py +++ b/tinycss/tests/__init__.py @@ -10,6 +10,14 @@ from __future__ import unicode_literals +import sys + + +# Awful workaround to fix isort's "sys.setdefaultencoding('utf-8')". +if sys.version_info[0] == 2: + reload(sys) # noqa + sys.setdefaultencoding('ascii') + def assert_errors(errors, expected_errors): """Test not complete error messages but only substrings.""" From eb0fb28b88c84ac0448534d65e9dca246f531065 Mon Sep 17 00:00:00 2001 From: Guillaume Ayoub Date: Wed, 14 Sep 2016 18:17:00 +0200 Subject: [PATCH 13/28] Don't redefine the rule variable --- tinycss/tests/test_fonts3.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tinycss/tests/test_fonts3.py b/tinycss/tests/test_fonts3.py index 7c354ca..f90f91c 100644 --- a/tinycss/tests/test_fonts3.py +++ b/tinycss/tests/test_fonts3.py @@ -142,8 +142,8 @@ def test_font_feature_values_content(css, expected_rules, expected_errors): assert rule.at_keyword == '@font-feature-values' rules = [ - (rule.at_keyword, [ + (at_rule.at_keyword, [ (decl.name, list(jsonify(decl.value))) - for decl in rule.declarations]) - for rule in rule.at_rules] if rule.at_rules else None + for decl in at_rule.declarations]) + for at_rule in rule.at_rules] if rule.at_rules else None assert rules == expected_rules From 673910edf1ae4d0772e267ee2e0f49d2f14120b4 Mon Sep 17 00:00:00 2001 From: Guillaume Ayoub Date: Wed, 14 Sep 2016 18:22:10 +0200 Subject: [PATCH 14/28] Add CPython and PyPy classifiers in setup.py --- setup.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup.py b/setup.py index 13a1749..6139230 100644 --- a/setup.py +++ b/setup.py @@ -94,6 +94,8 @@ def run_setup(with_extension): 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: Implementation :: CPython', + 'Programming Language :: Python :: Implementation :: PyPy', ], setup_requires=pytest_runner, tests_require=[ From 51006e280ed0ac28b8f567343b671a102a5261e4 Mon Sep 17 00:00:00 2001 From: Guillaume Ayoub Date: Fri, 23 Sep 2016 16:50:07 +0200 Subject: [PATCH 15/28] Allow @font-face rules with missing descriptors --- tinycss/fonts3.py | 5 ----- tinycss/tests/test_fonts3.py | 7 +------ 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/tinycss/fonts3.py b/tinycss/fonts3.py index 748b9fb..c1f96a6 100644 --- a/tinycss/fonts3.py +++ b/tinycss/fonts3.py @@ -141,11 +141,6 @@ def parse_at_rule(self, rule, previous_rules, errors, context): rule.head[0].type, rule.at_keyword)) declarations, body_errors = self.parse_declaration_list(rule.body) errors.extend(body_errors) - names = [declaration.name for declaration in declarations] - if 'src' not in names or 'font-family' not in names: - raise ParseError( - rule, - '@font-face rule needs src and font-family descriptors') return FontFaceRule( rule.at_keyword, declarations, 
rule.line, rule.column) elif rule.at_keyword == '@font-feature-values': diff --git a/tinycss/tests/test_fonts3.py b/tinycss/tests/test_fonts3.py index f90f91c..ee8ab67 100644 --- a/tinycss/tests/test_fonts3.py +++ b/tinycss/tests/test_fonts3.py @@ -61,12 +61,7 @@ def test_font_feature_values_selectors(css, expected_family_names, @pytest.mark.parametrize(('css', 'expected_declarations', 'expected_errors'), [ - ('@font-face {}', None, [ - '@font-face rule needs src and font-family descriptors']), - ('@font-face {src:"lol"}', None, [ - '@font-face rule needs src and font-family descriptors']), - ('@font-face {font-family:"lol"}', None, [ - '@font-face rule needs src and font-family descriptors']), + ('@font-face {}', [], []), ('@font-face test { src: "lol"; font-family: "bar" }', None, [ 'unexpected IDENT token in @font-face rule header']), ('@font-face { src: "lol"; font-family: "bar" }', [ From 31625bef5e4fcc53c1fd0b3f5a8b97b4bc638f6b Mon Sep 17 00:00:00 2001 From: Guillaume Ayoub Date: Fri, 23 Sep 2016 17:51:09 +0200 Subject: [PATCH 16/28] Update docs --- README.rst | 3 ++- docs/css3.rst | 11 +++++++++++ docs/index.rst | 7 ++++--- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/README.rst b/README.rst index c8ca237..9f1879b 100644 --- a/README.rst +++ b/README.rst @@ -5,6 +5,7 @@ tinycss: CSS parser for Python syntax and error handling for CSS 2.1 as well as some CSS 3 modules: * CSS Color 3 +* CSS Fonts 3 * CSS Paged Media 3 It is designed to be easy to extend for new CSS modules and syntax, @@ -13,7 +14,7 @@ and integrates well with cssselect_ for Selectors 3 support. Quick facts: * Free software: BSD licensed -* Compatible with Python 2.6+ and 3.x +* Compatible with Python 2.7 and 3.x * Latest documentation `on python.org`_ * Source, issues and pull requests `on Github`_ * Releases `on PyPI`_ diff --git a/docs/css3.rst b/docs/css3.rst index bee533c..91ad4c2 100644 --- a/docs/css3.rst +++ b/docs/css3.rst @@ -99,6 +99,17 @@ Paged Media 3 .. autoclass:: MarginRule +.. module:: tinycss.fonts3 + +Fonts 3 +------- + +.. autoclass:: CSSFonts3Parser +.. autoclass:: FontFaceRule +.. autoclass:: FontFeatureValuesRule +.. autoclass:: FontFeatureRule + + Other CSS modules ----------------- diff --git a/docs/index.rst b/docs/index.rst index 2c5c8a4..838f6ac 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,9 +4,10 @@ Requirements ------------ -tinycss is tested on CPython 2.6, 2.7, 3.1 and 3.2 as well as PyPy 1.8; -it should work on any implementation of **Python 2.6 or later version -(including 3.x)** of the language. +`tinycss is tested ` on CPython 2.7, 3.3, +3.4 and 3.5 as well as PyPy 5.3 and PyPy3 2.4; it should work on any +implementation of **Python 2.7 or later version (including 3.x)** of the +language. Cython_ is used for optional accelerators but is only required for development versions on tinycss. From c9565fd696a3745d81fa7c8a4d455efa7b9a990c Mon Sep 17 00:00:00 2001 From: Guillaume Ayoub Date: Fri, 23 Sep 2016 18:06:47 +0200 Subject: [PATCH 17/28] Use the default theme for the docs --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 7d9c2fe..fa7da0a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -97,7 +97,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'agogo' +#html_theme = 'agogo' # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the From c94e69849133ef487168ec3aea575ca69acb9f36 Mon Sep 17 00:00:00 2001 From: Guillaume Ayoub Date: Fri, 23 Sep 2016 18:07:51 +0200 Subject: [PATCH 18/28] Fix a link in docs --- docs/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index 838f6ac..541ff9d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,7 +4,7 @@ Requirements ------------ -`tinycss is tested ` on CPython 2.7, 3.3, +`tinycss is tested `_ on CPython 2.7, 3.3, 3.4 and 3.5 as well as PyPy 5.3 and PyPy3 2.4; it should work on any implementation of **Python 2.7 or later version (including 3.x)** of the language. From 40e921fc049ba7fbd6bddb9a88f789d049bf5455 Mon Sep 17 00:00:00 2001 From: Guillaume Ayoub Date: Fri, 23 Sep 2016 18:11:55 +0200 Subject: [PATCH 19/28] Update CHANGES --- CHANGES | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index f98be83..27b8cf7 100644 --- a/CHANGES +++ b/CHANGES @@ -2,10 +2,20 @@ tinycss changelog ================= +Version 0.4 +----------- + +Released on 2016-09-23. + +* Add an __eq__ operator to Token object. +* Support Fonts 3. + + + Version 0.3 ----------- -Not released yet. +Released on 2012-09-18. * Fix a bug when parsing \5c (an escaped antislash.) From 2421fd92a3d7ac8b767514fc9d2497f86a7ff33b Mon Sep 17 00:00:00 2001 From: Guillaume Ayoub Date: Fri, 23 Sep 2016 18:13:35 +0200 Subject: [PATCH 20/28] Remove tox.ini --- tox.ini | 19 ------------------- 1 file changed, 19 deletions(-) delete mode 100644 tox.ini diff --git a/tox.ini b/tox.ini deleted file mode 100644 index dd97398..0000000 --- a/tox.ini +++ /dev/null @@ -1,19 +0,0 @@ -[tox] -envlist = py26,py27,py32,py33,pypy,sphinx-doctests - -[testenv] -deps = pytest -changedir = {toxworkdir}/log -commands = py.test --pyargs tinycss [] - -[testenv:pypy] -setenv = TINYCSS_SKIP_SPEEDUPS_TESTS=1 - -[testenv:sphinx-doctests] -basepython = python3 -deps = - Sphinx - cssselect - lxml -changedir = docs -commands = sphinx-build -b doctest . 
_build/html
From eddd9ae2f355d210f981561b6d0f55b26bc4d6f6 Mon Sep 17 00:00:00 2001
From: Guillaume Ayoub
Date: Fri, 23 Sep 2016 18:22:54 +0200
Subject: [PATCH 21/28] Set Read the Docs as homepage

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 6139230..3c50f5b 100644
--- a/setup.py
+++ b/setup.py
@@ -78,7 +78,7 @@ def run_setup(with_extension):
     setup(
         name='tinycss',
         version=VERSION,
-        url='http://packages.python.org/tinycss/',
+        url='http://tinycss.readthedocs.io/',
         license='BSD',
         author='Simon Sapin',
         author_email='simon.sapin@exyr.org',
From 35b0413845795f1ac74a80c4f9ae242d17174493 Mon Sep 17 00:00:00 2001
From: Guillaume Ayoub
Date: Fri, 23 Sep 2016 18:25:10 +0200
Subject: [PATCH 22/28] Version 0.4

---
 tinycss/version.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tinycss/version.py b/tinycss/version.py
index 014a8e4..f896235 100644
--- a/tinycss/version.py
+++ b/tinycss/version.py
@@ -1 +1 @@
-VERSION = '0.3'
+VERSION = '0.4'
From b334641300927beda24a713354c18c35cc97f257 Mon Sep 17 00:00:00 2001
From: grewn0uille
Date: Wed, 25 Jan 2017 15:38:37 +0100
Subject: [PATCH 23/28] gitlab-ci config

---
 .gitlab-ci.yml | 46 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+)
 create mode 100644 .gitlab-ci.yml

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000..fa9940e
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,46 @@
+before_script:
+  - pip install Cython
+  - pip install --upgrade -e .[test]
+
+.before_script_alpine: &before_alpine
+  before_script:
+    - apk add --no-cache openssl gcc
+    - pip install Cython
+    - pip install --upgrade -e .[test]
+
+.test_template: &test
+  script:
+    - python setup.py test
+
+python 2.7alpine:
+  image: python:2.7-alpine
+  <<: *before_alpine
+  <<: *test
+
+python 3.3alpine:
+  image: python:3.3-alpine
+  <<: *before_alpine
+  <<: *test
+
+python 3.4alpine:
+  image: python:3.4-alpine
+  <<: *before_alpine
+  <<: *test
+
+python 3.5alpine:
+  image: python:3.5-alpine
+  <<: *before_alpine
+  <<: *test
+
+python 3.6alpine:
+  image: python:3.6-alpine
+  <<: *before_alpine
+  <<: *test
+
+python pypy:
+  image: pypy:2
+  <<: *test
+
+python pypy3:
+  image: pypy:3
+  <<: *test
From 3a72842a6335874093f82d11dd82f704391dd066 Mon Sep 17 00:00:00 2001
From: grewn0uille
Date: Wed, 25 Jan 2017 15:43:22 +0100
Subject: [PATCH 24/28] add musl-dev for alpine, and setuptools

---
 .gitlab-ci.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index fa9940e..09f77fe 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,11 +1,11 @@
 before_script:
-  - pip install Cython
+  - pip install Cython setuptools
   - pip install --upgrade -e .[test]
 
 .before_script_alpine: 
&before_alpine before_script: - apk add --no-cache openssl gcc musl-dev - - pip install Cython setuptools + - pip install --upgrade pip Cython setuptools - pip install --upgrade -e .[test] .test_template: &test From 27e96cfdd5b4df1f13de547181d99ba6392d4476 Mon Sep 17 00:00:00 2001 From: grewn0uille Date: Wed, 25 Jan 2017 16:00:20 +0100 Subject: [PATCH 26/28] change pip install setuptools line --- .gitlab-ci.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8897991..c1af496 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,11 +1,12 @@ before_script: - - pip install --upgrade pip Cython setuptools + - pip install -U setuptools + - pip install Cython - pip install --upgrade -e .[test] .before_script_alpine: &before_alpine before_script: - apk add --no-cache openssl gcc musl-dev - - pip install --upgrade pip Cython setuptools + - pip install Cython setuptools - pip install --upgrade -e .[test] .test_template: &test From 2150bc1fb425d1ac529a144fb5a7825ab1ab9276 Mon Sep 17 00:00:00 2001 From: Guillaume Ayoub Date: Wed, 25 Jan 2017 16:18:33 +0100 Subject: [PATCH 27/28] Add missing empty lines for Flake8 --- tinycss/decoding.py | 1 + tinycss/tests/speed.py | 1 + tinycss/token_data.py | 2 ++ 3 files changed, 4 insertions(+) diff --git a/tinycss/decoding.py b/tinycss/decoding.py index 09875a5..f7c962e 100644 --- a/tinycss/decoding.py +++ b/tinycss/decoding.py @@ -104,6 +104,7 @@ class Slicer(object): def __getitem__(self, slice_): return operator.itemgetter(slice_) + Slice = Slicer() diff --git a/tinycss/tests/speed.py b/tinycss/tests/speed.py index 860e5df..c2a02fd 100644 --- a/tinycss/tests/speed.py +++ b/tinycss/tests/speed.py @@ -63,6 +63,7 @@ def parse(tokenizer_name): result.append((selector, declarations)) return result + parse_cython = functools.partial(parse, 'cython_tokenize_flat') parse_python = functools.partial(parse, 'python_tokenize_flat') diff --git a/tinycss/token_data.py b/tinycss/token_data.py index 4111e1f..cb99d8d 100644 --- a/tinycss/token_data.py +++ b/tinycss/token_data.py @@ -186,6 +186,7 @@ def _init(): for names in dispatch ) + _init() @@ -196,6 +197,7 @@ def _unicode_replace(match, int=int, unichr=unichr, maxunicode=sys.maxunicode): else: return '\N{REPLACEMENT CHARACTER}' # U+FFFD + UNICODE_UNESCAPE = functools.partial( re.compile(COMPILED_MACROS['unicode'], re.I).sub, _unicode_replace) From a14535a5cd344329403795ec19929c3d2593499f Mon Sep 17 00:00:00 2001 From: Guillaume Ayoub Date: Sat, 25 Mar 2017 21:54:22 +0100 Subject: [PATCH 28/28] Drop the tests with PyPy3 Travis' version of PyPy is too old now. --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 618318d..71bfc70 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,7 +6,6 @@ python: - "3.4" - "3.5" - "pypy" - - "pypy3" install: - pip install Cython