diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000..c1af496
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,47 @@
+before_script:
+ - pip install -U setuptools
+ - pip install Cython
+ - pip install --upgrade -e .[test]
+
+.before_script_alpine: &before_alpine
+ before_script:
+ - apk add --no-cache openssl gcc musl-dev
+ - pip install Cython setuptools
+ - pip install --upgrade -e .[test]
+
+.test_template: &test
+ script:
+ - python setup.py test
+
+python 2.7alpine:
+ image: python:2.7-alpine
+ <<: *before_alpine
+ <<: *test
+
+python 3.3alpine:
+ image: python:3.3-alpine
+ <<: *before_alpine
+ <<: *test
+
+python 3.4alpine:
+ image: python:3.4-alpine
+ <<: *before_alpine
+ <<: *test
+
+python 3.5alpine:
+ image: python:3.5-alpine
+ <<: *before_alpine
+ <<: *test
+
+python 3.6alpine:
+ image: python:3.6-alpine
+ <<: *before_alpine
+ <<: *test
+
+python pypy:
+ image: pypy:2
+ <<: *test
+
+python pypy3:
+ image: pypy:3
+ <<: *test
diff --git a/.travis.yml b/.travis.yml
index 4391da2..71bfc70 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,13 +1,15 @@
language: python
python:
- - "2.6"
- "2.7"
- - "3.1"
- - "3.2"
+ - "3.3"
+ - "3.4"
+ - "3.5"
+ - "pypy"
install:
- - pip install --use-mirrors Cython
- - pip install --use-mirrors -e .
+ - pip install Cython
+ - pip install --upgrade -e .[test]
-script: py.test
+script:
+ - python setup.py test
diff --git a/CHANGES b/CHANGES
index fe1fcd8..27b8cf7 100644
--- a/CHANGES
+++ b/CHANGES
@@ -2,6 +2,25 @@ tinycss changelog
=================
+Version 0.4
+-----------
+
+Released on 2016-09-23.
+
+* Add an __eq__ operator to the Token object.
+* Support CSS Fonts 3.
+
+
+
+Version 0.3
+-----------
+
+Released on 2012-09-18.
+
+* Fix a bug when parsing \5c (an escaped backslash).
+
+
+
Version 0.2
-----------
diff --git a/README.rst b/README.rst
index c8ca237..9f1879b 100644
--- a/README.rst
+++ b/README.rst
@@ -5,6 +5,7 @@ tinycss: CSS parser for Python
syntax and error handling for CSS 2.1 as well as some CSS 3 modules:
* CSS Color 3
+* CSS Fonts 3
* CSS Paged Media 3
It is designed to be easy to extend for new CSS modules and syntax,
@@ -13,7 +14,7 @@ and integrates well with cssselect_ for Selectors 3 support.
Quick facts:
* Free software: BSD licensed
-* Compatible with Python 2.6+ and 3.x
+* Compatible with Python 2.7 and 3.x
* Latest documentation `on python.org`_
* Source, issues and pull requests `on Github`_
* Releases `on PyPI`_
diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html
index 8613b2f..3ac9078 100644
--- a/docs/_templates/layout.html
+++ b/docs/_templates/layout.html
@@ -1,10 +1,4 @@
{% extends "!layout.html" %}
{% block extrahead %}
-
{% endblock %}
diff --git a/docs/conf.py b/docs/conf.py
index 7d9c2fe..fa7da0a 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -97,7 +97,7 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = 'agogo'
+#html_theme = 'agogo'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
diff --git a/docs/css3.rst b/docs/css3.rst
index bee533c..91ad4c2 100644
--- a/docs/css3.rst
+++ b/docs/css3.rst
@@ -99,6 +99,17 @@ Paged Media 3
.. autoclass:: MarginRule
+.. module:: tinycss.fonts3
+
+Fonts 3
+-------
+
+.. autoclass:: CSSFonts3Parser
+.. autoclass:: FontFaceRule
+.. autoclass:: FontFeatureValuesRule
+.. autoclass:: FontFeatureRule
+
+
Other CSS modules
-----------------
diff --git a/docs/index.rst b/docs/index.rst
index 2c5c8a4..541ff9d 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -4,9 +4,10 @@
Requirements
------------
-tinycss is tested on CPython 2.6, 2.7, 3.1 and 3.2 as well as PyPy 1.8;
-it should work on any implementation of **Python 2.6 or later version
-(including 3.x)** of the language.
+tinycss is tested on CPython 2.7, 3.3,
+3.4 and 3.5 as well as PyPy 5.3 and PyPy3 2.4; it should work on any
+implementation of **Python 2.7 or later version (including 3.x)** of the
+language.
Cython_ is used for optional accelerators but is only required for
development versions of tinycss.
diff --git a/setup.cfg b/setup.cfg
index 836ca93..32f40ab 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,3 +6,13 @@ build-dir = docs/_build
[upload_sphinx] # Sphinx-PyPI-upload
upload-dir = docs/_build/html
+[aliases]
+test = pytest
+
+[tool:pytest]
+addopts = --flake8 --isort --cov --ignore=test/cairosvg_reference
+norecursedirs = dist .cache .git build *.egg-info .eggs venv cairosvg_reference
+flake8-ignore = docs/conf.py ALL
+isort_ignore =
+ docs/conf.py
+ setup.py
diff --git a/setup.py b/setup.py
index 6c0ec6d..3c50f5b 100644
--- a/setup.py
+++ b/setup.py
@@ -1,9 +1,10 @@
+import os.path
import re
import sys
-import os.path
-from setuptools import setup, Extension
from distutils.errors import (
CCompilerError, DistutilsExecError, DistutilsPlatformError)
+from setuptools import Extension, setup
+
try:
from Cython.Distutils import build_ext
import Cython.Compiler.Version
@@ -15,13 +16,15 @@
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
if sys.platform == 'win32' and sys.version_info > (2, 6):
- # 2.6's distutils.msvc9compiler can raise an IOError when failing to
- # find the compiler
- ext_errors += (IOError,)
+ # 2.6's distutils.msvc9compiler can raise an IOError when failing to
+ # find the compiler
+ ext_errors += (IOError,)
+
class BuildFailed(Exception):
pass
+
class ve_build_ext(build_ext):
# This class allows C extension building to fail.
@@ -46,6 +49,10 @@ def build_extension(self, ext):
README = fd.read().decode('utf8')
+needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
+pytest_runner = ['pytest-runner'] if needs_pytest else []
+
+
def run_setup(with_extension):
if with_extension:
extension_path = os.path.join('tinycss', 'speedups')
@@ -55,11 +62,11 @@ def run_setup(with_extension):
else:
extension_path += '.c'
if not os.path.exists(extension_path):
- print ("WARNING: Trying to build without Cython, but "
- "pre-generated '%s' does not seem to be available."
- % extension_path)
+ print("WARNING: Trying to build without Cython, but "
+ "pre-generated '%s' does not seem to be available."
+ % extension_path)
else:
- print ('Building without Cython.')
+ print('Building without Cython.')
kwargs = dict(
cmdclass=dict(build_ext=ve_build_ext),
ext_modules=[Extension('tinycss.speedups',
@@ -71,23 +78,30 @@ def run_setup(with_extension):
setup(
name='tinycss',
version=VERSION,
- url='http://packages.python.org/tinycss/',
+ url='http://tinycss.readthedocs.io/',
license='BSD',
author='Simon Sapin',
author_email='simon.sapin@exyr.org',
description='tinycss is a complete yet simple CSS parser for Python.',
long_description=README,
classifiers=[
- 'Development Status :: 3 - Alpha',
+ 'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.1',
- 'Programming Language :: Python :: 3.2',
+ 'Programming Language :: Python :: 3.3',
+ 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: Implementation :: CPython',
+ 'Programming Language :: Python :: Implementation :: PyPy',
],
+ setup_requires=pytest_runner,
+ tests_require=[
+ 'pytest-cov', 'pytest-flake8', 'pytest-isort', 'pytest-runner'],
+ extras_require={'test': (
+ 'pytest-runner', 'pytest-cov', 'pytest-flake8', 'pytest-isort')},
packages=['tinycss', 'tinycss.tests'],
**kwargs
)
diff --git a/tinycss/__init__.py b/tinycss/__init__.py
index 9eca2b1..aba135c 100644
--- a/tinycss/__init__.py
+++ b/tinycss/__init__.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
tinycss
-------
@@ -9,17 +9,18 @@
:license: BSD, see LICENSE for more details.
"""
-import sys
-
from .version import VERSION
-__version__ = VERSION
from .css21 import CSS21Parser
from .page3 import CSSPage3Parser
+from .fonts3 import CSSFonts3Parser
+__version__ = VERSION
+
PARSER_MODULES = {
'page3': CSSPage3Parser,
+ 'fonts3': CSSFonts3Parser,
}
@@ -30,6 +31,8 @@ def make_parser(*features, **kwargs):
Positional arguments are base classes the new parser class will extend.
The string ``'page3'`` is accepted as short for
:class:`~page3.CSSPage3Parser`.
+ The string ``'fonts3'`` is accepted as short for
+ :class:`~fonts3.CSSFonts3Parser`.
:param kwargs:
Keyword arguments are passed to the parser’s constructor.
:returns:
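As a usage sketch (not part of this patch): with the new shortcut, a
combined parser is built exactly like the existing 'page3' one. The
stylesheet below is made up for illustration:

    from tinycss import make_parser

    # 'fonts3' mixes CSSFonts3Parser into the generated parser class,
    # just as 'page3' does for CSSPage3Parser.
    parser = make_parser('fonts3')
    stylesheet = parser.parse_stylesheet(
        '@font-face { font-family: "Foo"; src: url(foo.ttf) }')
    assert stylesheet.rules[0].at_keyword == '@font-face'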
diff --git a/tinycss/color3.py b/tinycss/color3.py
index 187196e..92eed46 100644
--- a/tinycss/color3.py
+++ b/tinycss/color3.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
tinycss.colors3
---------------
@@ -13,7 +13,8 @@
:license: BSD, see LICENSE for more details.
"""
-from __future__ import unicode_literals, division
+from __future__ import division, unicode_literals
+
import collections
import itertools
import re
diff --git a/tinycss/css21.py b/tinycss/css21.py
index 51e6529..e611a5e 100644
--- a/tinycss/css21.py
+++ b/tinycss/css21.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
tinycss.css21
-------------
@@ -11,13 +11,15 @@
"""
from __future__ import unicode_literals
+
from itertools import chain, islice
from .decoding import decode
+from .parsing import (
+ ParseError, remove_whitespace, split_on_comma, strip_whitespace,
+ validate_any, validate_value)
from .token_data import TokenList
from .tokenizer import tokenize_grouped
-from .parsing import (strip_whitespace, remove_whitespace, split_on_comma,
- validate_value, validate_block, validate_any, ParseError)
# stylesheet : [ CDO | CDC | S | statement ]*;
@@ -293,7 +295,6 @@ def __repr__(self):
' {0.uri}>'.format(self))
-
def _remove_at_charset(tokens):
"""Remove any valid @charset at the beggining of a token stream.
@@ -307,8 +308,8 @@ def _remove_at_charset(tokens):
header = list(islice(tokens, 4))
if [t.type for t in header] == ['ATKEYWORD', 'S', 'STRING', ';']:
atkw, space, string, semicolon = header
- if ((atkw.value, space.value) == ('@charset', ' ')
- and string.as_css()[0] == '"'):
+ if ((atkw.value, space.value) == ('@charset', ' ') and
+ string.as_css()[0] == '"'):
# Found a valid @charset rule, only keep what’s after it.
return tokens
return chain(header, tokens)
@@ -331,7 +332,7 @@ class CSS21Parser(object):
# User API:
def parse_stylesheet_file(self, css_file, protocol_encoding=None,
- linking_encoding=None, document_encoding=None):
+ linking_encoding=None, document_encoding=None):
"""Parse a stylesheet from a file or filename.
Character encoding-related parameters and behavior are the same
@@ -512,8 +513,9 @@ def parse_at_rule(self, rule, previous_rules, errors, context):
raise ParseError(rule, '@page rule not allowed in ' + context)
selector, specificity = self.parse_page_selector(rule.head)
if rule.body is None:
- raise ParseError(rule,
- 'invalid {0} rule: missing block'.format(rule.at_keyword))
+ raise ParseError(
+ rule, 'invalid {0} rule: missing block'.format(
+ rule.at_keyword))
declarations, at_rules, rule_errors = \
self.parse_declarations_and_at_rules(rule.body, '@page')
errors.extend(rule_errors)
@@ -527,32 +529,34 @@ def parse_at_rule(self, rule, previous_rules, errors, context):
raise ParseError(rule, 'expected media types for @media')
media = self.parse_media(rule.head)
if rule.body is None:
- raise ParseError(rule,
- 'invalid {0} rule: missing block'.format(rule.at_keyword))
+ raise ParseError(
+ rule, 'invalid {0} rule: missing block'.format(
+ rule.at_keyword))
rules, rule_errors = self.parse_rules(rule.body, '@media')
errors.extend(rule_errors)
return MediaRule(media, rules, rule.line, rule.column)
elif rule.at_keyword == '@import':
if context != 'stylesheet':
- raise ParseError(rule,
- '@import rule not allowed in ' + context)
+ raise ParseError(
+ rule, '@import rule not allowed in ' + context)
for previous_rule in previous_rules:
if previous_rule.at_keyword not in ('@charset', '@import'):
if previous_rule.at_keyword:
type_ = 'an {0} rule'.format(previous_rule.at_keyword)
else:
type_ = 'a ruleset'
- raise ParseError(previous_rule,
+ raise ParseError(
+ previous_rule,
'@import rule not allowed after ' + type_)
head = rule.head
if not head:
- raise ParseError(rule,
- 'expected URI or STRING for @import rule')
+ raise ParseError(
+ rule, 'expected URI or STRING for @import rule')
if head[0].type not in ('URI', 'STRING'):
- raise ParseError(rule,
- 'expected URI or STRING for @import rule, got '
- + head[0].type)
+ raise ParseError(
+ rule, 'expected URI or STRING for @import rule, got ' +
+ head[0].type)
uri = head[0].value
media = self.parse_media(strip_whitespace(head[1:]))
if rule.body is not None:
@@ -565,8 +569,9 @@ def parse_at_rule(self, rule, previous_rules, errors, context):
raise ParseError(rule, 'mis-placed or malformed @charset rule')
else:
- raise ParseError(rule, 'unknown at-rule in {0} context: {1}'
- .format(context, rule.at_keyword))
+ raise ParseError(
+ rule, 'unknown at-rule in {0} context: {1}'.format(
+ context, rule.at_keyword))
def parse_media(self, tokens):
"""For CSS 2.1, parse a list of media types.
@@ -588,8 +593,9 @@ def parse_media(self, tokens):
if types == ['IDENT']:
media_types.append(part[0].value)
else:
- raise ParseError(tokens[0], 'expected a media type'
- + ((', got ' + ', '.join(types)) if types else ''))
+ raise ParseError(
+ tokens[0], 'expected a media type' +
+ ((', got ' + ', '.join(types)) if types else ''))
return media_types
def parse_page_selector(self, tokens):
@@ -607,8 +613,8 @@ def parse_page_selector(self, tokens):
"""
if not tokens:
return None, (0, 0)
- if (len(tokens) == 2 and tokens[0].type == ':'
- and tokens[1].type == 'IDENT'):
+ if (len(tokens) == 2 and tokens[0].type == ':' and
+ tokens[1].type == 'IDENT'):
pseudo_class = tokens[1].value
specificity = {
'first': (1, 0), 'left': (0, 1), 'right': (0, 1),
@@ -677,8 +683,9 @@ def parse_ruleset(self, first_token, tokens):
for one ruleset.
:return:
a tuple of a :class:`RuleSet` and an error list.
- The errors are recovered :class:`~.parsing.ParseError` in declarations.
- (Parsing continues from the next declaration on such errors.)
+ The errors are recovered :class:`~.parsing.ParseError` in
+ declarations. (Parsing continues from the next declaration on such
+ errors.)
:raises:
:class:`~.parsing.ParseError` if the selector is invalid for the
core grammar.
@@ -765,8 +772,9 @@ def parse_declaration(self, tokens):
# CSS syntax is case-insensitive
property_name = name_token.value.lower()
else:
- raise ParseError(name_token,
- 'expected a property name, got {0}'.format(name_token.type))
+ raise ParseError(
+ name_token, 'expected a property name, got {0}'.format(
+ name_token.type))
token = name_token # In case ``tokens`` is now empty
for token in tokens:
diff --git a/tinycss/decoding.py b/tinycss/decoding.py
index 6303e1a..f7c962e 100644
--- a/tinycss/decoding.py
+++ b/tinycss/decoding.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
tinycss.decoding
----------------
@@ -12,11 +12,9 @@
from __future__ import unicode_literals
-from binascii import unhexlify
import operator
import re
-import sys
-
+from binascii import unhexlify
__all__ = ['decode'] # Everything else is implementation detail
@@ -106,6 +104,7 @@ class Slicer(object):
def __getitem__(self, slice_):
return operator.itemgetter(slice_)
+
Slice = Slicer()
@@ -116,101 +115,101 @@ def __getitem__(self, slice_):
ENCODING_MAGIC_NUMBERS = [
((Slice[:], ''), re.compile(
- hex2re('EF BB BF 40 63 68 61 72 73 65 74 20 22')
- + b'([^\x22]*?)'
- + hex2re('22 3B')).match),
+ hex2re('EF BB BF 40 63 68 61 72 73 65 74 20 22') +
+ b'([^\x22]*?)' +
+ hex2re('22 3B')).match),
('UTF-8', re.compile(
hex2re('EF BB BF')).match),
((Slice[:], ''), re.compile(
- hex2re('40 63 68 61 72 73 65 74 20 22')
- + b'([^\x22]*?)'
- + hex2re('22 3B')).match),
+ hex2re('40 63 68 61 72 73 65 74 20 22') +
+ b'([^\x22]*?)' +
+ hex2re('22 3B')).match),
((Slice[1::2], '-BE'), re.compile(
hex2re('FE FF 00 40 00 63 00 68 00 61 00 72 00 73 00 65 00'
- '74 00 20 00 22')
- + b'((\x00[^\x22])*?)'
- + hex2re('00 22 00 3B')).match),
+ '74 00 20 00 22') +
+ b'((\x00[^\x22])*?)' +
+ hex2re('00 22 00 3B')).match),
((Slice[1::2], '-BE'), re.compile(
hex2re('00 40 00 63 00 68 00 61 00 72 00 73 00 65 00 74 00'
- '20 00 22')
- + b'((\x00[^\x22])*?)'
- + hex2re('00 22 00 3B')).match),
+ '20 00 22') +
+ b'((\x00[^\x22])*?)' +
+ hex2re('00 22 00 3B')).match),
((Slice[::2], '-LE'), re.compile(
hex2re('FF FE 40 00 63 00 68 00 61 00 72 00 73 00 65 00 74'
- '00 20 00 22 00')
- + b'(([^\x22]\x00)*?)'
- + hex2re('22 00 3B 00')).match),
+ '00 20 00 22 00') +
+ b'(([^\x22]\x00)*?)' +
+ hex2re('22 00 3B 00')).match),
((Slice[::2], '-LE'), re.compile(
hex2re('40 00 63 00 68 00 61 00 72 00 73 00 65 00 74 00 20'
- '00 22 00')
- + b'(([^\x22]\x00)*?)'
- + hex2re('22 00 3B 00')).match),
+ '00 22 00') +
+ b'(([^\x22]\x00)*?)' +
+ hex2re('22 00 3B 00')).match),
((Slice[3::4], '-BE'), re.compile(
hex2re('00 00 FE FF 00 00 00 40 00 00 00 63 00 00 00 68 00'
'00 00 61 00 00 00 72 00 00 00 73 00 00 00 65 00 00'
- '00 74 00 00 00 20 00 00 00 22')
- + b'((\x00\x00\x00[^\x22])*?)'
- + hex2re('00 00 00 22 00 00 00 3B')).match),
+ '00 74 00 00 00 20 00 00 00 22') +
+ b'((\x00\x00\x00[^\x22])*?)' +
+ hex2re('00 00 00 22 00 00 00 3B')).match),
((Slice[3::4], '-BE'), re.compile(
hex2re('00 00 00 40 00 00 00 63 00 00 00 68 00 00 00 61 00'
'00 00 72 00 00 00 73 00 00 00 65 00 00 00 74 00 00'
- '00 20 00 00 00 22')
- + b'((\x00\x00\x00[^\x22])*?)'
- + hex2re('00 00 00 22 00 00 00 3B')).match),
-
-
-# Python does not support 2143 or 3412 endianness, AFAIK.
-# I guess we could fix it up ourselves but meh. Patches welcome.
-
-# ((Slice[2::4], '-2143'), re.compile(
-# hex2re('00 00 FF FE 00 00 40 00 00 00 63 00 00 00 68 00 00'
-# '00 61 00 00 00 72 00 00 00 73 00 00 00 65 00 00 00'
-# '74 00 00 00 20 00 00 00 22 00')
-# + b'((\x00\x00[^\x22]\x00)*?)'
-# + hex2re('00 00 22 00 00 00 3B 00')).match),
-
-# ((Slice[2::4], '-2143'), re.compile(
-# hex2re('00 00 40 00 00 00 63 00 00 00 68 00 00 00 61 00 00'
-# '00 72 00 00 00 73 00 00 00 65 00 00 00 74 00 00 00'
-# '20 00 00 00 22 00')
-# + b'((\x00\x00[^\x22]\x00)*?)'
-# + hex2re('00 00 22 00 00 00 3B 00')).match),
-
-# ((Slice[1::4], '-3412'), re.compile(
-# hex2re('FE FF 00 00 00 40 00 00 00 63 00 00 00 68 00 00 00'
-# '61 00 00 00 72 00 00 00 73 00 00 00 65 00 00 00 74'
-# '00 00 00 20 00 00 00 22 00 00')
-# + b'((\x00[^\x22]\x00\x00)*?)'
-# + hex2re('00 22 00 00 00 3B 00 00')).match),
-
-# ((Slice[1::4], '-3412'), re.compile(
-# hex2re('00 40 00 00 00 63 00 00 00 68 00 00 00 61 00 00 00'
-# '72 00 00 00 73 00 00 00 65 00 00 00 74 00 00 00 20'
-# '00 00 00 22 00 00')
-# + b'((\x00[^\x22]\x00\x00)*?)'
-# + hex2re('00 22 00 00 00 3B 00 00')).match),
+ '00 20 00 00 00 22') +
+ b'((\x00\x00\x00[^\x22])*?)' +
+ hex2re('00 00 00 22 00 00 00 3B')).match),
+
+
+ # Python does not support 2143 or 3412 endianness, AFAIK.
+ # I guess we could fix it up ourselves but meh. Patches welcome.
+
+ # ((Slice[2::4], '-2143'), re.compile(
+ # hex2re('00 00 FF FE 00 00 40 00 00 00 63 00 00 00 68 00 00'
+ # '00 61 00 00 00 72 00 00 00 73 00 00 00 65 00 00 00'
+ # '74 00 00 00 20 00 00 00 22 00') +
+ # b'((\x00\x00[^\x22]\x00)*?)' +
+ # hex2re('00 00 22 00 00 00 3B 00')).match),
+
+ # ((Slice[2::4], '-2143'), re.compile(
+ # hex2re('00 00 40 00 00 00 63 00 00 00 68 00 00 00 61 00 00'
+ # '00 72 00 00 00 73 00 00 00 65 00 00 00 74 00 00 00'
+ # '20 00 00 00 22 00') +
+ # b'((\x00\x00[^\x22]\x00)*?)' +
+ # hex2re('00 00 22 00 00 00 3B 00')).match),
+
+ # ((Slice[1::4], '-3412'), re.compile(
+ # hex2re('FE FF 00 00 00 40 00 00 00 63 00 00 00 68 00 00 00'
+ # '61 00 00 00 72 00 00 00 73 00 00 00 65 00 00 00 74'
+ # '00 00 00 20 00 00 00 22 00 00') +
+ # b'((\x00[^\x22]\x00\x00)*?)' +
+ # hex2re('00 22 00 00 00 3B 00 00')).match),
+
+ # ((Slice[1::4], '-3412'), re.compile(
+ # hex2re('00 40 00 00 00 63 00 00 00 68 00 00 00 61 00 00 00'
+ # '72 00 00 00 73 00 00 00 65 00 00 00 74 00 00 00 20'
+ # '00 00 00 22 00 00') +
+ # b'((\x00[^\x22]\x00\x00)*?)' +
+ # hex2re('00 22 00 00 00 3B 00 00')).match),
((Slice[::4], '-LE'), re.compile(
hex2re('FF FE 00 00 40 00 00 00 63 00 00 00 68 00 00 00 61'
'00 00 00 72 00 00 00 73 00 00 00 65 00 00 00 74 00'
- '00 00 20 00 00 00 22 00 00 00')
- + b'(([^\x22]\x00\x00\x00)*?)'
- + hex2re('22 00 00 00 3B 00 00 00')).match),
+ '00 00 20 00 00 00 22 00 00 00') +
+ b'(([^\x22]\x00\x00\x00)*?)' +
+ hex2re('22 00 00 00 3B 00 00 00')).match),
((Slice[::4], '-LE'), re.compile(
hex2re('40 00 00 00 63 00 00 00 68 00 00 00 61 00 00 00 72'
'00 00 00 73 00 00 00 65 00 00 00 74 00 00 00 20 00'
- '00 00 22 00 00 00')
- + b'(([^\x22]\x00\x00\x00)*?)'
- + hex2re('22 00 00 00 3B 00 00 00')).match),
+ '00 00 22 00 00 00') +
+ b'(([^\x22]\x00\x00\x00)*?)' +
+ hex2re('22 00 00 00 3B 00 00 00')).match),
('UTF-32-BE', re.compile(
hex2re('00 00 FE FF')).match),
@@ -218,11 +217,11 @@ def __getitem__(self, slice_):
('UTF-32-LE', re.compile(
hex2re('FF FE 00 00')).match),
-# ('UTF-32-2143', re.compile(
-# hex2re('00 00 FF FE')).match),
+ # ('UTF-32-2143', re.compile(
+ # hex2re('00 00 FF FE')).match),
-# ('UTF-32-3412', re.compile(
-# hex2re('FE FF 00 00')).match),
+ # ('UTF-32-3412', re.compile(
+ # hex2re('FE FF 00 00')).match),
('UTF-16-BE', re.compile(
hex2re('FE FF')).match),
@@ -231,24 +230,24 @@ def __getitem__(self, slice_):
hex2re('FF FE')).match),
-# Some of there are supported by Python, but I didn’t bother.
-# You know the story with patches ...
+    # Some of these are supported by Python, but I didn’t bother.
+ # You know the story with patches ...
-# # as specified, transcoded from EBCDIC to ASCII
-# ('as_specified-EBCDIC', re.compile(
-# hex2re('7C 83 88 81 99 A2 85 A3 40 7F')
-# + b'([^\x7F]*?)'
-# + hex2re('7F 5E')).match),
+ # # as specified, transcoded from EBCDIC to ASCII
+ # ('as_specified-EBCDIC', re.compile(
+ # hex2re('7C 83 88 81 99 A2 85 A3 40 7F')
+ # + b'([^\x7F]*?)'
+ # + hex2re('7F 5E')).match),
-# # as specified, transcoded from IBM1026 to ASCII
-# ('as_specified-IBM1026', re.compile(
-# hex2re('AE 83 88 81 99 A2 85 A3 40 FC')
-# + b'([^\xFC]*?)'
-# + hex2re('FC 5E')).match),
+ # # as specified, transcoded from IBM1026 to ASCII
+ # ('as_specified-IBM1026', re.compile(
+ # hex2re('AE 83 88 81 99 A2 85 A3 40 FC')
+ # + b'([^\xFC]*?)'
+ # + hex2re('FC 5E')).match),
-# # as specified, transcoded from GSM 03.38 to ASCII
-# ('as_specified-GSM_03.38', re.compile(
-# hex2re('00 63 68 61 72 73 65 74 20 22')
-# + b'([^\x22]*?)'
-# + hex2re('22 3B')).match),
+ # # as specified, transcoded from GSM 03.38 to ASCII
+ # ('as_specified-GSM_03.38', re.compile(
+ # hex2re('00 63 68 61 72 73 65 74 20 22')
+ # + b'([^\x22]*?)'
+ # + hex2re('22 3B')).match),
]
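A side note on the table above: Slice is only sugar over
operator.itemgetter (see Slicer.__getitem__ earlier in this file); the
slice pulls the ASCII bytes back out of a multi-byte encoding so the
detected @charset name can be read. A rough illustration, with a
made-up prelude:

    import operator

    # Slice[1::2] builds operator.itemgetter(slice(1, None, 2)).
    get_odd_bytes = operator.itemgetter(slice(1, None, 2))

    # In UTF-16-BE, ASCII characters occupy every second byte:
    data = '@charset "x";'.encode('utf-16-be')
    assert get_odd_bytes(data) == b'@charset "x";'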
diff --git a/tinycss/fonts3.py b/tinycss/fonts3.py
new file mode 100644
index 0000000..c1f96a6
--- /dev/null
+++ b/tinycss/fonts3.py
@@ -0,0 +1,200 @@
+# coding: utf-8
+"""
+    tinycss.fonts3
+    --------------
+
+ Parser for CSS 3 Fonts syntax:
+ https://www.w3.org/TR/css-fonts-3/
+
+ Adds support for font-face and font-feature-values rules.
+
+ :copyright: (c) 2016 by Kozea.
+ :license: BSD, see LICENSE for more details.
+"""
+
+from __future__ import division, unicode_literals
+
+from .css21 import CSS21Parser, ParseError
+
+
+class FontFaceRule(object):
+ """A parsed at-rule for font faces.
+
+ .. attribute:: at_keyword
+
+ Always ``'@font-face'``.
+
+ .. attribute:: declarations
+
+ A list of :class:`~.css21.Declaration` objects.
+
+ .. attribute:: line
+
+ Source line where this was read.
+
+ .. attribute:: column
+
+ Source column where this was read.
+
+ """
+
+ def __init__(self, at_keyword, declarations, line, column):
+ assert at_keyword == '@font-face'
+ self.at_keyword = at_keyword
+ self.declarations = declarations
+ self.line = line
+ self.column = column
+
+
+class FontFeatureValuesRule(object):
+ """A parsed at-rule for font feature values.
+
+ .. attribute:: at_keyword
+
+ Always ``'@font-feature-values'``.
+
+ .. attribute:: line
+
+ Source line where this was read.
+
+ .. attribute:: column
+
+ Source column where this was read.
+
+ .. attribute:: at_rules
+
+ The list of parsed at-rules inside the @font-feature-values block, in
+ source order.
+
+ .. attribute:: family_names
+
+ A list of strings representing font families.
+
+ """
+
+ def __init__(self, at_keyword, at_rules, family_names, line, column):
+ assert at_keyword == '@font-feature-values'
+ self.at_keyword = at_keyword
+ self.family_names = family_names
+ self.at_rules = at_rules
+ self.line = line
+ self.column = column
+
+
+class FontFeatureRule(object):
+ """A parsed at-rule for font features.
+
+ .. attribute:: at_keyword
+
+        One of the following six strings:
+
+ * ``@stylistic``
+ * ``@styleset``
+ * ``@character-variant``
+ * ``@swash``
+ * ``@ornaments``
+ * ``@annotation``
+
+ .. attribute:: declarations
+
+ A list of :class:`~.css21.Declaration` objects.
+
+ .. attribute:: line
+
+ Source line where this was read.
+
+ .. attribute:: column
+
+ Source column where this was read.
+
+ """
+
+ def __init__(self, at_keyword, declarations, line, column):
+ self.at_keyword = at_keyword
+ self.declarations = declarations
+ self.line = line
+ self.column = column
+
+
+class CSSFonts3Parser(CSS21Parser):
+ """Extend :class:`~.css21.CSS21Parser` for `CSS 3 Fonts`_ syntax.
+
+ .. _CSS 3 Fonts: https://www.w3.org/TR/css-fonts-3/
+
+ """
+
+ FONT_FEATURE_VALUES_AT_KEYWORDS = [
+ '@stylistic',
+ '@styleset',
+ '@character-variant',
+ '@swash',
+ '@ornaments',
+ '@annotation',
+ ]
+
+ def parse_at_rule(self, rule, previous_rules, errors, context):
+ if rule.at_keyword == '@font-face':
+ if rule.head:
+ raise ParseError(
+ rule.head[0],
+ 'unexpected {0} token in {1} rule header'.format(
+ rule.head[0].type, rule.at_keyword))
+ declarations, body_errors = self.parse_declaration_list(rule.body)
+ errors.extend(body_errors)
+ return FontFaceRule(
+ rule.at_keyword, declarations, rule.line, rule.column)
+ elif rule.at_keyword == '@font-feature-values':
+ family_names = tuple(
+ self.parse_font_feature_values_family_names(rule.head))
+ at_rules, body_errors = (
+ self.parse_rules(rule.body or [], '@font-feature-values'))
+ errors.extend(body_errors)
+ return FontFeatureValuesRule(
+ rule.at_keyword, at_rules, family_names,
+ rule.line, rule.column)
+ elif rule.at_keyword in self.FONT_FEATURE_VALUES_AT_KEYWORDS:
+ if context != '@font-feature-values':
+ raise ParseError(
+ rule, '{0} rule not allowed in {1}'.format(
+ rule.at_keyword, context))
+ declarations, body_errors = self.parse_declaration_list(rule.body)
+ errors.extend(body_errors)
+ return FontFeatureRule(
+ rule.at_keyword, declarations, rule.line, rule.column)
+ return super(CSSFonts3Parser, self).parse_at_rule(
+ rule, previous_rules, errors, context)
+
+ def parse_font_feature_values_family_names(self, tokens):
+ """Parse an @font-feature-values selector.
+
+ :param tokens:
+            An iterable of tokens, typically from the ``head`` attribute of
+ an unparsed :class:`AtRule`.
+ :returns:
+ A generator of strings representing font families.
+ :raises:
+ :class:`~.parsing.ParseError` on invalid selectors
+
+ """
+ family = ''
+ current_string = False
+ for token in tokens:
+ if token.type == 'DELIM' and token.value == ',' and family:
+ yield family
+ family = ''
+ current_string = False
+ elif token.type == 'STRING' and not family and (
+ current_string is False):
+ family = token.value
+ current_string = True
+ elif token.type == 'IDENT' and not current_string:
+ if family:
+ family += ' '
+ family += token.value
+ elif token.type != 'S':
+ family = ''
+ break
+ if family:
+ yield family
+ else:
+ raise ParseError(token, 'invalid @font-feature-values selector')
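A short usage sketch for the new rule types, mirroring the behaviour
exercised in tinycss/tests/test_fonts3.py (family and feature names
made up):

    from tinycss.fonts3 import CSSFonts3Parser

    css = '@font-feature-values Foo Test { @swash { ornate: 1 } }'
    stylesheet = CSSFonts3Parser().parse_stylesheet(css)
    rule = stylesheet.rules[0]
    assert rule.at_keyword == '@font-feature-values'
    assert rule.family_names == ('Foo Test',)
    # Nested at-rules become FontFeatureRule objects:
    assert rule.at_rules[0].at_keyword == '@swash'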
diff --git a/tinycss/page3.py b/tinycss/page3.py
index 1d1a51e..6a89252 100644
--- a/tinycss/page3.py
+++ b/tinycss/page3.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
tinycss.page3
------------------
@@ -12,7 +12,8 @@
:license: BSD, see LICENSE for more details.
"""
-from __future__ import unicode_literals, division
+from __future__ import division, unicode_literals
+
from .css21 import CSS21Parser, ParseError
@@ -110,16 +111,18 @@ class CSSPage3Parser(CSS21Parser):
def parse_at_rule(self, rule, previous_rules, errors, context):
if rule.at_keyword in self.PAGE_MARGIN_AT_KEYWORDS:
if context != '@page':
- raise ParseError(rule,
- '%s rule not allowed in %s' % (rule.at_keyword, context))
+ raise ParseError(
+ rule, '{0} rule not allowed in {1}'.format(
+ rule.at_keyword, context))
if rule.head:
- raise ParseError(rule.head[0],
- 'unexpected %s token in %s rule header'
- % (rule.head[0].type, rule.at_keyword))
+ raise ParseError(
+ rule.head[0],
+ 'unexpected {0} token in {1} rule header'.format(
+ rule.head[0].type, rule.at_keyword))
declarations, body_errors = self.parse_declaration_list(rule.body)
errors.extend(body_errors)
- return MarginRule(rule.at_keyword, declarations,
- rule.line, rule.column)
+ return MarginRule(
+ rule.at_keyword, declarations, rule.line, rule.column)
return super(CSSPage3Parser, self).parse_at_rule(
rule, previous_rules, errors, context)
@@ -130,7 +133,7 @@ def parse_page_selector(self, head):
The ``head`` attribute of an unparsed :class:`AtRule`.
:returns:
A page selector. For CSS 2.1, this is 'first', 'left', 'right'
- or None.
+          or None. 'blank' is added by CSS GCPM.
:raises:
:class`~parsing.ParseError` on invalid selectors
@@ -147,11 +150,12 @@ def parse_page_selector(self, head):
else:
name = None
name_specificity = (0,)
- if (len(head) == 2 and head[0].type == ':'
- and head[1].type == 'IDENT'):
+ if (len(head) == 2 and head[0].type == ':' and
+ head[1].type == 'IDENT'):
pseudo_class = head[1].value
specificity = {
- 'first': (1, 0), 'left': (0, 1), 'right': (0, 1),
+ 'first': (1, 0), 'blank': (1, 0),
+ 'left': (0, 1), 'right': (0, 1),
}.get(pseudo_class)
if specificity:
return (name, pseudo_class), (name_specificity + specificity)
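For reference, a minimal check of the new pseudo-class (a sketch of
what the added test in test_page3.py asserts):

    from tinycss.page3 import CSSPage3Parser

    stylesheet = CSSPage3Parser().parse_stylesheet('@page :blank {}')
    rule = stylesheet.rules[0]
    # ':blank' gets the same specificity as ':first'.
    assert rule.selector == (None, 'blank')
    assert rule.specificity == (0, 1, 0)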
diff --git a/tinycss/parsing.py b/tinycss/parsing.py
index 86e93c0..c01e9c2 100644
--- a/tinycss/parsing.py
+++ b/tinycss/parsing.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
tinycss.parsing
---------------
@@ -95,6 +95,7 @@ def validate_value(tokens):
else:
validate_any(token, 'property value')
+
def validate_block(tokens, context):
"""
:raises:
@@ -132,8 +133,8 @@ def validate_any(token, context):
adjective = 'unmatched'
else:
adjective = 'unexpected'
- raise ParseError(token,
- '{0} {1} token in {2}'.format(adjective, type_, context))
+ raise ParseError(
+ token, '{0} {1} token in {2}'.format(adjective, type_, context))
class ParseError(ValueError):
diff --git a/tinycss/speedups.pyx b/tinycss/speedups.pyx
index d90a09f..49edfa2 100644
--- a/tinycss/speedups.pyx
+++ b/tinycss/speedups.pyx
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
tinycss.speedups
----------------
@@ -130,8 +130,8 @@ def tokenize_flat(css_source, int ignore_comments=1):
value = match.group(1)
value = float(value) if '.' in value else int(value)
unit = match.group(2)
- unit = unicode_unescape(unit)
unit = simple_unescape(unit)
+ unit = unicode_unescape(unit)
unit = unit.lower() # normalize
elif type_ == PERCENTAGE:
value = css_value[:-1]
@@ -145,20 +145,20 @@ def tokenize_flat(css_source, int ignore_comments=1):
value = int(value)
type_name = 'INTEGER'
elif type_ in (IDENT, ATKEYWORD, HASH, FUNCTION):
- value = unicode_unescape(css_value)
- value = simple_unescape(value)
+ value = simple_unescape(css_value)
+ value = unicode_unescape(value)
elif type_ == URI:
value = match.group(1)
if value and value[0] in '"\'':
value = value[1:-1] # Remove quotes
value = newline_unescape(value)
- value = unicode_unescape(value)
value = simple_unescape(value)
+ value = unicode_unescape(value)
elif type_ == STRING:
value = css_value[1:-1] # Remove quotes
value = newline_unescape(value)
- value = unicode_unescape(value)
value = simple_unescape(value)
+ value = unicode_unescape(value)
# BAD_STRING can only be one of:
# * Unclosed string at the end of the stylesheet:
# Close the string, but this is not an error.
@@ -171,8 +171,8 @@ def tokenize_flat(css_source, int ignore_comments=1):
type_name = 'STRING'
value = css_value[1:] # Remove quote
value = newline_unescape(value)
- value = unicode_unescape(value)
value = simple_unescape(value)
+ value = unicode_unescape(value)
else:
value = css_value
token = CToken(type_name, css_value, value, unit, line, column)
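The reordering above (simple_unescape before unicode_unescape) is what
fixes the \5c case from CHANGES. A worked example against the
pure-Python helpers in token_data, assuming the new simple_escape
macro is in place:

    from tinycss.token_data import SIMPLE_UNESCAPE, UNICODE_UNESCAPE

    source = r'Lorem\5c Ipsum'  # '\5c ' is a unicode escape for '\'

    # Old order: the backslash produced by the unicode unescape is
    # wrongly re-interpreted as a simple escape and swallowed.
    assert SIMPLE_UNESCAPE(UNICODE_UNESCAPE(source)) == 'LoremIpsum'

    # New order: '\5' is not a simple escape ('5' is a hex digit), so
    # the unicode escape is decoded exactly once.
    assert UNICODE_UNESCAPE(SIMPLE_UNESCAPE(source)) == r'Lorem\Ipsum'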
diff --git a/tinycss/tests/__init__.py b/tinycss/tests/__init__.py
index c7a89e0..af7a49e 100644
--- a/tinycss/tests/__init__.py
+++ b/tinycss/tests/__init__.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
Test suite for tinycss
----------------------
@@ -10,6 +10,14 @@
from __future__ import unicode_literals
+import sys
+
+
+# Awful workaround to undo isort's "sys.setdefaultencoding('utf-8')".
+if sys.version_info[0] == 2:
+ reload(sys) # noqa
+ sys.setdefaultencoding('ascii')
+
def assert_errors(errors, expected_errors):
"""Test not complete error messages but only substrings."""
diff --git a/tinycss/tests/speed.py b/tinycss/tests/speed.py
index 2777d4b..c2a02fd 100644
--- a/tinycss/tests/speed.py
+++ b/tinycss/tests/speed.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
Speed tests
-----------
@@ -11,13 +11,13 @@
"""
-from __future__ import unicode_literals, division
+from __future__ import division, unicode_literals
-import sys
-import os.path
import contextlib
-import timeit
import functools
+import os.path
+import sys
+import timeit
from cssutils import parseString
@@ -25,7 +25,6 @@
from ..css21 import CSS21Parser
from ..parsing import remove_whitespace
-
CSS_REPEAT = 4
TIMEIT_REPEAT = 3
TIMEIT_NUMBER = 20
@@ -64,6 +63,7 @@ def parse(tokenizer_name):
result.append((selector, declarations))
return result
+
parse_cython = functools.partial(parse, 'cython_tokenize_flat')
parse_python = functools.partial(parse, 'python_tokenize_flat')
@@ -82,8 +82,6 @@ def parse_cssutils():
def check_consistency():
result = parse_python()
- #import pprint
- #pprint.pprint(result)
assert len(result) > 0
if tokenizer.cython_tokenize_flat:
assert parse_cython() == result
diff --git a/tinycss/tests/test_api.py b/tinycss/tests/test_api.py
index 01caa3f..a0510b9 100644
--- a/tinycss/tests/test_api.py
+++ b/tinycss/tests/test_api.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
Tests for the public API
------------------------
@@ -9,10 +9,8 @@
from __future__ import unicode_literals
-import itertools
from pytest import raises
-
from tinycss import make_parser
from tinycss.page3 import CSSPage3Parser
diff --git a/tinycss/tests/test_color3.py b/tinycss/tests/test_color3.py
index 3d86785..e48771e 100644
--- a/tinycss/tests/test_color3.py
+++ b/tinycss/tests/test_color3.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
Tests for the CSS 3 color parser
--------------------------------
@@ -11,8 +11,7 @@
from __future__ import unicode_literals
import pytest
-
-from tinycss.color3 import parse_color_string, hsl_to_rgb
+from tinycss.color3 import hsl_to_rgb, parse_color_string
@pytest.mark.parametrize(('css_source', 'expected_result'), [
@@ -172,30 +171,30 @@ def test_color(css_source, expected_result):
@pytest.mark.parametrize(('hsl', 'expected_rgb'), [
# http://en.wikipedia.org/wiki/HSL_and_HSV#Examples
- ((0, 0, 100 ), (1, 1, 1 )),
- ((127, 0, 100 ), (1, 1, 1 )),
- ((0, 0, 50 ), (0.5, 0.5, 0.5 )),
- ((127, 0, 50 ), (0.5, 0.5, 0.5 )),
- ((0, 0, 0 ), (0, 0, 0 )),
- ((127, 0, 0 ), (0, 0, 0 )),
- ((0, 100, 50 ), (1, 0, 0 )),
- ((60, 100, 37.5), (0.75, 0.75, 0 )),
- ((780, 100, 37.5), (0.75, 0.75, 0 )),
- ((-300, 100, 37.5), (0.75, 0.75, 0 )),
- ((120, 100, 25 ), (0, 0.5, 0 )),
- ((180, 100, 75 ), (0.5, 1, 1 )),
- ((240, 100, 75 ), (0.5, 0.5, 1 )),
- ((300, 50, 50 ), (0.75, 0.25, 0.75 )),
- ((61.8, 63.8, 39.3), (0.628, 0.643, 0.142)),
- ((251.1, 83.2, 51.1), (0.255, 0.104, 0.918)),
- ((134.9, 70.7, 39.6), (0.116, 0.675, 0.255)),
- ((49.5, 89.3, 49.7), (0.941, 0.785, 0.053)),
- ((283.7, 77.5, 54.2), (0.704, 0.187, 0.897)),
- ((14.3, 81.7, 62.4), (0.931, 0.463, 0.316)),
- ((56.9, 99.1, 76.5), (0.998, 0.974, 0.532)),
- ((162.4, 77.9, 44.7), (0.099, 0.795, 0.591)),
- ((248.3, 60.1, 37.3), (0.211, 0.149, 0.597)),
- ((240.5, 29, 60.7), (0.495, 0.493, 0.721)),
+ ((0, 0, 100 ), (1, 1, 1 )), # noqa
+ ((127, 0, 100 ), (1, 1, 1 )), # noqa
+ ((0, 0, 50 ), (0.5, 0.5, 0.5 )), # noqa
+ ((127, 0, 50 ), (0.5, 0.5, 0.5 )), # noqa
+ ((0, 0, 0 ), (0, 0, 0 )), # noqa
+ ((127, 0, 0 ), (0, 0, 0 )), # noqa
+ ((0, 100, 50 ), (1, 0, 0 )), # noqa
+ ((60, 100, 37.5), (0.75, 0.75, 0 )), # noqa
+ ((780, 100, 37.5), (0.75, 0.75, 0 )), # noqa
+ ((-300, 100, 37.5), (0.75, 0.75, 0 )), # noqa
+ ((120, 100, 25 ), (0, 0.5, 0 )), # noqa
+ ((180, 100, 75 ), (0.5, 1, 1 )), # noqa
+ ((240, 100, 75 ), (0.5, 0.5, 1 )), # noqa
+ ((300, 50, 50 ), (0.75, 0.25, 0.75 )), # noqa
+ ((61.8, 63.8, 39.3), (0.628, 0.643, 0.142)), # noqa
+ ((251.1, 83.2, 51.1), (0.255, 0.104, 0.918)), # noqa
+ ((134.9, 70.7, 39.6), (0.116, 0.675, 0.255)), # noqa
+ ((49.5, 89.3, 49.7), (0.941, 0.785, 0.053)), # noqa
+ ((283.7, 77.5, 54.2), (0.704, 0.187, 0.897)), # noqa
+ ((14.3, 81.7, 62.4), (0.931, 0.463, 0.316)), # noqa
+ ((56.9, 99.1, 76.5), (0.998, 0.974, 0.532)), # noqa
+ ((162.4, 77.9, 44.7), (0.099, 0.795, 0.591)), # noqa
+ ((248.3, 60.1, 37.3), (0.211, 0.149, 0.597)), # noqa
+ ((240.5, 29, 60.7), (0.495, 0.493, 0.721)), # noqa
])
def test_hsl(hsl, expected_rgb):
for got, expected in zip(hsl_to_rgb(*hsl), expected_rgb):
diff --git a/tinycss/tests/test_css21.py b/tinycss/tests/test_css21.py
index 48626d7..a8ca956 100644
--- a/tinycss/tests/test_css21.py
+++ b/tinycss/tests/test_css21.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
Tests for the CSS 2.1 parser
----------------------------
@@ -9,16 +9,16 @@
from __future__ import unicode_literals
+
import io
import os
import tempfile
import pytest
-
from tinycss.css21 import CSS21Parser
-from .test_tokenizer import jsonify
from . import assert_errors
+from .test_tokenizer import jsonify
def parse_bytes(css_bytes, kwargs):
@@ -49,7 +49,7 @@ def parse_filename(css_bytes, kwargs):
('@import "é";'.encode('utf8'), {}, 'é'),
('@import "é";'.encode('utf16'), {}, 'é'), # with a BOM
('@import "é";'.encode('latin1'), {}, 'é'),
- ('@import "£";'.encode('Shift-JIS'), {}, '\x81\x92'), # latin1 mojibake
+ ('@import "£";'.encode('Shift-JIS'), {}, '\x81\x92'), # lat1 mojibake
('@charset "Shift-JIS";@import "£";'.encode('Shift-JIS'), {}, '£'),
(' @charset "Shift-JIS";@import "£";'.encode('Shift-JIS'), {},
'\x81\x92'),
@@ -77,7 +77,8 @@ def test_bytes(css_bytes, kwargs, expected_result, parse):
('foo{} @lipsum{} bar{}', 2,
['unknown at-rule in stylesheet context: @lipsum']),
('@charset "ascii"; foo {}', 1, []),
- (' @charset "ascii"; foo {}', 1, ['mis-placed or malformed @charset rule']),
+ (' @charset "ascii"; foo {}', 1, [
+ 'mis-placed or malformed @charset rule']),
('@charset ascii; foo {}', 1, ['mis-placed or malformed @charset rule']),
('foo {} @charset "ascii";', 1, ['mis-placed or malformed @charset rule']),
])
@@ -109,8 +110,8 @@ def test_at_rules(css_source, expected_rules, expected_errors):
('a{b:4}', [('a', [('b', [('INTEGER', 4)])])], []),
('@page {\t b: 4; @margin}', [('@page', [], [
- ('S', '\t '), ('IDENT', 'b'), (':', ':'), ('S', ' '), ('INTEGER', 4),
- (';', ';'), ('S', ' '), ('ATKEYWORD', '@margin'),
+ ('S', '\t '), ('IDENT', 'b'), (':', ':'), ('S', ' '), ('INTEGER', 4),
+ (';', ';'), ('S', ' '), ('ATKEYWORD', '@margin'),
])], []),
('foo', [], ['no declaration block found']),
diff --git a/tinycss/tests/test_decoding.py b/tinycss/tests/test_decoding.py
index 42df0c3..eaa8019 100644
--- a/tinycss/tests/test_decoding.py
+++ b/tinycss/tests/test_decoding.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
Tests for decoding bytes to Unicode
-----------------------------------
@@ -11,7 +11,6 @@
from __future__ import unicode_literals
import pytest
-
from tinycss.decoding import decode
@@ -30,13 +29,13 @@ def params(css, encoding, use_bom=False, expect_error=False, **kwargs):
params('£', 'ShiftJIS', linking_encoding='Shift-JIS'),
params('£', 'ShiftJIS', document_encoding='Shift-JIS'),
params('£', 'ShiftJIS', protocol_encoding='utf8',
- document_encoding='ShiftJIS'),
+ document_encoding='ShiftJIS'),
params('@charset "utf8"; £', 'ShiftJIS', expect_error=True),
params('@charset "utf£8"; £', 'ShiftJIS', expect_error=True),
params('@charset "unknown-encoding"; £', 'ShiftJIS', expect_error=True),
params('@charset "utf8"; £', 'ShiftJIS', document_encoding='ShiftJIS'),
params('£', 'ShiftJIS', linking_encoding='utf8',
- document_encoding='ShiftJIS'),
+ document_encoding='ShiftJIS'),
params('@charset "utf-32"; 𐂃', 'utf-32-be'),
params('@charset "Shift-JIS"; £', 'ShiftJIS'),
params('@charset "ISO-8859-8"; £', 'ShiftJIS', expect_error=True),
diff --git a/tinycss/tests/test_fonts3.py b/tinycss/tests/test_fonts3.py
new file mode 100644
index 0000000..ee8ab67
--- /dev/null
+++ b/tinycss/tests/test_fonts3.py
@@ -0,0 +1,144 @@
+# coding: utf-8
+"""
+ Tests for the Fonts 3 parser
+ ----------------------------
+
+ :copyright: (c) 2016 by Kozea.
+ :license: BSD, see LICENSE for more details.
+"""
+
+
+from __future__ import unicode_literals
+
+import pytest
+from tinycss.fonts3 import CSSFonts3Parser
+
+from . import assert_errors
+from .test_tokenizer import jsonify
+
+
+@pytest.mark.parametrize(('css', 'expected_family_names', 'expected_errors'), [
+ ('@font-feature-values foo {}', ('foo',), []),
+ ('@font-feature-values Foo Test {}', ('Foo Test',), []),
+ ('@font-feature-values \'Foo Test\' {}', ('Foo Test',), []),
+ ('@font-feature-values Foo Test, Foo Lol, "Foo tooo"', (
+ 'Foo Test', 'Foo Lol', 'Foo tooo'), []),
+ ('@font-feature-values Foo , Foo lol {}', ('Foo', 'Foo lol'), []),
+ ('@font-feature-values Foo , "Foobar" , Lol {}', (
+ 'Foo', 'Foobar', 'Lol'), []),
+ ('@font-feature-values Foo, {}', None, [
+ 'invalid @font-feature-values selector']),
+ ('@font-feature-values ,Foo {}', None, [
+ 'invalid @font-feature-values selector']),
+ ('@font-feature-values Test,"Foo", {}', None, [
+ 'invalid @font-feature-values selector']),
+ ('@font-feature-values Test "Foo" {}', None, [
+ 'invalid @font-feature-values selector']),
+ ('@font-feature-values Test Foo, Test "bar", "foo" {}', None, [
+ 'invalid @font-feature-values selector']),
+ ('@font-feature-values Test/Foo {}', None, [
+ 'invalid @font-feature-values selector']),
+ ('@font-feature-values /Foo {}', None, [
+ 'invalid @font-feature-values selector']),
+ ('@font-feature-values #Foo {}', None, [
+ 'invalid @font-feature-values selector']),
+    # TODO: this is currently accepted but should be rejected
+ # ('@font-feature-values test@foo {}', None, [
+ # 'invalid @font-feature-values selector']),
+ ('@font-feature-values Hawaii 5-0 {}', None, [
+ 'invalid @font-feature-values selector']),
+])
+def test_font_feature_values_selectors(css, expected_family_names,
+ expected_errors):
+ stylesheet = CSSFonts3Parser().parse_stylesheet(css)
+ assert_errors(stylesheet.errors, expected_errors)
+
+ if stylesheet.rules:
+ assert len(stylesheet.rules) == 1
+ rule = stylesheet.rules[0]
+ assert rule.at_keyword == '@font-feature-values'
+ assert rule.family_names == expected_family_names
+
+
+@pytest.mark.parametrize(('css', 'expected_declarations', 'expected_errors'), [
+ ('@font-face {}', [], []),
+ ('@font-face test { src: "lol"; font-family: "bar" }', None, [
+ 'unexpected IDENT token in @font-face rule header']),
+ ('@font-face { src: "lol"; font-family: "bar" }', [
+ ('src', [('STRING', 'lol')]),
+ ('font-family', [('STRING', 'bar')])], []),
+ ('@font-face { src: "lol"; font-family: "bar"; src: "baz" }', [
+ ('src', [('STRING', 'lol')]),
+ ('font-family', [('STRING', 'bar')]),
+ ('src', [('STRING', 'baz')])], []),
+])
+def test_font_face_content(css, expected_declarations, expected_errors):
+ stylesheet = CSSFonts3Parser().parse_stylesheet(css)
+ assert_errors(stylesheet.errors, expected_errors)
+
+ def declarations(rule):
+ return [(decl.name, list(jsonify(decl.value)))
+ for decl in rule.declarations]
+
+ if expected_declarations is None:
+ assert stylesheet.rules == []
+ assert expected_errors
+ else:
+ assert len(stylesheet.rules) == 1
+ rule = stylesheet.rules[0]
+ assert rule.at_keyword == '@font-face'
+ assert declarations(rule) == expected_declarations
+
+
+@pytest.mark.parametrize(
+ ('css', 'expected_rules', 'expected_errors'), [
+ ('''@annotation{}''', None, [
+ '@annotation rule not allowed in stylesheet']),
+ ('''@font-feature-values foo {}''', None, []),
+ ('''@font-feature-values foo {
+ @swash { ornate: 1; }
+ @styleset { double-W: 14; sharp-terminals: 16 1; }
+ }''', [
+ ('@swash', [('ornate', [('INTEGER', 1)])]),
+ ('@styleset', [
+ ('double-w', [('INTEGER', 14)]),
+ ('sharp-terminals', [
+ ('INTEGER', 16), ('S', ' '), ('INTEGER', 1)])])], []),
+ ('''@font-feature-values foo {
+ @swash { ornate: 14; }
+ @unknown { test: 1; }
+ }''', [('@swash', [('ornate', [('INTEGER', 14)])])], [
+ 'unknown at-rule in @font-feature-values context: @unknown']),
+ ('''@font-feature-values foo {
+ @annotation{boxed:1}
+ bad: 2;
+ @brokenstylesetbecauseofbadabove { sharp: 1}
+ @styleset { sharp-terminals: 16 1; @bad {}}
+ @styleset { @bad {} top-ignored: 3; top: 9000}
+ really-bad
+ }''', [
+ ('@annotation', [('boxed', [('INTEGER', 1)])]),
+ ('@styleset', [
+ ('sharp-terminals', [
+ ('INTEGER', 16), ('S', ' '), ('INTEGER', 1)])]),
+ ('@styleset', [('top', [('INTEGER', 9000)])])], [
+ 'unexpected ; token in selector',
+ 'expected a property name, got ATKEYWORD',
+ 'expected a property name, got ATKEYWORD',
+ 'no declaration block found for ruleset']),
+ ])
+def test_font_feature_values_content(css, expected_rules, expected_errors):
+ stylesheet = CSSFonts3Parser().parse_stylesheet(css)
+ assert_errors(stylesheet.errors, expected_errors)
+
+ if expected_rules is not None:
+ assert len(stylesheet.rules) == 1
+ rule = stylesheet.rules[0]
+ assert rule.at_keyword == '@font-feature-values'
+
+ rules = [
+ (at_rule.at_keyword, [
+ (decl.name, list(jsonify(decl.value)))
+ for decl in at_rule.declarations])
+ for at_rule in rule.at_rules] if rule.at_rules else None
+ assert rules == expected_rules
diff --git a/tinycss/tests/test_page3.py b/tinycss/tests/test_page3.py
index 15c9e57..7d2b2d2 100644
--- a/tinycss/tests/test_page3.py
+++ b/tinycss/tests/test_page3.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
Tests for the Paged Media 3 parser
----------------------------------
@@ -11,10 +11,10 @@
from __future__ import unicode_literals
import pytest
-
from tinycss.page3 import CSSPage3Parser
-from .test_tokenizer import jsonify
+
from . import assert_errors
+from .test_tokenizer import jsonify
@pytest.mark.parametrize(('css', 'expected_selector',
@@ -24,6 +24,7 @@
('@page :first {}', (None, 'first'), (0, 1, 0), []),
('@page:left{}', (None, 'left'), (0, 0, 1), []),
('@page :right {}', (None, 'right'), (0, 0, 1), []),
+ ('@page :blank{}', (None, 'blank'), (0, 1, 0), []),
('@page :last {}', None, None, ['invalid @page selector']),
('@page : first {}', None, None, ['invalid @page selector']),
@@ -55,7 +56,7 @@ def test_selectors(css, expected_selector, expected_specificity,
@pytest.mark.parametrize(('css', 'expected_declarations',
- 'expected_rules','expected_errors'), [
+ 'expected_rules', 'expected_errors'), [
('@page {}', [], [], []),
('@page { foo: 4; bar: z }',
[('foo', [('INTEGER', 4)]), ('bar', [('IDENT', 'z')])], [], []),
@@ -67,7 +68,7 @@ def test_selectors(css, expected_selector, expected_specificity,
[('foo', [('INTEGER', 4)]), ('bar', [('IDENT', 'z')])],
[('@top-center', [('content', [('STRING', 'Awesome Title')])]),
('@bottom-left', [('content', [
- ('FUNCTION', 'counter', [('IDENT', 'page')])])])],
+ ('FUNCTION', 'counter', [('IDENT', 'page')])])])],
[]),
('''@page { foo: 4;
@bottom-top { content: counter(page) }
diff --git a/tinycss/tests/test_tokenizer.py b/tinycss/tests/test_tokenizer.py
index 4508204..f3e7a6f 100644
--- a/tinycss/tests/test_tokenizer.py
+++ b/tinycss/tests/test_tokenizer.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
Tests for the tokenizer
-----------------------
@@ -10,115 +10,115 @@
from __future__ import unicode_literals
-import sys
import os
+import sys
import pytest
-
from tinycss.tokenizer import (
- python_tokenize_flat, cython_tokenize_flat, regroup)
+ cython_tokenize_flat, python_tokenize_flat, regroup)
def test_speedups():
- if os.environ.get('TINYCSS_SKIP_SPEEDUPS_TESTS'): # pragma: no cover
+ is_pypy = hasattr(sys, 'pypy_translation_info')
+ env_skip_tests = os.environ.get('TINYCSS_SKIP_SPEEDUPS_TESTS')
+    # The speedups are never built on PyPy, so skip quietly there.
+    if is_pypy or env_skip_tests:  # pragma: no cover
return
assert cython_tokenize_flat is not None, (
'Cython speedups are not installed, related tests will '
'be skipped. Set the TINYCSS_SKIP_SPEEDUPS_TESTS environment '
- 'variable if this is expected (eg. on PyPy).')
+ 'variable if this is expected.')
@pytest.mark.parametrize(('tokenize', 'css_source', 'expected_tokens'), [
- (tokenize,) + test_data
- for tokenize in (python_tokenize_flat, cython_tokenize_flat)
- for test_data in [
- ('', []),
- ('red -->',
- [('IDENT', 'red'), ('S', ' '), ('CDC', '-->')]),
- # Longest match rule: no CDC
- ('red-->',
- [('IDENT', 'red--'), ('DELIM', '>')]),
-
- (r'''p[example="\
-foo(int x) {\
- this.x = x;\
-}\
-"]''', [
- ('IDENT', 'p'),
- ('[', '['),
- ('IDENT', 'example'),
- ('DELIM', '='),
- ('STRING', 'foo(int x) { this.x = x;}'),
- (']', ']')]),
-
- #### Numbers are parsed
- ('42 .5 -4pX 1.25em 30%',
- [('INTEGER', 42), ('S', ' '),
- ('NUMBER', .5), ('S', ' '),
- # units are normalized to lower-case:
- ('DIMENSION', -4, 'px'), ('S', ' '),
- ('DIMENSION', 1.25, 'em'), ('S', ' '),
- ('PERCENTAGE', 30, '%')]),
-
- #### URLs are extracted
- ('url(foo.png)', [('URI', 'foo.png')]),
- ('url("foo.png")', [('URI', 'foo.png')]),
-
- #### Escaping
-
- (r'/* Comment with a \ backslash */',
- [('COMMENT', '/* Comment with a \ backslash */')]), # Unchanged
-
- # backslash followed by a newline in a string: ignored
- ('"Lorem\\\nIpsum"', [('STRING', 'LoremIpsum')]),
-
- # backslash followed by a newline outside a string: stands for itself
- ('Lorem\\\nIpsum', [
- ('IDENT', 'Lorem'), ('DELIM', '\\'),
- ('S', '\n'), ('IDENT', 'Ipsum')]),
-
- # Cancel the meaning of special characters
- (r'"Lore\m Ipsum"', [('STRING', 'Lorem Ipsum')]), # or not specal
- (r'"Lorem\"Ipsum"', [('STRING', 'Lorem"Ipsum')]),
- (r'Lorem\+Ipsum', [('IDENT', 'Lorem+Ipsum')]),
- (r'Lorem+Ipsum', [('IDENT', 'Lorem'), ('DELIM', '+'), ('IDENT', 'Ipsum')]),
- (r'url(foo\).png)', [('URI', 'foo).png')]),
-
- # Unicode and backslash escaping
- ('\\26 B', [('IDENT', '&B')]),
- ('\\&B', [('IDENT', '&B')]),
- ('@\\26\tB', [('ATKEYWORD', '@&B')]),
- ('@\\&B', [('ATKEYWORD', '@&B')]),
- ('#\\26\nB', [('HASH', '#&B')]),
- ('#\\&B', [('HASH', '#&B')]),
- ('\\26\r\nB(', [('FUNCTION', '&B(')]),
- ('\\&B(', [('FUNCTION', '&B(')]),
- (r'12.5\000026B', [('DIMENSION', 12.5, '&b')]),
- (r'12.5\0000263B', [('DIMENSION', 12.5, '&3b')]), # max 6 digits
- (r'12.5\&B', [('DIMENSION', 12.5, '&b')]),
- (r'"\26 B"', [('STRING', '&B')]),
- (r"'\000026B'", [('STRING', '&B')]),
- (r'"\&B"', [('STRING', '&B')]),
- (r'url("\26 B")', [('URI', '&B')]),
- (r'url(\26 B)', [('URI', '&B')]),
- (r'url("\&B")', [('URI', '&B')]),
- (r'url(\&B)', [('URI', '&B')]),
- (r'Lorem\110000Ipsum', [('IDENT', 'Lorem\uFFFDIpsum')]),
-
- #### Bad strings
-
- # String ends at EOF without closing: no error, parsed
- ('"Lorem\\26Ipsum', [('STRING', 'Lorem&Ipsum')]),
- # Unescaped newline: ends the string, error, unparsed
- ('"Lorem\\26Ipsum\n', [
- ('BAD_STRING', r'"Lorem\26Ipsum'), ('S', '\n')]),
- # Tokenization restarts after the newline, so the second " starts
- # a new string (which ends at EOF without errors, as above.)
- ('"Lorem\\26Ipsum\ndolor" sit', [
- ('BAD_STRING', r'"Lorem\26Ipsum'), ('S', '\n'),
- ('IDENT', 'dolor'), ('STRING', ' sit')]),
-
-]])
+ (tokenize,) + test_data
+ for tokenize in (python_tokenize_flat, cython_tokenize_flat)
+ for test_data in [
+ ('', []),
+ ('red -->', [('IDENT', 'red'), ('S', ' '), ('CDC', '-->')]),
+ # Longest match rule: no CDC
+ ('red-->', [('IDENT', 'red--'), ('DELIM', '>')]),
+ (r'p[example="foo(int x) { this.x = x;}"]', [
+ ('IDENT', 'p'),
+ ('[', '['),
+ ('IDENT', 'example'),
+ ('DELIM', '='),
+ ('STRING', 'foo(int x) { this.x = x;}'),
+ (']', ']')]),
+
+ # Numbers are parsed
+ ('42 .5 -4pX 1.25em 30%', [
+ ('INTEGER', 42), ('S', ' '),
+ ('NUMBER', .5), ('S', ' '),
+ # units are normalized to lower-case:
+ ('DIMENSION', -4, 'px'), ('S', ' '),
+ ('DIMENSION', 1.25, 'em'), ('S', ' '),
+ ('PERCENTAGE', 30, '%')]),
+
+ # URLs are extracted
+ ('url(foo.png)', [('URI', 'foo.png')]),
+ ('url("foo.png")', [('URI', 'foo.png')]),
+
+ # Escaping
+
+ (r'/* Comment with a \ backslash */', [
+ ('COMMENT', '/* Comment with a \ backslash */')]), # Unchanged
+
+ # backslash followed by a newline in a string: ignored
+ ('"Lorem\\\nIpsum"', [('STRING', 'LoremIpsum')]),
+
+ # backslash followed by a newline outside a string: stands for itself
+ ('Lorem\\\nIpsum', [
+ ('IDENT', 'Lorem'), ('DELIM', '\\'),
+ ('S', '\n'), ('IDENT', 'Ipsum')]),
+
+ # Cancel the meaning of special characters
+ (r'"Lore\m Ipsum"', [('STRING', 'Lorem Ipsum')]), # or not specal
+ (r'"Lorem \49psum"', [('STRING', 'Lorem Ipsum')]),
+ (r'"Lorem \49 psum"', [('STRING', 'Lorem Ipsum')]),
+ (r'"Lorem\"Ipsum"', [('STRING', 'Lorem"Ipsum')]),
+ (r'"Lorem\\Ipsum"', [('STRING', r'Lorem\Ipsum')]),
+ (r'"Lorem\5c Ipsum"', [('STRING', r'Lorem\Ipsum')]),
+ (r'Lorem\+Ipsum', [('IDENT', 'Lorem+Ipsum')]),
+ (r'Lorem+Ipsum', [
+ ('IDENT', 'Lorem'), ('DELIM', '+'), ('IDENT', 'Ipsum')]),
+ (r'url(foo\).png)', [('URI', 'foo).png')]),
+
+ # Unicode and backslash escaping
+ ('\\26 B', [('IDENT', '&B')]),
+ ('\\&B', [('IDENT', '&B')]),
+ ('@\\26\tB', [('ATKEYWORD', '@&B')]),
+ ('@\\&B', [('ATKEYWORD', '@&B')]),
+ ('#\\26\nB', [('HASH', '#&B')]),
+ ('#\\&B', [('HASH', '#&B')]),
+ ('\\26\r\nB(', [('FUNCTION', '&B(')]),
+ ('\\&B(', [('FUNCTION', '&B(')]),
+ (r'12.5\000026B', [('DIMENSION', 12.5, '&b')]),
+ (r'12.5\0000263B', [('DIMENSION', 12.5, '&3b')]), # max 6 digits
+ (r'12.5\&B', [('DIMENSION', 12.5, '&b')]),
+ (r'"\26 B"', [('STRING', '&B')]),
+ (r"'\000026B'", [('STRING', '&B')]),
+ (r'"\&B"', [('STRING', '&B')]),
+ (r'url("\26 B")', [('URI', '&B')]),
+ (r'url(\26 B)', [('URI', '&B')]),
+ (r'url("\&B")', [('URI', '&B')]),
+ (r'url(\&B)', [('URI', '&B')]),
+ (r'Lorem\110000Ipsum', [('IDENT', 'Lorem\uFFFDIpsum')]),
+
+ # Bad strings
+
+ # String ends at EOF without closing: no error, parsed
+ ('"Lorem\\26Ipsum', [('STRING', 'Lorem&Ipsum')]),
+ # Unescaped newline: ends the string, error, unparsed
+ ('"Lorem\\26Ipsum\n', [
+ ('BAD_STRING', r'"Lorem\26Ipsum'), ('S', '\n')]),
+ # Tokenization restarts after the newline, so the second " starts
+ # a new string (which ends at EOF without errors, as above.)
+ ('"Lorem\\26Ipsum\ndolor" sit', [
+ ('BAD_STRING', r'"Lorem\26Ipsum'), ('S', '\n'),
+ ('IDENT', 'dolor'), ('STRING', ' sit')]),
+
+ ]])
def test_tokens(tokenize, css_source, expected_tokens):
if tokenize is None: # pragma: no cover
pytest.skip('Speedups not available')
@@ -156,64 +156,64 @@ def test_positions(tokenize):
@pytest.mark.parametrize(('tokenize', 'css_source', 'expected_tokens'), [
- (tokenize,) + test_data
- for tokenize in (python_tokenize_flat, cython_tokenize_flat)
- for test_data in [
- ('', []),
- (r'Lorem\26 "i\psum"4px', [
- ('IDENT', 'Lorem&'), ('STRING', 'ipsum'), ('DIMENSION', 4)]),
-
- ('not([[lorem]]{ipsum (42)})', [
- ('FUNCTION', 'not', [
- ('[', [
+ (tokenize,) + test_data
+ for tokenize in (python_tokenize_flat, cython_tokenize_flat)
+ for test_data in [
+ ('', []),
+ (r'Lorem\26 "i\psum"4px', [
+ ('IDENT', 'Lorem&'), ('STRING', 'ipsum'), ('DIMENSION', 4)]),
+
+ ('not([[lorem]]{ipsum (42)})', [
+ ('FUNCTION', 'not', [
('[', [
- ('IDENT', 'lorem'),
+ ('[', [
+ ('IDENT', 'lorem'),
+ ]),
]),
- ]),
- ('{', [
- ('IDENT', 'ipsum'),
- ('S', ' '),
- ('(', [
- ('INTEGER', 42),
+ ('{', [
+ ('IDENT', 'ipsum'),
+ ('S', ' '),
+ ('(', [
+ ('INTEGER', 42),
+ ])
])
- ])
- ])]),
-
- # Close everything at EOF, no error
- ('a[b{"d', [
- ('IDENT', 'a'),
- ('[', [
- ('IDENT', 'b'),
- ('{', [
- ('STRING', 'd'),
+ ])]),
+
+ # Close everything at EOF, no error
+ ('a[b{"d', [
+ ('IDENT', 'a'),
+ ('[', [
+ ('IDENT', 'b'),
+ ('{', [
+ ('STRING', 'd'),
+ ]),
]),
]),
- ]),
-
- # Any remaining ), ] or } token is a nesting error
- ('a[b{d]e}', [
- ('IDENT', 'a'),
- ('[', [
- ('IDENT', 'b'),
- ('{', [
- ('IDENT', 'd'),
- (']', ']'), # The error is visible here
- ('IDENT', 'e'),
+
+ # Any remaining ), ] or } token is a nesting error
+ ('a[b{d]e}', [
+ ('IDENT', 'a'),
+ ('[', [
+ ('IDENT', 'b'),
+ ('{', [
+ ('IDENT', 'd'),
+ (']', ']'), # The error is visible here
+ ('IDENT', 'e'),
+ ]),
]),
]),
- ]),
- # ref:
- ('a[b{d}e]', [
- ('IDENT', 'a'),
- ('[', [
- ('IDENT', 'b'),
- ('{', [
- ('IDENT', 'd'),
+ # ref:
+ ('a[b{d}e]', [
+ ('IDENT', 'a'),
+ ('[', [
+ ('IDENT', 'b'),
+ ('{', [
+ ('IDENT', 'd'),
+ ]),
+ ('IDENT', 'e'),
]),
- ('IDENT', 'e'),
]),
- ]),
-]])
+ ]])
def test_token_grouping(tokenize, css_source, expected_tokens):
if tokenize is None: # pragma: no cover
pytest.skip('Speedups not available')
@@ -235,27 +235,27 @@ def jsonify(tokens):
@pytest.mark.parametrize(('tokenize', 'ignore_comments', 'expected_tokens'), [
- (tokenize,) + test_data
- for tokenize in (python_tokenize_flat, cython_tokenize_flat)
- for test_data in [
- (False, [
- ('COMMENT', '/* lorem */'),
- ('S', ' '),
- ('IDENT', 'ipsum'),
- ('[', [
- ('IDENT', 'dolor'),
- ('COMMENT', '/* sit */'),
+ (tokenize,) + test_data
+ for tokenize in (python_tokenize_flat, cython_tokenize_flat)
+ for test_data in [
+ (False, [
+ ('COMMENT', '/* lorem */'),
+ ('S', ' '),
+ ('IDENT', 'ipsum'),
+ ('[', [
+ ('IDENT', 'dolor'),
+ ('COMMENT', '/* sit */'),
+ ]),
+ ('BAD_COMMENT', '/* amet')
]),
- ('BAD_COMMENT', '/* amet')
- ]),
- (True, [
- ('S', ' '),
- ('IDENT', 'ipsum'),
- ('[', [
- ('IDENT', 'dolor'),
+ (True, [
+ ('S', ' '),
+ ('IDENT', 'ipsum'),
+ ('[', [
+ ('IDENT', 'dolor'),
+ ]),
]),
- ]),
-]])
+ ]])
def test_comments(tokenize, ignore_comments, expected_tokens):
if tokenize is None: # pragma: no cover
pytest.skip('Speedups not available')
@@ -266,20 +266,16 @@ def test_comments(tokenize, ignore_comments, expected_tokens):
@pytest.mark.parametrize(('tokenize', 'css_source'), [
- (tokenize, test_data)
- for tokenize in (python_tokenize_flat, cython_tokenize_flat)
- for test_data in [
- r'''p[example="\
-foo(int x) {\
- this.x = x;\
-}\
-"]''',
- '"Lorem\\26Ipsum\ndolor" sit',
- '/* Lorem\nipsum */\fa {\n color: red;\tcontent: "dolor\\\fsit" }',
- 'not([[lorem]]{ipsum (42)})',
- 'a[b{d]e}',
- 'a[b{"d',
-]])
+ (tokenize, test_data)
+ for tokenize in (python_tokenize_flat, cython_tokenize_flat)
+ for test_data in [
+ r'p[example="foo(int x) { this.x = x;}"]',
+ '"Lorem\\26Ipsum\ndolor" sit',
+ '/* Lorem\nipsum */\fa {\n color: red;\tcontent: "dolor\\\fsit" }',
+ 'not([[lorem]]{ipsum (42)})',
+ 'a[b{d]e}',
+ 'a[b{"d',
+ ]])
def test_token_serialize_css(tokenize, css_source):
if tokenize is None: # pragma: no cover
pytest.skip('Speedups not available')
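A quick way to see what the reworked escape table above asserts: run the pure-Python tokenizer (the same `python_tokenize_flat` these tests import) on a couple of the inputs. A minimal sketch, assuming this branch is installed:

```python
from tinycss.tokenizer import python_tokenize_flat

# The unicode escape '\26' (plus one optional trailing whitespace) and the
# simple escape '\&' both decode to '&' inside an identifier:
for css in ('\\26 B', '\\&B'):
    token, = python_tokenize_flat(css)
    print(token.type, token.value)  # -> IDENT &B (both times)
```
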
diff --git a/tinycss/token_data.py b/tinycss/token_data.py
index d2c2cba..cb99d8d 100644
--- a/tinycss/token_data.py
+++ b/tinycss/token_data.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
tinycss.token_data
------------------
@@ -11,12 +11,11 @@
from __future__ import unicode_literals
-import re
-import sys
-import operator
import functools
+import operator
+import re
import string
-
+import sys
# * Raw strings with the r'' notation are used so that \ do not need
# to be escaped.
@@ -43,7 +42,8 @@
w [ \t\r\n\f]*
nonascii [^\0-\237]
unicode \\([0-9a-f]{{1,6}})(\r\n|[ \n\r\t\f])?
- escape {unicode}|\\[^\n\r\f0-9a-f]
+ simple_escape [^\n\r\f0-9a-f]
+ escape {unicode}|\\{simple_escape}
nmstart [_a-z]|{nonascii}|{escape}
nmchar [_a-z0-9-]|{nonascii}|{escape}
name {nmchar}+
@@ -186,6 +186,7 @@ def _init():
for names in dispatch
)
+
_init()
@@ -196,6 +197,7 @@ def _unicode_replace(match, int=int, unichr=unichr, maxunicode=sys.maxunicode):
else:
return '\N{REPLACEMENT CHARACTER}' # U+FFFD
+
UNICODE_UNESCAPE = functools.partial(
re.compile(COMPILED_MACROS['unicode'], re.I).sub,
_unicode_replace)
@@ -205,7 +207,7 @@ def _unicode_replace(match, int=int, unichr=unichr, maxunicode=sys.maxunicode):
'')
SIMPLE_UNESCAPE = functools.partial(
- re.compile(r'\\(.)').sub,
+ re.compile(r'\\(%s)' % COMPILED_MACROS['simple_escape'], re.I).sub,
# Same as r'\1', but faster on CPython
operator.methodcaller('group', 1))
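Building SIMPLE_UNESCAPE from the `simple_escape` macro (instead of the old catch-all `\\(.)`) is what stops it from swallowing the leading hex digit of a unicode escape. A standalone sketch of the two patterns, with the character class hand-expanded from the macro above:

```python
import re

old_simple = re.compile(r'\\(.)')                      # old: any escaped char
new_simple = re.compile(r'\\([^\n\r\f0-9a-f])', re.I)  # new: hex digits excluded

source = r'Lorem\5c Ipsum'  # '\5c' is the *unicode* escape for a backslash
print(old_simple.sub(r'\1', source))  # 'Lorem5c Ipsum' -- backslash eaten
print(new_simple.sub(r'\1', source))  # unchanged; left for UNICODE_UNESCAPE
```

This is what lets the tokenizer (below) safely run SIMPLE_UNESCAPE before UNICODE_UNESCAPE.
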
@@ -328,6 +330,18 @@ def __repr__(self):
         return ('<Token {0.type} at {0.line}:{0.column} {0.value!r}{1}>'
                 .format(self, self.unit or ''))
+    def __eq__(self, other):
+        if type(self) != type(other):
+            raise TypeError(
+                'Cannot compare {0} and {1}'.format(type(self), type(other)))
+        else:
+            return all((
+                self.type == other.type,
+                self._as_css == other._as_css,
+                self.value == other.value,
+                self.unit == other.unit,
+            ))
+
class ContainerToken(object):
"""A token that contains other (nested) tokens.
diff --git a/tinycss/tokenizer.py b/tinycss/tokenizer.py
index 5540027..114b19b 100644
--- a/tinycss/tokenizer.py
+++ b/tinycss/tokenizer.py
@@ -1,4 +1,4 @@
-# coding: utf8
+# coding: utf-8
"""
tinycss.tokenizer
-----------------
@@ -17,20 +17,20 @@
from . import token_data
-def tokenize_flat(css_source, ignore_comments=True,
-    # Make these local variables to avoid global lookups in the loop
- tokens_dispatch=token_data.TOKEN_DISPATCH,
- unicode_unescape=token_data.UNICODE_UNESCAPE,
- newline_unescape=token_data.NEWLINE_UNESCAPE,
- simple_unescape=token_data.SIMPLE_UNESCAPE,
- find_newlines=token_data.FIND_NEWLINES,
- Token=token_data.Token,
- len=len,
- int=int,
- float=float,
- list=list,
- _None=None,
-):
+def tokenize_flat(
+ css_source, ignore_comments=True,
+    # Make these local variables to avoid global lookups in the loop
+ tokens_dispatch=token_data.TOKEN_DISPATCH,
+ unicode_unescape=token_data.UNICODE_UNESCAPE,
+ newline_unescape=token_data.NEWLINE_UNESCAPE,
+ simple_unescape=token_data.SIMPLE_UNESCAPE,
+ find_newlines=token_data.FIND_NEWLINES,
+ Token=token_data.Token,
+ len=len,
+ int=int,
+ float=float,
+ list=list,
+ _None=None):
"""
:param css_source:
        CSS as a unicode string
@@ -79,8 +79,8 @@ def tokenize_flat(css_source, ignore_comments=True,
value = match.group(1)
value = float(value) if '.' in value else int(value)
unit = match.group(2)
- unit = unicode_unescape(unit)
unit = simple_unescape(unit)
+ unit = unicode_unescape(unit)
unit = unit.lower() # normalize
elif type_ == 'PERCENTAGE':
value = css_value[:-1]
@@ -94,20 +94,20 @@ def tokenize_flat(css_source, ignore_comments=True,
value = int(value)
type_ = 'INTEGER'
elif type_ in ('IDENT', 'ATKEYWORD', 'HASH', 'FUNCTION'):
- value = unicode_unescape(css_value)
- value = simple_unescape(value)
+ value = simple_unescape(css_value)
+ value = unicode_unescape(value)
elif type_ == 'URI':
value = match.group(1)
if value and value[0] in '"\'':
value = value[1:-1] # Remove quotes
value = newline_unescape(value)
- value = unicode_unescape(value)
value = simple_unescape(value)
+ value = unicode_unescape(value)
elif type_ == 'STRING':
value = css_value[1:-1] # Remove quotes
value = newline_unescape(value)
- value = unicode_unescape(value)
value = simple_unescape(value)
+ value = unicode_unescape(value)
# BAD_STRING can only be one of:
# * Unclosed string at the end of the stylesheet:
# Close the string, but this is not an error.
@@ -120,8 +120,8 @@ def tokenize_flat(css_source, ignore_comments=True,
type_ = 'STRING'
value = css_value[1:] # Remove quote
value = newline_unescape(value)
- value = unicode_unescape(value)
value = simple_unescape(value)
+ value = unicode_unescape(value)
else:
value = css_value
tokens.append(Token(type_, css_value, value, unit, line, column))
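This reordering (here and in the IDENT, URI and DIMENSION branches above) is the substantive fix: with the old order, UNICODE_UNESCAPE turned `\5c ` into a literal backslash first, and SIMPLE_UNESCAPE then consumed that backslash together with the character after it. A sketch matching the `\5c` cases in the test table:

```python
from tinycss.tokenizer import python_tokenize_flat

token, = python_tokenize_flat(r'"Lorem\5c Ipsum"')
assert token.type == 'STRING'
assert token.value == r'Lorem\Ipsum'  # exactly one literal backslash survives
```
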
@@ -158,10 +158,9 @@ def regroup(tokens):
tokens = iter(tokens)
eof = [False]
- def _regroup_inner(stop_at=None,
- tokens=tokens, pairs=pairs, eof=eof,
- ContainerToken=token_data.ContainerToken,
- FunctionToken=token_data.FunctionToken):
+ def _regroup_inner(stop_at=None, tokens=tokens, pairs=pairs, eof=eof,
+ ContainerToken=token_data.ContainerToken,
+ FunctionToken=token_data.FunctionToken):
for token in tokens:
type_ = token.type
if type_ == stop_at:
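`regroup()` itself only gets a signature reflow here; for reference, this is the nesting behaviour the test_token_grouping cases above pin down, as a sketch: `(`, `[` and `{` tokens become ContainerTokens holding their inner tokens, and anything still open at EOF is closed without an error.

```python
from tinycss.tokenizer import python_tokenize_flat, regroup

tokens = list(regroup(python_tokenize_flat('a[b{"d')))
assert [t.type for t in tokens] == ['IDENT', '[']  # '[' container at top level
inner = tokens[1].content                          # tokens inside '[...]'
assert [t.type for t in inner] == ['IDENT', '{']   # '{' container, also closed
```
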
diff --git a/tinycss/version.py b/tinycss/version.py
index 68c0733..f896235 100644
--- a/tinycss/version.py
+++ b/tinycss/version.py
@@ -1 +1 @@
-VERSION = '0.2'
+VERSION = '0.4'
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index e951568..0000000
--- a/tox.ini
+++ /dev/null
@@ -1,19 +0,0 @@
-[tox]
-envlist = py26,py27,py31,py32,pypy,sphinx-doctests
-
-[testenv]
-deps = pytest
-changedir = {toxworkdir}/log
-commands = py.test --pyargs tinycss []
-
-[testenv:pypy]
-setenv = TINYCSS_SKIP_SPEEDUPS_TESTS=1
-
-[testenv:sphinx-doctests]
-basepython = python3
-deps =
- Sphinx
- cssselect
- lxml
-changedir = docs
-commands = sphinx-build -b doctest . _build/html